Merge lp:~james-page/charms/trusty/nova-cloud-controller/service-guard into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk
- Trusty Tahr (14.04)
- service-guard
- Merge into trunk
Proposed by
James Page
Status: | Superseded |
---|---|
Proposed branch: | lp:~james-page/charms/trusty/nova-cloud-controller/service-guard |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk |
Diff against target: |
4341 lines (+2840/-302) (has conflicts) 45 files modified
.bzrignore (+2/-0) Makefile (+17/-5) README.txt (+5/-0) charm-helpers-hooks.yaml (+11/-0) charm-helpers-tests.yaml (+5/-0) charm-helpers.yaml (+0/-10) config.yaml (+54/-11) hooks/charmhelpers/contrib/hahelpers/cluster.py (+3/-2) hooks/charmhelpers/contrib/network/ip.py (+156/-0) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+55/-0) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+209/-0) hooks/charmhelpers/contrib/openstack/context.py (+95/-22) hooks/charmhelpers/contrib/openstack/ip.py (+75/-0) hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+6/-1) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/openstack/utils.py (+11/-3) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0) hooks/charmhelpers/core/fstab.py (+116/-0) hooks/charmhelpers/core/hookenv.py (+5/-4) hooks/charmhelpers/core/host.py (+32/-12) hooks/charmhelpers/fetch/__init__.py (+33/-16) hooks/charmhelpers/fetch/bzrurl.py (+2/-1) hooks/nova_cc_context.py (+32/-1) hooks/nova_cc_hooks.py (+211/-57) hooks/nova_cc_utils.py (+218/-103) metadata.yaml (+2/-0) revision (+1/-1) tests/00-setup (+10/-0) tests/10-basic-precise-essex (+10/-0) tests/11-basic-precise-folsom (+18/-0) tests/12-basic-precise-grizzly (+12/-0) tests/13-basic-precise-havana (+12/-0) tests/14-basic-precise-icehouse (+12/-0) tests/15-basic-trusty-icehouse (+10/-0) tests/README (+47/-0) tests/basic_deployment.py (+520/-0) tests/charmhelpers/contrib/amulet/deployment.py (+58/-0) tests/charmhelpers/contrib/amulet/utils.py (+157/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+55/-0) tests/charmhelpers/contrib/openstack/amulet/utils.py (+209/-0) unit_tests/test_nova_cc_hooks.py (+146/-12) unit_tests/test_nova_cc_utils.py (+167/-14) unit_tests/test_utils.py (+3/-3) Text conflict in config.yaml |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/nova-cloud-controller/service-guard |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+228669@code.launchpad.net |
This proposal has been superseded by a proposal from 2014-07-29.
Commit message
Description of the change
Add support for service-guard configuration to disable services prior to relations being completely formed.
To post a comment you must log in.
- 95. By James Page
-
Don't add neutron stuff if related to neutron-api charm
- 96. By James Page
-
Fixup unit tests
- 97. By James Page
-
Tidy lint
Unmerged revisions
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' | |||
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 | |||
3 | +++ .bzrignore 2014-07-29 13:07:23 +0000 | |||
4 | @@ -0,0 +1,2 @@ | |||
5 | 1 | bin | ||
6 | 2 | .coverage | ||
7 | 0 | 3 | ||
8 | === modified file 'Makefile' | |||
9 | --- Makefile 2014-05-21 10:14:28 +0000 | |||
10 | +++ Makefile 2014-07-29 13:07:23 +0000 | |||
11 | @@ -2,16 +2,28 @@ | |||
12 | 2 | PYTHON := /usr/bin/env python | 2 | PYTHON := /usr/bin/env python |
13 | 3 | 3 | ||
14 | 4 | lint: | 4 | lint: |
16 | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests tests |
17 | 6 | @charm proof | 6 | @charm proof |
18 | 7 | 7 | ||
19 | 8 | unit_test: | ||
20 | 9 | @echo Starting unit tests... | ||
21 | 10 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | ||
22 | 11 | |||
23 | 12 | bin/charm_helpers_sync.py: | ||
24 | 13 | @mkdir -p bin | ||
25 | 14 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ | ||
26 | 15 | > bin/charm_helpers_sync.py | ||
27 | 8 | test: | 16 | test: |
30 | 9 | @echo Starting tests... | 17 | @echo Starting Amulet tests... |
31 | 10 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | 18 | # coreycb note: The -v should only be temporary until Amulet sends |
32 | 19 | # raise_status() messages to stderr: | ||
33 | 20 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
34 | 21 | @juju test -v -p AMULET_HTTP_PROXY | ||
35 | 11 | 22 | ||
36 | 12 | sync: | 23 | sync: |
38 | 13 | @charm-helper-sync -c charm-helpers.yaml | 24 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml |
39 | 25 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml | ||
40 | 14 | 26 | ||
42 | 15 | publish: lint test | 27 | publish: lint unit_test |
43 | 16 | bzr push lp:charms/nova-cloud-controller | 28 | bzr push lp:charms/nova-cloud-controller |
44 | 17 | bzr push lp:charms/trusty/nova-cloud-controller | 29 | bzr push lp:charms/trusty/nova-cloud-controller |
45 | 18 | 30 | ||
46 | === modified file 'README.txt' | |||
47 | --- README.txt 2014-03-25 09:11:04 +0000 | |||
48 | +++ README.txt 2014-07-29 13:07:23 +0000 | |||
49 | @@ -4,6 +4,11 @@ | |||
50 | 4 | 4 | ||
51 | 5 | Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. | 5 | Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. |
52 | 6 | 6 | ||
53 | 7 | The neutron-api interface can be used to join this charm with an external neutron-api server. If this is done | ||
54 | 8 | then this charm will shutdown its neutron-api service and the external charm will be registered as the | ||
55 | 9 | neutron-api endpoint in keystone. It will also use the quantum-security-groups setting which is passed to | ||
56 | 10 | it by the api service rather than its own quantum-security-groups setting. | ||
57 | 11 | |||
58 | 7 | ****************************************************** | 12 | ****************************************************** |
59 | 8 | Special considerations to be deployed using Postgresql | 13 | Special considerations to be deployed using Postgresql |
60 | 9 | ****************************************************** | 14 | ****************************************************** |
61 | 10 | 15 | ||
62 | === added file 'charm-helpers-hooks.yaml' | |||
63 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 | |||
64 | +++ charm-helpers-hooks.yaml 2014-07-29 13:07:23 +0000 | |||
65 | @@ -0,0 +1,11 @@ | |||
66 | 1 | branch: lp:charm-helpers | ||
67 | 2 | destination: hooks/charmhelpers | ||
68 | 3 | include: | ||
69 | 4 | - core | ||
70 | 5 | - fetch | ||
71 | 6 | - contrib.openstack|inc=* | ||
72 | 7 | - contrib.storage | ||
73 | 8 | - contrib.hahelpers: | ||
74 | 9 | - apache | ||
75 | 10 | - payload.execd | ||
76 | 11 | - contrib.network.ip | ||
77 | 0 | 12 | ||
78 | === added file 'charm-helpers-tests.yaml' | |||
79 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 | |||
80 | +++ charm-helpers-tests.yaml 2014-07-29 13:07:23 +0000 | |||
81 | @@ -0,0 +1,5 @@ | |||
82 | 1 | branch: lp:charm-helpers | ||
83 | 2 | destination: tests/charmhelpers | ||
84 | 3 | include: | ||
85 | 4 | - contrib.amulet | ||
86 | 5 | - contrib.openstack.amulet | ||
87 | 0 | 6 | ||
88 | === removed file 'charm-helpers.yaml' | |||
89 | --- charm-helpers.yaml 2014-04-02 17:04:22 +0000 | |||
90 | +++ charm-helpers.yaml 1970-01-01 00:00:00 +0000 | |||
91 | @@ -1,10 +0,0 @@ | |||
92 | 1 | branch: lp:charm-helpers | ||
93 | 2 | destination: hooks/charmhelpers | ||
94 | 3 | include: | ||
95 | 4 | - core | ||
96 | 5 | - fetch | ||
97 | 6 | - contrib.openstack|inc=* | ||
98 | 7 | - contrib.storage | ||
99 | 8 | - contrib.hahelpers: | ||
100 | 9 | - apache | ||
101 | 10 | - payload.execd | ||
102 | 11 | 0 | ||
103 | === modified file 'config.yaml' | |||
104 | --- config.yaml 2014-06-17 10:01:21 +0000 | |||
105 | +++ config.yaml 2014-07-29 13:07:23 +0000 | |||
106 | @@ -97,15 +97,11 @@ | |||
107 | 97 | # HA configuration settings | 97 | # HA configuration settings |
108 | 98 | vip: | 98 | vip: |
109 | 99 | type: string | 99 | type: string |
119 | 100 | description: "Virtual IP to use to front API services in ha configuration" | 100 | description: | |
120 | 101 | vip_iface: | 101 | Virtual IP(s) to use to front API services in HA configuration. |
121 | 102 | type: string | 102 | . |
122 | 103 | default: eth0 | 103 | If multiple networks are being used, a VIP should be provided for each |
123 | 104 | description: "Network Interface where to place the Virtual IP" | 104 | network, separated by spaces. |
115 | 105 | vip_cidr: | ||
116 | 106 | type: int | ||
117 | 107 | default: 24 | ||
118 | 108 | description: "Netmask that will be used for the Virtual IP" | ||
124 | 109 | ha-bindiface: | 105 | ha-bindiface: |
125 | 110 | type: string | 106 | type: string |
126 | 111 | default: eth0 | 107 | default: eth0 |
127 | @@ -163,5 +159,52 @@ | |||
128 | 163 | nvp-l3-uuid: | 159 | nvp-l3-uuid: |
129 | 164 | type: string | 160 | type: string |
130 | 165 | description: | | 161 | description: | |
133 | 166 | This is uuid of the default NVP/NSX L3 Gateway Service. | 162 | <<<<<<< TREE |
134 | 167 | # end of NVP/NSX configuration | 163 | This is uuid of the default NVP/NSX L3 Gateway Service. |
135 | 164 | # end of NVP/NSX configuration | ||
136 | 165 | ======= | ||
137 | 166 | This is uuid of the default NVP/NSX L3 Gateway Service. | ||
138 | 167 | # end of NVP/NSX configuration | ||
139 | 168 | # Network configuration options | ||
140 | 169 | # by default all access is over 'private-address' | ||
141 | 170 | os-admin-network: | ||
142 | 171 | type: string | ||
143 | 172 | description: | | ||
144 | 173 | The IP address and netmask of the OpenStack Admin network (e.g., | ||
145 | 174 | 192.168.0.0/24) | ||
146 | 175 | . | ||
147 | 176 | This network will be used for admin endpoints. | ||
148 | 177 | os-internal-network: | ||
149 | 178 | type: string | ||
150 | 179 | description: | | ||
151 | 180 | The IP address and netmask of the OpenStack Internal network (e.g., | ||
152 | 181 | 192.168.0.0/24) | ||
153 | 182 | . | ||
154 | 183 | This network will be used for internal endpoints. | ||
155 | 184 | os-public-network: | ||
156 | 185 | type: string | ||
157 | 186 | description: | | ||
158 | 187 | The IP address and netmask of the OpenStack Public network (e.g., | ||
159 | 188 | 192.168.0.0/24) | ||
160 | 189 | . | ||
161 | 190 | This network will be used for public endpoints. | ||
162 | 191 | service-guard: | ||
163 | 192 | type: boolean | ||
164 | 193 | default: false | ||
165 | 194 | description: | | ||
166 | 195 | Ensure required relations are made and complete before allowing services | ||
167 | 196 | to be started | ||
168 | 197 | . | ||
169 | 198 | By default, services may be up and accepting API requests from install | ||
170 | 199 | onwards. | ||
171 | 200 | . | ||
172 | 201 | Enabling this flag ensures that services will not be started until the | ||
173 | 202 | minimum 'core relations' have been made between this charm and other | ||
174 | 203 | charms. | ||
175 | 204 | . | ||
176 | 205 | For this charm the following relations must be made: | ||
177 | 206 | . | ||
178 | 207 | * shared-db or (pgsql-nova-db, pgsql-neutron-db) | ||
179 | 208 | * amqp | ||
180 | 209 | * identity-service | ||
181 | 210 | >>>>>>> MERGE-SOURCE | ||
182 | 168 | 211 | ||
183 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
184 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-17 12:10:27 +0000 | |||
185 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-29 13:07:23 +0000 | |||
186 | @@ -146,12 +146,12 @@ | |||
187 | 146 | Obtains all relevant configuration from charm configuration required | 146 | Obtains all relevant configuration from charm configuration required |
188 | 147 | for initiating a relation to hacluster: | 147 | for initiating a relation to hacluster: |
189 | 148 | 148 | ||
191 | 149 | ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr | 149 | ha-bindiface, ha-mcastport, vip |
192 | 150 | 150 | ||
193 | 151 | returns: dict: A dict containing settings keyed by setting name. | 151 | returns: dict: A dict containing settings keyed by setting name. |
194 | 152 | raises: HAIncompleteConfig if settings are missing. | 152 | raises: HAIncompleteConfig if settings are missing. |
195 | 153 | ''' | 153 | ''' |
197 | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
198 | 155 | conf = {} | 155 | conf = {} |
199 | 156 | for setting in settings: | 156 | for setting in settings: |
200 | 157 | conf[setting] = config_get(setting) | 157 | conf[setting] = config_get(setting) |
201 | @@ -170,6 +170,7 @@ | |||
202 | 170 | 170 | ||
203 | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for |
204 | 172 | a complete https context. | 172 | a complete https context. |
205 | 173 | |||
206 | 173 | :vip_setting: str: Setting in charm config that specifies | 174 | :vip_setting: str: Setting in charm config that specifies |
207 | 174 | VIP address. | 175 | VIP address. |
208 | 175 | ''' | 176 | ''' |
209 | 176 | 177 | ||
210 | === added directory 'hooks/charmhelpers/contrib/network' | |||
211 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' | |||
212 | === added file 'hooks/charmhelpers/contrib/network/ip.py' | |||
213 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 | |||
214 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-07-29 13:07:23 +0000 | |||
215 | @@ -0,0 +1,156 @@ | |||
216 | 1 | import sys | ||
217 | 2 | |||
218 | 3 | from functools import partial | ||
219 | 4 | |||
220 | 5 | from charmhelpers.fetch import apt_install | ||
221 | 6 | from charmhelpers.core.hookenv import ( | ||
222 | 7 | ERROR, log, | ||
223 | 8 | ) | ||
224 | 9 | |||
225 | 10 | try: | ||
226 | 11 | import netifaces | ||
227 | 12 | except ImportError: | ||
228 | 13 | apt_install('python-netifaces') | ||
229 | 14 | import netifaces | ||
230 | 15 | |||
231 | 16 | try: | ||
232 | 17 | import netaddr | ||
233 | 18 | except ImportError: | ||
234 | 19 | apt_install('python-netaddr') | ||
235 | 20 | import netaddr | ||
236 | 21 | |||
237 | 22 | |||
238 | 23 | def _validate_cidr(network): | ||
239 | 24 | try: | ||
240 | 25 | netaddr.IPNetwork(network) | ||
241 | 26 | except (netaddr.core.AddrFormatError, ValueError): | ||
242 | 27 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
243 | 28 | network) | ||
244 | 29 | |||
245 | 30 | |||
246 | 31 | def get_address_in_network(network, fallback=None, fatal=False): | ||
247 | 32 | """ | ||
248 | 33 | Get an IPv4 or IPv6 address within the network from the host. | ||
249 | 34 | |||
250 | 35 | :param network (str): CIDR presentation format. For example, | ||
251 | 36 | '192.168.1.0/24'. | ||
252 | 37 | :param fallback (str): If no address is found, return fallback. | ||
253 | 38 | :param fatal (boolean): If no address is found, fallback is not | ||
254 | 39 | set and fatal is True then exit(1). | ||
255 | 40 | |||
256 | 41 | """ | ||
257 | 42 | |||
258 | 43 | def not_found_error_out(): | ||
259 | 44 | log("No IP address found in network: %s" % network, | ||
260 | 45 | level=ERROR) | ||
261 | 46 | sys.exit(1) | ||
262 | 47 | |||
263 | 48 | if network is None: | ||
264 | 49 | if fallback is not None: | ||
265 | 50 | return fallback | ||
266 | 51 | else: | ||
267 | 52 | if fatal: | ||
268 | 53 | not_found_error_out() | ||
269 | 54 | |||
270 | 55 | _validate_cidr(network) | ||
271 | 56 | network = netaddr.IPNetwork(network) | ||
272 | 57 | for iface in netifaces.interfaces(): | ||
273 | 58 | addresses = netifaces.ifaddresses(iface) | ||
274 | 59 | if network.version == 4 and netifaces.AF_INET in addresses: | ||
275 | 60 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
276 | 61 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
277 | 62 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
278 | 63 | if cidr in network: | ||
279 | 64 | return str(cidr.ip) | ||
280 | 65 | if network.version == 6 and netifaces.AF_INET6 in addresses: | ||
281 | 66 | for addr in addresses[netifaces.AF_INET6]: | ||
282 | 67 | if not addr['addr'].startswith('fe80'): | ||
283 | 68 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
284 | 69 | addr['netmask'])) | ||
285 | 70 | if cidr in network: | ||
286 | 71 | return str(cidr.ip) | ||
287 | 72 | |||
288 | 73 | if fallback is not None: | ||
289 | 74 | return fallback | ||
290 | 75 | |||
291 | 76 | if fatal: | ||
292 | 77 | not_found_error_out() | ||
293 | 78 | |||
294 | 79 | return None | ||
295 | 80 | |||
296 | 81 | |||
297 | 82 | def is_ipv6(address): | ||
298 | 83 | '''Determine whether provided address is IPv6 or not''' | ||
299 | 84 | try: | ||
300 | 85 | address = netaddr.IPAddress(address) | ||
301 | 86 | except netaddr.AddrFormatError: | ||
302 | 87 | # probably a hostname - so not an address at all! | ||
303 | 88 | return False | ||
304 | 89 | else: | ||
305 | 90 | return address.version == 6 | ||
306 | 91 | |||
307 | 92 | |||
308 | 93 | def is_address_in_network(network, address): | ||
309 | 94 | """ | ||
310 | 95 | Determine whether the provided address is within a network range. | ||
311 | 96 | |||
312 | 97 | :param network (str): CIDR presentation format. For example, | ||
313 | 98 | '192.168.1.0/24'. | ||
314 | 99 | :param address: An individual IPv4 or IPv6 address without a net | ||
315 | 100 | mask or subnet prefix. For example, '192.168.1.1'. | ||
316 | 101 | :returns boolean: Flag indicating whether address is in network. | ||
317 | 102 | """ | ||
318 | 103 | try: | ||
319 | 104 | network = netaddr.IPNetwork(network) | ||
320 | 105 | except (netaddr.core.AddrFormatError, ValueError): | ||
321 | 106 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
322 | 107 | network) | ||
323 | 108 | try: | ||
324 | 109 | address = netaddr.IPAddress(address) | ||
325 | 110 | except (netaddr.core.AddrFormatError, ValueError): | ||
326 | 111 | raise ValueError("Address (%s) is not in correct presentation format" % | ||
327 | 112 | address) | ||
328 | 113 | if address in network: | ||
329 | 114 | return True | ||
330 | 115 | else: | ||
331 | 116 | return False | ||
332 | 117 | |||
333 | 118 | |||
334 | 119 | def _get_for_address(address, key): | ||
335 | 120 | """Retrieve an attribute of or the physical interface that | ||
336 | 121 | the IP address provided could be bound to. | ||
337 | 122 | |||
338 | 123 | :param address (str): An individual IPv4 or IPv6 address without a net | ||
339 | 124 | mask or subnet prefix. For example, '192.168.1.1'. | ||
340 | 125 | :param key: 'iface' for the physical interface name or an attribute | ||
341 | 126 | of the configured interface, for example 'netmask'. | ||
342 | 127 | :returns str: Requested attribute or None if address is not bindable. | ||
343 | 128 | """ | ||
344 | 129 | address = netaddr.IPAddress(address) | ||
345 | 130 | for iface in netifaces.interfaces(): | ||
346 | 131 | addresses = netifaces.ifaddresses(iface) | ||
347 | 132 | if address.version == 4 and netifaces.AF_INET in addresses: | ||
348 | 133 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
349 | 134 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
350 | 135 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
351 | 136 | if address in cidr: | ||
352 | 137 | if key == 'iface': | ||
353 | 138 | return iface | ||
354 | 139 | else: | ||
355 | 140 | return addresses[netifaces.AF_INET][0][key] | ||
356 | 141 | if address.version == 6 and netifaces.AF_INET6 in addresses: | ||
357 | 142 | for addr in addresses[netifaces.AF_INET6]: | ||
358 | 143 | if not addr['addr'].startswith('fe80'): | ||
359 | 144 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
360 | 145 | addr['netmask'])) | ||
361 | 146 | if address in cidr: | ||
362 | 147 | if key == 'iface': | ||
363 | 148 | return iface | ||
364 | 149 | else: | ||
365 | 150 | return addr[key] | ||
366 | 151 | return None | ||
367 | 152 | |||
368 | 153 | |||
369 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | ||
370 | 155 | |||
371 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | ||
372 | 0 | 157 | ||
373 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
374 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
375 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
376 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
377 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-29 13:07:23 +0000 | |||
378 | @@ -0,0 +1,55 @@ | |||
379 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
380 | 2 | AmuletDeployment | ||
381 | 3 | ) | ||
382 | 4 | |||
383 | 5 | |||
384 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
385 | 7 | """This class inherits from AmuletDeployment and has additional support | ||
386 | 8 | that is specifically for use by OpenStack charms.""" | ||
387 | 9 | |||
388 | 10 | def __init__(self, series=None, openstack=None, source=None): | ||
389 | 11 | """Initialize the deployment environment.""" | ||
390 | 12 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
391 | 13 | self.openstack = openstack | ||
392 | 14 | self.source = source | ||
393 | 15 | |||
394 | 16 | def _add_services(self, this_service, other_services): | ||
395 | 17 | """Add services to the deployment and set openstack-origin.""" | ||
396 | 18 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
397 | 19 | other_services) | ||
398 | 20 | name = 0 | ||
399 | 21 | services = other_services | ||
400 | 22 | services.append(this_service) | ||
401 | 23 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
402 | 24 | |||
403 | 25 | if self.openstack: | ||
404 | 26 | for svc in services: | ||
405 | 27 | if svc[name] not in use_source: | ||
406 | 28 | config = {'openstack-origin': self.openstack} | ||
407 | 29 | self.d.configure(svc[name], config) | ||
408 | 30 | |||
409 | 31 | if self.source: | ||
410 | 32 | for svc in services: | ||
411 | 33 | if svc[name] in use_source: | ||
412 | 34 | config = {'source': self.source} | ||
413 | 35 | self.d.configure(svc[name], config) | ||
414 | 36 | |||
415 | 37 | def _configure_services(self, configs): | ||
416 | 38 | """Configure all of the services.""" | ||
417 | 39 | for service, config in configs.iteritems(): | ||
418 | 40 | self.d.configure(service, config) | ||
419 | 41 | |||
420 | 42 | def _get_openstack_release(self): | ||
421 | 43 | """Return an integer representing the enum value of the openstack | ||
422 | 44 | release.""" | ||
423 | 45 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | ||
424 | 46 | self.precise_havana, self.precise_icehouse, \ | ||
425 | 47 | self.trusty_icehouse = range(6) | ||
426 | 48 | releases = { | ||
427 | 49 | ('precise', None): self.precise_essex, | ||
428 | 50 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
429 | 51 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
430 | 52 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
431 | 53 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
432 | 54 | ('trusty', None): self.trusty_icehouse} | ||
433 | 55 | return releases[(self.series, self.openstack)] | ||
434 | 0 | 56 | ||
435 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
436 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
437 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-29 13:07:23 +0000 | |||
438 | @@ -0,0 +1,209 @@ | |||
439 | 1 | import logging | ||
440 | 2 | import os | ||
441 | 3 | import time | ||
442 | 4 | import urllib | ||
443 | 5 | |||
444 | 6 | import glanceclient.v1.client as glance_client | ||
445 | 7 | import keystoneclient.v2_0 as keystone_client | ||
446 | 8 | import novaclient.v1_1.client as nova_client | ||
447 | 9 | |||
448 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
449 | 11 | AmuletUtils | ||
450 | 12 | ) | ||
451 | 13 | |||
452 | 14 | DEBUG = logging.DEBUG | ||
453 | 15 | ERROR = logging.ERROR | ||
454 | 16 | |||
455 | 17 | |||
456 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
457 | 19 | """This class inherits from AmuletUtils and has additional support | ||
458 | 20 | that is specifically for use by OpenStack charms.""" | ||
459 | 21 | |||
460 | 22 | def __init__(self, log_level=ERROR): | ||
461 | 23 | """Initialize the deployment environment.""" | ||
462 | 24 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
463 | 25 | |||
464 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
465 | 27 | public_port, expected): | ||
466 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | ||
467 | 29 | are used to find the matching endpoint.""" | ||
468 | 30 | found = False | ||
469 | 31 | for ep in endpoints: | ||
470 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
471 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | ||
472 | 34 | and public_port in ep.publicurl: | ||
473 | 35 | found = True | ||
474 | 36 | actual = {'id': ep.id, | ||
475 | 37 | 'region': ep.region, | ||
476 | 38 | 'adminurl': ep.adminurl, | ||
477 | 39 | 'internalurl': ep.internalurl, | ||
478 | 40 | 'publicurl': ep.publicurl, | ||
479 | 41 | 'service_id': ep.service_id} | ||
480 | 42 | ret = self._validate_dict_data(expected, actual) | ||
481 | 43 | if ret: | ||
482 | 44 | return 'unexpected endpoint data - {}'.format(ret) | ||
483 | 45 | |||
484 | 46 | if not found: | ||
485 | 47 | return 'endpoint not found' | ||
486 | 48 | |||
487 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
488 | 50 | """Validate a list of actual service catalog endpoints vs a list of | ||
489 | 51 | expected service catalog endpoints.""" | ||
490 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | ||
491 | 53 | for k, v in expected.iteritems(): | ||
492 | 54 | if k in actual: | ||
493 | 55 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
494 | 56 | if ret: | ||
495 | 57 | return self.endpoint_error(k, ret) | ||
496 | 58 | else: | ||
497 | 59 | return "endpoint {} does not exist".format(k) | ||
498 | 60 | return ret | ||
499 | 61 | |||
500 | 62 | def validate_tenant_data(self, expected, actual): | ||
501 | 63 | """Validate a list of actual tenant data vs list of expected tenant | ||
502 | 64 | data.""" | ||
503 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | ||
504 | 66 | for e in expected: | ||
505 | 67 | found = False | ||
506 | 68 | for act in actual: | ||
507 | 69 | a = {'enabled': act.enabled, 'description': act.description, | ||
508 | 70 | 'name': act.name, 'id': act.id} | ||
509 | 71 | if e['name'] == a['name']: | ||
510 | 72 | found = True | ||
511 | 73 | ret = self._validate_dict_data(e, a) | ||
512 | 74 | if ret: | ||
513 | 75 | return "unexpected tenant data - {}".format(ret) | ||
514 | 76 | if not found: | ||
515 | 77 | return "tenant {} does not exist".format(e['name']) | ||
516 | 78 | return ret | ||
517 | 79 | |||
518 | 80 | def validate_role_data(self, expected, actual): | ||
519 | 81 | """Validate a list of actual role data vs a list of expected role | ||
520 | 82 | data.""" | ||
521 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | ||
522 | 84 | for e in expected: | ||
523 | 85 | found = False | ||
524 | 86 | for act in actual: | ||
525 | 87 | a = {'name': act.name, 'id': act.id} | ||
526 | 88 | if e['name'] == a['name']: | ||
527 | 89 | found = True | ||
528 | 90 | ret = self._validate_dict_data(e, a) | ||
529 | 91 | if ret: | ||
530 | 92 | return "unexpected role data - {}".format(ret) | ||
531 | 93 | if not found: | ||
532 | 94 | return "role {} does not exist".format(e['name']) | ||
533 | 95 | return ret | ||
534 | 96 | |||
535 | 97 | def validate_user_data(self, expected, actual): | ||
536 | 98 | """Validate a list of actual user data vs a list of expected user | ||
537 | 99 | data.""" | ||
538 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | ||
539 | 101 | for e in expected: | ||
540 | 102 | found = False | ||
541 | 103 | for act in actual: | ||
542 | 104 | a = {'enabled': act.enabled, 'name': act.name, | ||
543 | 105 | 'email': act.email, 'tenantId': act.tenantId, | ||
544 | 106 | 'id': act.id} | ||
545 | 107 | if e['name'] == a['name']: | ||
546 | 108 | found = True | ||
547 | 109 | ret = self._validate_dict_data(e, a) | ||
548 | 110 | if ret: | ||
549 | 111 | return "unexpected user data - {}".format(ret) | ||
550 | 112 | if not found: | ||
551 | 113 | return "user {} does not exist".format(e['name']) | ||
552 | 114 | return ret | ||
553 | 115 | |||
554 | 116 | def validate_flavor_data(self, expected, actual): | ||
555 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | ||
556 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | ||
557 | 119 | act = [a.name for a in actual] | ||
558 | 120 | return self._validate_list_data(expected, act) | ||
559 | 121 | |||
560 | 122 | def tenant_exists(self, keystone, tenant): | ||
561 | 123 | """Return True if tenant exists""" | ||
562 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | ||
563 | 125 | |||
564 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
565 | 127 | tenant): | ||
566 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | ||
567 | 129 | service_ip = \ | ||
568 | 130 | keystone_sentry.relation('shared-db', | ||
569 | 131 | 'mysql:shared-db')['private-address'] | ||
570 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
571 | 133 | return keystone_client.Client(username=user, password=password, | ||
572 | 134 | tenant_name=tenant, auth_url=ep) | ||
573 | 135 | |||
574 | 136 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
575 | 137 | """Authenticates a regular user with the keystone public endpoint.""" | ||
576 | 138 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
577 | 139 | endpoint_type='publicURL') | ||
578 | 140 | return keystone_client.Client(username=user, password=password, | ||
579 | 141 | tenant_name=tenant, auth_url=ep) | ||
580 | 142 | |||
581 | 143 | def authenticate_glance_admin(self, keystone): | ||
582 | 144 | """Authenticates admin user with glance.""" | ||
583 | 145 | ep = keystone.service_catalog.url_for(service_type='image', | ||
584 | 146 | endpoint_type='adminURL') | ||
585 | 147 | return glance_client.Client(ep, token=keystone.auth_token) | ||
586 | 148 | |||
587 | 149 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
588 | 150 | """Authenticates a regular user with nova-api.""" | ||
589 | 151 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
590 | 152 | endpoint_type='publicURL') | ||
591 | 153 | return nova_client.Client(username=user, api_key=password, | ||
592 | 154 | project_id=tenant, auth_url=ep) | ||
593 | 155 | |||
594 | 156 | def create_cirros_image(self, glance, image_name): | ||
595 | 157 | """Download the latest cirros image and upload it to glance.""" | ||
596 | 158 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
597 | 159 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
598 | 160 | if http_proxy: | ||
599 | 161 | proxies = {'http': http_proxy} | ||
600 | 162 | opener = urllib.FancyURLopener(proxies) | ||
601 | 163 | else: | ||
602 | 164 | opener = urllib.FancyURLopener() | ||
603 | 165 | |||
604 | 166 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
605 | 167 | version = f.read().strip() | ||
606 | 168 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | ||
607 | 169 | |||
608 | 170 | if not os.path.exists(cirros_img): | ||
609 | 171 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
610 | 172 | version, cirros_img) | ||
611 | 173 | opener.retrieve(cirros_url, cirros_img) | ||
612 | 174 | f.close() | ||
613 | 175 | |||
614 | 176 | with open(cirros_img) as f: | ||
615 | 177 | image = glance.images.create(name=image_name, is_public=True, | ||
616 | 178 | disk_format='qcow2', | ||
617 | 179 | container_format='bare', data=f) | ||
618 | 180 | return image | ||
619 | 181 | |||
620 | 182 | def delete_image(self, glance, image): | ||
621 | 183 | """Delete the specified image.""" | ||
622 | 184 | glance.images.delete(image) | ||
623 | 185 | |||
624 | 186 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
625 | 187 | """Create the specified instance.""" | ||
626 | 188 | image = nova.images.find(name=image_name) | ||
627 | 189 | flavor = nova.flavors.find(name=flavor) | ||
628 | 190 | instance = nova.servers.create(name=instance_name, image=image, | ||
629 | 191 | flavor=flavor) | ||
630 | 192 | |||
631 | 193 | count = 1 | ||
632 | 194 | status = instance.status | ||
633 | 195 | while status != 'ACTIVE' and count < 60: | ||
634 | 196 | time.sleep(3) | ||
635 | 197 | instance = nova.servers.get(instance.id) | ||
636 | 198 | status = instance.status | ||
637 | 199 | self.log.debug('instance status: {}'.format(status)) | ||
638 | 200 | count += 1 | ||
639 | 201 | |||
640 | 202 | if status == 'BUILD': | ||
641 | 203 | return None | ||
642 | 204 | |||
643 | 205 | return instance | ||
644 | 206 | |||
645 | 207 | def delete_instance(self, nova, instance): | ||
646 | 208 | """Delete the specified instance.""" | ||
647 | 209 | nova.servers.delete(instance) | ||
648 | 0 | 210 | ||
649 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
650 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-05-19 11:38:09 +0000 | |||
651 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-29 13:07:23 +0000 | |||
652 | @@ -21,9 +21,11 @@ | |||
653 | 21 | relation_get, | 21 | relation_get, |
654 | 22 | relation_ids, | 22 | relation_ids, |
655 | 23 | related_units, | 23 | related_units, |
656 | 24 | relation_set, | ||
657 | 24 | unit_get, | 25 | unit_get, |
658 | 25 | unit_private_ip, | 26 | unit_private_ip, |
659 | 26 | ERROR, | 27 | ERROR, |
660 | 28 | INFO | ||
661 | 27 | ) | 29 | ) |
662 | 28 | 30 | ||
663 | 29 | from charmhelpers.contrib.hahelpers.cluster import ( | 31 | from charmhelpers.contrib.hahelpers.cluster import ( |
664 | @@ -42,6 +44,8 @@ | |||
665 | 42 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
666 | 43 | ) | 45 | ) |
667 | 44 | 46 | ||
668 | 47 | from charmhelpers.contrib.network.ip import get_address_in_network | ||
669 | 48 | |||
670 | 45 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
671 | 46 | 50 | ||
672 | 47 | 51 | ||
673 | @@ -134,8 +138,26 @@ | |||
674 | 134 | 'Missing required charm config options. ' | 138 | 'Missing required charm config options. ' |
675 | 135 | '(database name and user)') | 139 | '(database name and user)') |
676 | 136 | raise OSContextError | 140 | raise OSContextError |
677 | 141 | |||
678 | 137 | ctxt = {} | 142 | ctxt = {} |
679 | 138 | 143 | ||
680 | 144 | # NOTE(jamespage) if mysql charm provides a network upon which | ||
681 | 145 | # access to the database should be made, reconfigure relation | ||
682 | 146 | # with the service units local address and defer execution | ||
683 | 147 | access_network = relation_get('access-network') | ||
684 | 148 | if access_network is not None: | ||
685 | 149 | if self.relation_prefix is not None: | ||
686 | 150 | hostname_key = "{}_hostname".format(self.relation_prefix) | ||
687 | 151 | else: | ||
688 | 152 | hostname_key = "hostname" | ||
689 | 153 | access_hostname = get_address_in_network(access_network, | ||
690 | 154 | unit_get('private-address')) | ||
691 | 155 | set_hostname = relation_get(attribute=hostname_key, | ||
692 | 156 | unit=local_unit()) | ||
693 | 157 | if set_hostname != access_hostname: | ||
694 | 158 | relation_set(relation_settings={hostname_key: access_hostname}) | ||
695 | 159 | return ctxt # Defer any further hook execution for now.... | ||
696 | 160 | |||
697 | 139 | password_setting = 'password' | 161 | password_setting = 'password' |
698 | 140 | if self.relation_prefix: | 162 | if self.relation_prefix: |
699 | 141 | password_setting = self.relation_prefix + '_password' | 163 | password_setting = self.relation_prefix + '_password' |
700 | @@ -243,23 +265,31 @@ | |||
701 | 243 | 265 | ||
702 | 244 | 266 | ||
703 | 245 | class AMQPContext(OSContextGenerator): | 267 | class AMQPContext(OSContextGenerator): |
704 | 246 | interfaces = ['amqp'] | ||
705 | 247 | 268 | ||
707 | 248 | def __init__(self, ssl_dir=None): | 269 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): |
708 | 249 | self.ssl_dir = ssl_dir | 270 | self.ssl_dir = ssl_dir |
709 | 271 | self.rel_name = rel_name | ||
710 | 272 | self.relation_prefix = relation_prefix | ||
711 | 273 | self.interfaces = [rel_name] | ||
712 | 250 | 274 | ||
713 | 251 | def __call__(self): | 275 | def __call__(self): |
714 | 252 | log('Generating template context for amqp') | 276 | log('Generating template context for amqp') |
715 | 253 | conf = config() | 277 | conf = config() |
716 | 278 | user_setting = 'rabbit-user' | ||
717 | 279 | vhost_setting = 'rabbit-vhost' | ||
718 | 280 | if self.relation_prefix: | ||
719 | 281 | user_setting = self.relation_prefix + '-rabbit-user' | ||
720 | 282 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | ||
721 | 283 | |||
722 | 254 | try: | 284 | try: |
725 | 255 | username = conf['rabbit-user'] | 285 | username = conf[user_setting] |
726 | 256 | vhost = conf['rabbit-vhost'] | 286 | vhost = conf[vhost_setting] |
727 | 257 | except KeyError as e: | 287 | except KeyError as e: |
728 | 258 | log('Could not generate shared_db context. ' | 288 | log('Could not generate shared_db context. ' |
729 | 259 | 'Missing required charm config options: %s.' % e) | 289 | 'Missing required charm config options: %s.' % e) |
730 | 260 | raise OSContextError | 290 | raise OSContextError |
731 | 261 | ctxt = {} | 291 | ctxt = {} |
733 | 262 | for rid in relation_ids('amqp'): | 292 | for rid in relation_ids(self.rel_name): |
734 | 263 | ha_vip_only = False | 293 | ha_vip_only = False |
735 | 264 | for unit in related_units(rid): | 294 | for unit in related_units(rid): |
736 | 265 | if relation_get('clustered', rid=rid, unit=unit): | 295 | if relation_get('clustered', rid=rid, unit=unit): |
737 | @@ -332,10 +362,12 @@ | |||
738 | 332 | use_syslog = str(config('use-syslog')).lower() | 362 | use_syslog = str(config('use-syslog')).lower() |
739 | 333 | for rid in relation_ids('ceph'): | 363 | for rid in relation_ids('ceph'): |
740 | 334 | for unit in related_units(rid): | 364 | for unit in related_units(rid): |
741 | 335 | mon_hosts.append(relation_get('private-address', rid=rid, | ||
742 | 336 | unit=unit)) | ||
743 | 337 | auth = relation_get('auth', rid=rid, unit=unit) | 365 | auth = relation_get('auth', rid=rid, unit=unit) |
744 | 338 | key = relation_get('key', rid=rid, unit=unit) | 366 | key = relation_get('key', rid=rid, unit=unit) |
745 | 367 | ceph_addr = \ | ||
746 | 368 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | ||
747 | 369 | relation_get('private-address', rid=rid, unit=unit) | ||
748 | 370 | mon_hosts.append(ceph_addr) | ||
749 | 339 | 371 | ||
750 | 340 | ctxt = { | 372 | ctxt = { |
751 | 341 | 'mon_hosts': ' '.join(mon_hosts), | 373 | 'mon_hosts': ' '.join(mon_hosts), |
752 | @@ -369,7 +401,9 @@ | |||
753 | 369 | 401 | ||
754 | 370 | cluster_hosts = {} | 402 | cluster_hosts = {} |
755 | 371 | l_unit = local_unit().replace('/', '-') | 403 | l_unit = local_unit().replace('/', '-') |
757 | 372 | cluster_hosts[l_unit] = unit_get('private-address') | 404 | cluster_hosts[l_unit] = \ |
758 | 405 | get_address_in_network(config('os-internal-network'), | ||
759 | 406 | unit_get('private-address')) | ||
760 | 373 | 407 | ||
761 | 374 | for rid in relation_ids('cluster'): | 408 | for rid in relation_ids('cluster'): |
762 | 375 | for unit in related_units(rid): | 409 | for unit in related_units(rid): |
763 | @@ -418,12 +452,13 @@ | |||
764 | 418 | """ | 452 | """ |
765 | 419 | Generates a context for an apache vhost configuration that configures | 453 | Generates a context for an apache vhost configuration that configures |
766 | 420 | HTTPS reverse proxying for one or many endpoints. Generated context | 454 | HTTPS reverse proxying for one or many endpoints. Generated context |
773 | 421 | looks something like: | 455 | looks something like:: |
774 | 422 | { | 456 | |
775 | 423 | 'namespace': 'cinder', | 457 | { |
776 | 424 | 'private_address': 'iscsi.mycinderhost.com', | 458 | 'namespace': 'cinder', |
777 | 425 | 'endpoints': [(8776, 8766), (8777, 8767)] | 459 | 'private_address': 'iscsi.mycinderhost.com', |
778 | 426 | } | 460 | 'endpoints': [(8776, 8766), (8777, 8767)] |
779 | 461 | } | ||
780 | 427 | 462 | ||
781 | 428 | The endpoints list consists of a tuples mapping external ports | 463 | The endpoints list consists of a tuples mapping external ports |
782 | 429 | to internal ports. | 464 | to internal ports. |
783 | @@ -541,6 +576,26 @@ | |||
784 | 541 | 576 | ||
785 | 542 | return nvp_ctxt | 577 | return nvp_ctxt |
786 | 543 | 578 | ||
787 | 579 | def n1kv_ctxt(self): | ||
788 | 580 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
789 | 581 | self.network_manager) | ||
790 | 582 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | ||
791 | 583 | self.network_manager) | ||
792 | 584 | n1kv_ctxt = { | ||
793 | 585 | 'core_plugin': driver, | ||
794 | 586 | 'neutron_plugin': 'n1kv', | ||
795 | 587 | 'neutron_security_groups': self.neutron_security_groups, | ||
796 | 588 | 'local_ip': unit_private_ip(), | ||
797 | 589 | 'config': n1kv_config, | ||
798 | 590 | 'vsm_ip': config('n1kv-vsm-ip'), | ||
799 | 591 | 'vsm_username': config('n1kv-vsm-username'), | ||
800 | 592 | 'vsm_password': config('n1kv-vsm-password'), | ||
801 | 593 | 'restrict_policy_profiles': config( | ||
802 | 594 | 'n1kv_restrict_policy_profiles'), | ||
803 | 595 | } | ||
804 | 596 | |||
805 | 597 | return n1kv_ctxt | ||
806 | 598 | |||
807 | 544 | def neutron_ctxt(self): | 599 | def neutron_ctxt(self): |
808 | 545 | if https(): | 600 | if https(): |
809 | 546 | proto = 'https' | 601 | proto = 'https' |
810 | @@ -572,6 +627,8 @@ | |||
811 | 572 | ctxt.update(self.ovs_ctxt()) | 627 | ctxt.update(self.ovs_ctxt()) |
812 | 573 | elif self.plugin in ['nvp', 'nsx']: | 628 | elif self.plugin in ['nvp', 'nsx']: |
813 | 574 | ctxt.update(self.nvp_ctxt()) | 629 | ctxt.update(self.nvp_ctxt()) |
814 | 630 | elif self.plugin == 'n1kv': | ||
815 | 631 | ctxt.update(self.n1kv_ctxt()) | ||
816 | 575 | 632 | ||
817 | 576 | alchemy_flags = config('neutron-alchemy-flags') | 633 | alchemy_flags = config('neutron-alchemy-flags') |
818 | 577 | if alchemy_flags: | 634 | if alchemy_flags: |
819 | @@ -611,7 +668,7 @@ | |||
820 | 611 | The subordinate interface allows subordinates to export their | 668 | The subordinate interface allows subordinates to export their |
821 | 612 | configuration requirements to the principle for multiple config | 669 | configuration requirements to the principle for multiple config |
822 | 613 | files and multiple serivces. Ie, a subordinate that has interfaces | 670 | files and multiple serivces. Ie, a subordinate that has interfaces |
824 | 614 | to both glance and nova may export to following yaml blob as json: | 671 | to both glance and nova may export to following yaml blob as json:: |
825 | 615 | 672 | ||
826 | 616 | glance: | 673 | glance: |
827 | 617 | /etc/glance/glance-api.conf: | 674 | /etc/glance/glance-api.conf: |
828 | @@ -630,7 +687,8 @@ | |||
829 | 630 | 687 | ||
830 | 631 | It is then up to the principle charms to subscribe this context to | 688 | It is then up to the principle charms to subscribe this context to |
831 | 632 | the service+config file it is interestd in. Configuration data will | 689 | the service+config file it is interestd in. Configuration data will |
833 | 633 | be available in the template context, in glance's case, as: | 690 | be available in the template context, in glance's case, as:: |
834 | 691 | |||
835 | 634 | ctxt = { | 692 | ctxt = { |
836 | 635 | ... other context ... | 693 | ... other context ... |
837 | 636 | 'subordinate_config': { | 694 | 'subordinate_config': { |
838 | @@ -657,7 +715,7 @@ | |||
839 | 657 | self.interface = interface | 715 | self.interface = interface |
840 | 658 | 716 | ||
841 | 659 | def __call__(self): | 717 | def __call__(self): |
843 | 660 | ctxt = {} | 718 | ctxt = {'sections': {}} |
844 | 661 | for rid in relation_ids(self.interface): | 719 | for rid in relation_ids(self.interface): |
845 | 662 | for unit in related_units(rid): | 720 | for unit in related_units(rid): |
846 | 663 | sub_config = relation_get('subordinate_configuration', | 721 | sub_config = relation_get('subordinate_configuration', |
847 | @@ -683,11 +741,26 @@ | |||
848 | 683 | 741 | ||
849 | 684 | sub_config = sub_config[self.config_file] | 742 | sub_config = sub_config[self.config_file] |
850 | 685 | for k, v in sub_config.iteritems(): | 743 | for k, v in sub_config.iteritems(): |
856 | 686 | ctxt[k] = v | 744 | if k == 'sections': |
857 | 687 | 745 | for section, config_dict in v.iteritems(): | |
858 | 688 | if not ctxt: | 746 | log("adding section '%s'" % (section)) |
859 | 689 | ctxt['sections'] = {} | 747 | ctxt[k][section] = config_dict |
860 | 690 | 748 | else: | |
861 | 749 | ctxt[k] = v | ||
862 | 750 | |||
863 | 751 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | ||
864 | 752 | |||
865 | 753 | return ctxt | ||
866 | 754 | |||
867 | 755 | |||
868 | 756 | class LogLevelContext(OSContextGenerator): | ||
869 | 757 | |||
870 | 758 | def __call__(self): | ||
871 | 759 | ctxt = {} | ||
872 | 760 | ctxt['debug'] = \ | ||
873 | 761 | False if config('debug') is None else config('debug') | ||
874 | 762 | ctxt['verbose'] = \ | ||
875 | 763 | False if config('verbose') is None else config('verbose') | ||
876 | 691 | return ctxt | 764 | return ctxt |
877 | 692 | 765 | ||
878 | 693 | 766 | ||
879 | 694 | 767 | ||
880 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
881 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 | |||
882 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-29 13:07:23 +0000 | |||
883 | @@ -0,0 +1,75 @@ | |||
884 | 1 | from charmhelpers.core.hookenv import ( | ||
885 | 2 | config, | ||
886 | 3 | unit_get, | ||
887 | 4 | ) | ||
888 | 5 | |||
889 | 6 | from charmhelpers.contrib.network.ip import ( | ||
890 | 7 | get_address_in_network, | ||
891 | 8 | is_address_in_network, | ||
892 | 9 | is_ipv6, | ||
893 | 10 | ) | ||
894 | 11 | |||
895 | 12 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | ||
896 | 13 | |||
897 | 14 | PUBLIC = 'public' | ||
898 | 15 | INTERNAL = 'int' | ||
899 | 16 | ADMIN = 'admin' | ||
900 | 17 | |||
901 | 18 | _address_map = { | ||
902 | 19 | PUBLIC: { | ||
903 | 20 | 'config': 'os-public-network', | ||
904 | 21 | 'fallback': 'public-address' | ||
905 | 22 | }, | ||
906 | 23 | INTERNAL: { | ||
907 | 24 | 'config': 'os-internal-network', | ||
908 | 25 | 'fallback': 'private-address' | ||
909 | 26 | }, | ||
910 | 27 | ADMIN: { | ||
911 | 28 | 'config': 'os-admin-network', | ||
912 | 29 | 'fallback': 'private-address' | ||
913 | 30 | } | ||
914 | 31 | } | ||
915 | 32 | |||
916 | 33 | |||
917 | 34 | def canonical_url(configs, endpoint_type=PUBLIC): | ||
918 | 35 | ''' | ||
919 | 36 | Returns the correct HTTP URL to this host given the state of HTTPS | ||
920 | 37 | configuration, hacluster and charm configuration. | ||
921 | 38 | |||
922 | 39 | :configs OSTemplateRenderer: A config tempating object to inspect for | ||
923 | 40 | a complete https context. | ||
924 | 41 | :endpoint_type str: The endpoint type to resolve. | ||
925 | 42 | |||
926 | 43 | :returns str: Base URL for services on the current service unit. | ||
927 | 44 | ''' | ||
928 | 45 | scheme = 'http' | ||
929 | 46 | if 'https' in configs.complete_contexts(): | ||
930 | 47 | scheme = 'https' | ||
931 | 48 | address = resolve_address(endpoint_type) | ||
932 | 49 | if is_ipv6(address): | ||
933 | 50 | address = "[{}]".format(address) | ||
934 | 51 | return '%s://%s' % (scheme, address) | ||
935 | 52 | |||
936 | 53 | |||
937 | 54 | def resolve_address(endpoint_type=PUBLIC): | ||
938 | 55 | resolved_address = None | ||
939 | 56 | if is_clustered(): | ||
940 | 57 | if config(_address_map[endpoint_type]['config']) is None: | ||
941 | 58 | # Assume vip is simple and pass back directly | ||
942 | 59 | resolved_address = config('vip') | ||
943 | 60 | else: | ||
944 | 61 | for vip in config('vip').split(): | ||
945 | 62 | if is_address_in_network( | ||
946 | 63 | config(_address_map[endpoint_type]['config']), | ||
947 | 64 | vip): | ||
948 | 65 | resolved_address = vip | ||
949 | 66 | else: | ||
950 | 67 | resolved_address = get_address_in_network( | ||
951 | 68 | config(_address_map[endpoint_type]['config']), | ||
952 | 69 | unit_get(_address_map[endpoint_type]['fallback']) | ||
953 | 70 | ) | ||
954 | 71 | if resolved_address is None: | ||
955 | 72 | raise ValueError('Unable to resolve a suitable IP address' | ||
956 | 73 | ' based on charm state and configuration') | ||
957 | 74 | else: | ||
958 | 75 | return resolved_address | ||
959 | 0 | 76 | ||
960 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
961 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:38:09 +0000 | |||
962 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-29 13:07:23 +0000 | |||
963 | @@ -128,6 +128,20 @@ | |||
964 | 128 | 'server_packages': ['neutron-server', | 128 | 'server_packages': ['neutron-server', |
965 | 129 | 'neutron-plugin-vmware'], | 129 | 'neutron-plugin-vmware'], |
966 | 130 | 'server_services': ['neutron-server'] | 130 | 'server_services': ['neutron-server'] |
967 | 131 | }, | ||
968 | 132 | 'n1kv': { | ||
969 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
970 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
971 | 135 | 'contexts': [ | ||
972 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
973 | 137 | database=config('neutron-database'), | ||
974 | 138 | relation_prefix='neutron', | ||
975 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
976 | 140 | 'services': [], | ||
977 | 141 | 'packages': [['neutron-plugin-cisco']], | ||
978 | 142 | 'server_packages': ['neutron-server', | ||
979 | 143 | 'neutron-plugin-cisco'], | ||
980 | 144 | 'server_services': ['neutron-server'] | ||
981 | 131 | } | 145 | } |
982 | 132 | } | 146 | } |
983 | 133 | if release >= 'icehouse': | 147 | if release >= 'icehouse': |
984 | 134 | 148 | ||
985 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
986 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-27 09:26:38 +0000 | |||
987 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-29 13:07:23 +0000 | |||
988 | @@ -27,7 +27,12 @@ | |||
989 | 27 | 27 | ||
990 | 28 | {% if units -%} | 28 | {% if units -%} |
991 | 29 | {% for service, ports in service_ports.iteritems() -%} | 29 | {% for service, ports in service_ports.iteritems() -%} |
993 | 30 | listen {{ service }} 0.0.0.0:{{ ports[0] }} | 30 | listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} |
994 | 31 | balance roundrobin | ||
995 | 32 | {% for unit, address in units.iteritems() -%} | ||
996 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
997 | 34 | {% endfor %} | ||
998 | 35 | listen {{ service }}_ipv6 :::{{ ports[0] }} | ||
999 | 31 | balance roundrobin | 36 | balance roundrobin |
1000 | 32 | {% for unit, address in units.iteritems() -%} | 37 | {% for unit, address in units.iteritems() -%} |
1001 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | 38 | server {{ unit }} {{ address }}:{{ ports[1] }} check |
1002 | 34 | 39 | ||
1003 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
1004 | --- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000 | |||
1005 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-29 13:07:23 +0000 | |||
1006 | @@ -30,17 +30,17 @@ | |||
1007 | 30 | loading dir. | 30 | loading dir. |
1008 | 31 | 31 | ||
1009 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
1021 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
1022 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
1023 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
1024 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
1025 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
1026 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
1027 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
1028 | 40 | 40 | loader. | |
1029 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
1030 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
1031 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
1032 | 44 | """ | 44 | """ |
1033 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
1034 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
1035 | @@ -111,7 +111,8 @@ | |||
1036 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
1037 | 112 | releases. | 112 | releases. |
1038 | 113 | 113 | ||
1040 | 114 | Basic usage: | 114 | Basic usage:: |
1041 | 115 | |||
1042 | 115 | # import some common context generates from charmhelpers | 116 | # import some common context generates from charmhelpers |
1043 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
1044 | 117 | 118 | ||
1045 | @@ -131,21 +132,19 @@ | |||
1046 | 131 | # write out all registered configs | 132 | # write out all registered configs |
1047 | 132 | configs.write_all() | 133 | configs.write_all() |
1048 | 133 | 134 | ||
1050 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
1051 | 135 | 136 | ||
1052 | 136 | OpenStack Releases and template loading | ||
1053 | 137 | --------------------------------------- | ||
1054 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
1055 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
1056 | 140 | 139 | ||
1057 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
1058 | 142 | in the following order: | 141 | in the following order: |
1065 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
1066 | 144 | - the base templates_dir | 143 | - the base templates_dir |
1067 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
1068 | 146 | 145 | ||
1069 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
1070 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
1071 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
1072 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
1073 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
1074 | @@ -169,8 +168,8 @@ | |||
1075 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
1076 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
1077 | 171 | 170 | ||
1080 | 172 | Context generators | 171 | **Context generators** |
1081 | 173 | --------------------------------------- | 172 | |
1082 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
1083 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
1084 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
1085 | 177 | 176 | ||
1086 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
1087 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-19 11:38:09 +0000 | |||
1088 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-29 13:07:23 +0000 | |||
1089 | @@ -3,7 +3,6 @@ | |||
1090 | 3 | # Common python helper functions used for OpenStack charms. | 3 | # Common python helper functions used for OpenStack charms. |
1091 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1092 | 5 | 5 | ||
1093 | 6 | import apt_pkg as apt | ||
1094 | 7 | import subprocess | 6 | import subprocess |
1095 | 8 | import os | 7 | import os |
1096 | 9 | import socket | 8 | import socket |
1097 | @@ -41,7 +40,8 @@ | |||
1098 | 41 | ('quantal', 'folsom'), | 40 | ('quantal', 'folsom'), |
1099 | 42 | ('raring', 'grizzly'), | 41 | ('raring', 'grizzly'), |
1100 | 43 | ('saucy', 'havana'), | 42 | ('saucy', 'havana'), |
1102 | 44 | ('trusty', 'icehouse') | 43 | ('trusty', 'icehouse'), |
1103 | 44 | ('utopic', 'juno'), | ||
1104 | 45 | ]) | 45 | ]) |
1105 | 46 | 46 | ||
1106 | 47 | 47 | ||
1107 | @@ -52,6 +52,7 @@ | |||
1108 | 52 | ('2013.1', 'grizzly'), | 52 | ('2013.1', 'grizzly'), |
1109 | 53 | ('2013.2', 'havana'), | 53 | ('2013.2', 'havana'), |
1110 | 54 | ('2014.1', 'icehouse'), | 54 | ('2014.1', 'icehouse'), |
1111 | 55 | ('2014.2', 'juno'), | ||
1112 | 55 | ]) | 56 | ]) |
1113 | 56 | 57 | ||
1114 | 57 | # The ugly duckling | 58 | # The ugly duckling |
1115 | @@ -83,6 +84,8 @@ | |||
1116 | 83 | '''Derive OpenStack release codename from a given installation source.''' | 84 | '''Derive OpenStack release codename from a given installation source.''' |
1117 | 84 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 85 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
1118 | 85 | rel = '' | 86 | rel = '' |
1119 | 87 | if src is None: | ||
1120 | 88 | return rel | ||
1121 | 86 | if src in ['distro', 'distro-proposed']: | 89 | if src in ['distro', 'distro-proposed']: |
1122 | 87 | try: | 90 | try: |
1123 | 88 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 91 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
1124 | @@ -130,6 +133,7 @@ | |||
1125 | 130 | 133 | ||
1126 | 131 | def get_os_codename_package(package, fatal=True): | 134 | def get_os_codename_package(package, fatal=True): |
1127 | 132 | '''Derive OpenStack release codename from an installed package.''' | 135 | '''Derive OpenStack release codename from an installed package.''' |
1128 | 136 | import apt_pkg as apt | ||
1129 | 133 | apt.init() | 137 | apt.init() |
1130 | 134 | 138 | ||
1131 | 135 | # Tell apt to build an in-memory cache to prevent race conditions (if | 139 | # Tell apt to build an in-memory cache to prevent race conditions (if |
1132 | @@ -187,7 +191,7 @@ | |||
1133 | 187 | for version, cname in vers_map.iteritems(): | 191 | for version, cname in vers_map.iteritems(): |
1134 | 188 | if cname == codename: | 192 | if cname == codename: |
1135 | 189 | return version | 193 | return version |
1137 | 190 | #e = "Could not determine OpenStack version for package: %s" % pkg | 194 | # e = "Could not determine OpenStack version for package: %s" % pkg |
1138 | 191 | # error_out(e) | 195 | # error_out(e) |
1139 | 192 | 196 | ||
1140 | 193 | 197 | ||
1141 | @@ -273,6 +277,9 @@ | |||
1142 | 273 | 'icehouse': 'precise-updates/icehouse', | 277 | 'icehouse': 'precise-updates/icehouse', |
1143 | 274 | 'icehouse/updates': 'precise-updates/icehouse', | 278 | 'icehouse/updates': 'precise-updates/icehouse', |
1144 | 275 | 'icehouse/proposed': 'precise-proposed/icehouse', | 279 | 'icehouse/proposed': 'precise-proposed/icehouse', |
1145 | 280 | 'juno': 'trusty-updates/juno', | ||
1146 | 281 | 'juno/updates': 'trusty-updates/juno', | ||
1147 | 282 | 'juno/proposed': 'trusty-proposed/juno', | ||
1148 | 276 | } | 283 | } |
1149 | 277 | 284 | ||
1150 | 278 | try: | 285 | try: |
1151 | @@ -320,6 +327,7 @@ | |||
1152 | 320 | 327 | ||
1153 | 321 | """ | 328 | """ |
1154 | 322 | 329 | ||
1155 | 330 | import apt_pkg as apt | ||
1156 | 323 | src = config('openstack-origin') | 331 | src = config('openstack-origin') |
1157 | 324 | cur_vers = get_os_version_package(package) | 332 | cur_vers = get_os_version_package(package) |
1158 | 325 | available_vers = get_os_version_install_source(src) | 333 | available_vers = get_os_version_install_source(src) |
1159 | 326 | 334 | ||
1160 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
1161 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000 | |||
1162 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-29 13:07:23 +0000 | |||
1163 | @@ -303,7 +303,7 @@ | |||
1164 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
1165 | 304 | """ | 304 | """ |
1166 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
1168 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
1169 | 307 | 307 | ||
1170 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
1171 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
1172 | 310 | 310 | ||
1173 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
1174 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-19 11:38:09 +0000 | |||
1175 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-29 13:07:23 +0000 | |||
1176 | @@ -37,6 +37,7 @@ | |||
1177 | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
1178 | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) |
1179 | 39 | 39 | ||
1180 | 40 | |||
1181 | 40 | def is_device_mounted(device): | 41 | def is_device_mounted(device): |
1182 | 41 | '''Given a device path, return True if that device is mounted, and False | 42 | '''Given a device path, return True if that device is mounted, and False |
1183 | 42 | if it isn't. | 43 | if it isn't. |
1184 | 43 | 44 | ||
1185 | === added file 'hooks/charmhelpers/core/fstab.py' | |||
1186 | --- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000 | |||
1187 | +++ hooks/charmhelpers/core/fstab.py 2014-07-29 13:07:23 +0000 | |||
1188 | @@ -0,0 +1,116 @@ | |||
1189 | 1 | #!/usr/bin/env python | ||
1190 | 2 | # -*- coding: utf-8 -*- | ||
1191 | 3 | |||
1192 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
1193 | 5 | |||
1194 | 6 | import os | ||
1195 | 7 | |||
1196 | 8 | |||
1197 | 9 | class Fstab(file): | ||
1198 | 10 | """This class extends file in order to implement a file reader/writer | ||
1199 | 11 | for file `/etc/fstab` | ||
1200 | 12 | """ | ||
1201 | 13 | |||
1202 | 14 | class Entry(object): | ||
1203 | 15 | """Entry class represents a non-comment line on the `/etc/fstab` file | ||
1204 | 16 | """ | ||
1205 | 17 | def __init__(self, device, mountpoint, filesystem, | ||
1206 | 18 | options, d=0, p=0): | ||
1207 | 19 | self.device = device | ||
1208 | 20 | self.mountpoint = mountpoint | ||
1209 | 21 | self.filesystem = filesystem | ||
1210 | 22 | |||
1211 | 23 | if not options: | ||
1212 | 24 | options = "defaults" | ||
1213 | 25 | |||
1214 | 26 | self.options = options | ||
1215 | 27 | self.d = d | ||
1216 | 28 | self.p = p | ||
1217 | 29 | |||
1218 | 30 | def __eq__(self, o): | ||
1219 | 31 | return str(self) == str(o) | ||
1220 | 32 | |||
1221 | 33 | def __str__(self): | ||
1222 | 34 | return "{} {} {} {} {} {}".format(self.device, | ||
1223 | 35 | self.mountpoint, | ||
1224 | 36 | self.filesystem, | ||
1225 | 37 | self.options, | ||
1226 | 38 | self.d, | ||
1227 | 39 | self.p) | ||
1228 | 40 | |||
1229 | 41 | DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||
1230 | 42 | |||
1231 | 43 | def __init__(self, path=None): | ||
1232 | 44 | if path: | ||
1233 | 45 | self._path = path | ||
1234 | 46 | else: | ||
1235 | 47 | self._path = self.DEFAULT_PATH | ||
1236 | 48 | file.__init__(self, self._path, 'r+') | ||
1237 | 49 | |||
1238 | 50 | def _hydrate_entry(self, line): | ||
1239 | 51 | # NOTE: use split with no arguments to split on any | ||
1240 | 52 | # whitespace including tabs | ||
1241 | 53 | return Fstab.Entry(*filter( | ||
1242 | 54 | lambda x: x not in ('', None), | ||
1243 | 55 | line.strip("\n").split())) | ||
1244 | 56 | |||
1245 | 57 | @property | ||
1246 | 58 | def entries(self): | ||
1247 | 59 | self.seek(0) | ||
1248 | 60 | for line in self.readlines(): | ||
1249 | 61 | try: | ||
1250 | 62 | if not line.startswith("#"): | ||
1251 | 63 | yield self._hydrate_entry(line) | ||
1252 | 64 | except ValueError: | ||
1253 | 65 | pass | ||
1254 | 66 | |||
1255 | 67 | def get_entry_by_attr(self, attr, value): | ||
1256 | 68 | for entry in self.entries: | ||
1257 | 69 | e_attr = getattr(entry, attr) | ||
1258 | 70 | if e_attr == value: | ||
1259 | 71 | return entry | ||
1260 | 72 | return None | ||
1261 | 73 | |||
1262 | 74 | def add_entry(self, entry): | ||
1263 | 75 | if self.get_entry_by_attr('device', entry.device): | ||
1264 | 76 | return False | ||
1265 | 77 | |||
1266 | 78 | self.write(str(entry) + '\n') | ||
1267 | 79 | self.truncate() | ||
1268 | 80 | return entry | ||
1269 | 81 | |||
1270 | 82 | def remove_entry(self, entry): | ||
1271 | 83 | self.seek(0) | ||
1272 | 84 | |||
1273 | 85 | lines = self.readlines() | ||
1274 | 86 | |||
1275 | 87 | found = False | ||
1276 | 88 | for index, line in enumerate(lines): | ||
1277 | 89 | if not line.startswith("#"): | ||
1278 | 90 | if self._hydrate_entry(line) == entry: | ||
1279 | 91 | found = True | ||
1280 | 92 | break | ||
1281 | 93 | |||
1282 | 94 | if not found: | ||
1283 | 95 | return False | ||
1284 | 96 | |||
1285 | 97 | lines.remove(line) | ||
1286 | 98 | |||
1287 | 99 | self.seek(0) | ||
1288 | 100 | self.write(''.join(lines)) | ||
1289 | 101 | self.truncate() | ||
1290 | 102 | return True | ||
1291 | 103 | |||
1292 | 104 | @classmethod | ||
1293 | 105 | def remove_by_mountpoint(cls, mountpoint, path=None): | ||
1294 | 106 | fstab = cls(path=path) | ||
1295 | 107 | entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||
1296 | 108 | if entry: | ||
1297 | 109 | return fstab.remove_entry(entry) | ||
1298 | 110 | return False | ||
1299 | 111 | |||
1300 | 112 | @classmethod | ||
1301 | 113 | def add(cls, device, mountpoint, filesystem, options=None, path=None): | ||
1302 | 114 | return cls(path=path).add_entry(Fstab.Entry(device, | ||
1303 | 115 | mountpoint, filesystem, | ||
1304 | 116 | options=options)) | ||
1305 | 0 | 117 | ||
1306 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1307 | --- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000 | |||
1308 | +++ hooks/charmhelpers/core/hookenv.py 2014-07-29 13:07:23 +0000 | |||
1309 | @@ -25,7 +25,7 @@ | |||
1310 | 25 | def cached(func): | 25 | def cached(func): |
1311 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
1312 | 27 | 27 | ||
1314 | 28 | For example: | 28 | For example:: |
1315 | 29 | 29 | ||
1316 | 30 | @cached | 30 | @cached |
1317 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
1318 | @@ -445,18 +445,19 @@ | |||
1319 | 445 | class Hooks(object): | 445 | class Hooks(object): |
1320 | 446 | """A convenient handler for hook functions. | 446 | """A convenient handler for hook functions. |
1321 | 447 | 447 | ||
1323 | 448 | Example: | 448 | Example:: |
1324 | 449 | |||
1325 | 449 | hooks = Hooks() | 450 | hooks = Hooks() |
1326 | 450 | 451 | ||
1327 | 451 | # register a hook, taking its name from the function name | 452 | # register a hook, taking its name from the function name |
1328 | 452 | @hooks.hook() | 453 | @hooks.hook() |
1329 | 453 | def install(): | 454 | def install(): |
1331 | 454 | ... | 455 | pass # your code here |
1332 | 455 | 456 | ||
1333 | 456 | # register a hook, providing a custom hook name | 457 | # register a hook, providing a custom hook name |
1334 | 457 | @hooks.hook("config-changed") | 458 | @hooks.hook("config-changed") |
1335 | 458 | def config_changed(): | 459 | def config_changed(): |
1337 | 459 | ... | 460 | pass # your code here |
1338 | 460 | 461 | ||
1339 | 461 | if __name__ == "__main__": | 462 | if __name__ == "__main__": |
1340 | 462 | # execute a hook based on the name the program is called by | 463 | # execute a hook based on the name the program is called by |
1341 | 463 | 464 | ||
1342 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1343 | --- hooks/charmhelpers/core/host.py 2014-05-19 11:38:09 +0000 | |||
1344 | +++ hooks/charmhelpers/core/host.py 2014-07-29 13:07:23 +0000 | |||
1345 | @@ -12,11 +12,11 @@ | |||
1346 | 12 | import string | 12 | import string |
1347 | 13 | import subprocess | 13 | import subprocess |
1348 | 14 | import hashlib | 14 | import hashlib |
1349 | 15 | import apt_pkg | ||
1350 | 16 | 15 | ||
1351 | 17 | from collections import OrderedDict | 16 | from collections import OrderedDict |
1352 | 18 | 17 | ||
1353 | 19 | from hookenv import log | 18 | from hookenv import log |
1354 | 19 | from fstab import Fstab | ||
1355 | 20 | 20 | ||
1356 | 21 | 21 | ||
1357 | 22 | def service_start(service_name): | 22 | def service_start(service_name): |
1358 | @@ -35,7 +35,8 @@ | |||
1359 | 35 | 35 | ||
1360 | 36 | 36 | ||
1361 | 37 | def service_reload(service_name, restart_on_failure=False): | 37 | def service_reload(service_name, restart_on_failure=False): |
1363 | 38 | """Reload a system service, optionally falling back to restart if reload fails""" | 38 | """Reload a system service, optionally falling back to restart if |
1364 | 39 | reload fails""" | ||
1365 | 39 | service_result = service('reload', service_name) | 40 | service_result = service('reload', service_name) |
1366 | 40 | if not service_result and restart_on_failure: | 41 | if not service_result and restart_on_failure: |
1367 | 41 | service_result = service('restart', service_name) | 42 | service_result = service('restart', service_name) |
1368 | @@ -144,7 +145,19 @@ | |||
1369 | 144 | target.write(content) | 145 | target.write(content) |
1370 | 145 | 146 | ||
1371 | 146 | 147 | ||
1373 | 147 | def mount(device, mountpoint, options=None, persist=False): | 148 | def fstab_remove(mp): |
1374 | 149 | """Remove the given mountpoint entry from /etc/fstab | ||
1375 | 150 | """ | ||
1376 | 151 | return Fstab.remove_by_mountpoint(mp) | ||
1377 | 152 | |||
1378 | 153 | |||
1379 | 154 | def fstab_add(dev, mp, fs, options=None): | ||
1380 | 155 | """Adds the given device entry to the /etc/fstab file | ||
1381 | 156 | """ | ||
1382 | 157 | return Fstab.add(dev, mp, fs, options=options) | ||
1383 | 158 | |||
1384 | 159 | |||
1385 | 160 | def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): | ||
1386 | 148 | """Mount a filesystem at a particular mountpoint""" | 161 | """Mount a filesystem at a particular mountpoint""" |
1387 | 149 | cmd_args = ['mount'] | 162 | cmd_args = ['mount'] |
1388 | 150 | if options is not None: | 163 | if options is not None: |
1389 | @@ -155,9 +168,9 @@ | |||
1390 | 155 | except subprocess.CalledProcessError, e: | 168 | except subprocess.CalledProcessError, e: |
1391 | 156 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 169 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
1392 | 157 | return False | 170 | return False |
1393 | 171 | |||
1394 | 158 | if persist: | 172 | if persist: |
1397 | 159 | # TODO: update fstab | 173 | return fstab_add(device, mountpoint, filesystem, options=options) |
1396 | 160 | pass | ||
1398 | 161 | return True | 174 | return True |
1399 | 162 | 175 | ||
1400 | 163 | 176 | ||
1401 | @@ -169,9 +182,9 @@ | |||
1402 | 169 | except subprocess.CalledProcessError, e: | 182 | except subprocess.CalledProcessError, e: |
1403 | 170 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 183 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
1404 | 171 | return False | 184 | return False |
1405 | 185 | |||
1406 | 172 | if persist: | 186 | if persist: |
1409 | 173 | # TODO: update fstab | 187 | return fstab_remove(mountpoint) |
1408 | 174 | pass | ||
1410 | 175 | return True | 188 | return True |
1411 | 176 | 189 | ||
1412 | 177 | 190 | ||
1413 | @@ -198,13 +211,13 @@ | |||
1414 | 198 | def restart_on_change(restart_map, stopstart=False): | 211 | def restart_on_change(restart_map, stopstart=False): |
1415 | 199 | """Restart services based on configuration files changing | 212 | """Restart services based on configuration files changing |
1416 | 200 | 213 | ||
1418 | 201 | This function is used as a decorator, for example | 214 | This function is used as a decorator, for example:: |
1419 | 202 | 215 | ||
1420 | 203 | @restart_on_change({ | 216 | @restart_on_change({ |
1421 | 204 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
1422 | 205 | }) | 218 | }) |
1423 | 206 | def ceph_client_changed(): | 219 | def ceph_client_changed(): |
1425 | 207 | ... | 220 | pass # your code here |
1426 | 208 | 221 | ||
1427 | 209 | In this example, the cinder-api and cinder-volume services | 222 | In this example, the cinder-api and cinder-volume services |
1428 | 210 | would be restarted if /etc/ceph/ceph.conf is changed by the | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the |
1429 | @@ -300,12 +313,19 @@ | |||
1430 | 300 | 313 | ||
1431 | 301 | def cmp_pkgrevno(package, revno, pkgcache=None): | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): |
1432 | 302 | '''Compare supplied revno with the revno of the installed package | 315 | '''Compare supplied revno with the revno of the installed package |
1436 | 303 | 1 => Installed revno is greater than supplied arg | 316 | |
1437 | 304 | 0 => Installed revno is the same as supplied arg | 317 | * 1 => Installed revno is greater than supplied arg |
1438 | 305 | -1 => Installed revno is less than supplied arg | 318 | * 0 => Installed revno is the same as supplied arg |
1439 | 319 | * -1 => Installed revno is less than supplied arg | ||
1440 | 320 | |||
1441 | 306 | ''' | 321 | ''' |
1442 | 322 | import apt_pkg | ||
1443 | 307 | if not pkgcache: | 323 | if not pkgcache: |
1444 | 308 | apt_pkg.init() | 324 | apt_pkg.init() |
1445 | 325 | # Force Apt to build its cache in memory. That way we avoid race | ||
1446 | 326 | # conditions with other applications building the cache in the same | ||
1447 | 327 | # place. | ||
1448 | 328 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
1449 | 309 | pkgcache = apt_pkg.Cache() | 329 | pkgcache = apt_pkg.Cache() |
1450 | 310 | pkg = pkgcache[package] | 330 | pkg = pkgcache[package] |
1451 | 311 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
1452 | 312 | 332 | ||
1453 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1454 | --- hooks/charmhelpers/fetch/__init__.py 2014-05-19 11:38:09 +0000 | |||
1455 | +++ hooks/charmhelpers/fetch/__init__.py 2014-07-29 13:07:23 +0000 | |||
1456 | @@ -13,7 +13,6 @@ | |||
1457 | 13 | config, | 13 | config, |
1458 | 14 | log, | 14 | log, |
1459 | 15 | ) | 15 | ) |
1460 | 16 | import apt_pkg | ||
1461 | 17 | import os | 16 | import os |
1462 | 18 | 17 | ||
1463 | 19 | 18 | ||
1464 | @@ -56,6 +55,15 @@ | |||
1465 | 56 | 'icehouse/proposed': 'precise-proposed/icehouse', | 55 | 'icehouse/proposed': 'precise-proposed/icehouse', |
1466 | 57 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', | 56 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', |
1467 | 58 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', | 57 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
1468 | 58 | # Juno | ||
1469 | 59 | 'juno': 'trusty-updates/juno', | ||
1470 | 60 | 'trusty-juno': 'trusty-updates/juno', | ||
1471 | 61 | 'trusty-juno/updates': 'trusty-updates/juno', | ||
1472 | 62 | 'trusty-updates/juno': 'trusty-updates/juno', | ||
1473 | 63 | 'juno/proposed': 'trusty-proposed/juno', | ||
1474 | 64 | 'juno/proposed': 'trusty-proposed/juno', | ||
1475 | 65 | 'trusty-juno/proposed': 'trusty-proposed/juno', | ||
1476 | 66 | 'trusty-proposed/juno': 'trusty-proposed/juno', | ||
1477 | 59 | } | 67 | } |
1478 | 60 | 68 | ||
1479 | 61 | # The order of this list is very important. Handlers should be listed in from | 69 | # The order of this list is very important. Handlers should be listed in from |
1480 | @@ -108,6 +116,7 @@ | |||
1481 | 108 | 116 | ||
1482 | 109 | def filter_installed_packages(packages): | 117 | def filter_installed_packages(packages): |
1483 | 110 | """Returns a list of packages that require installation""" | 118 | """Returns a list of packages that require installation""" |
1484 | 119 | import apt_pkg | ||
1485 | 111 | apt_pkg.init() | 120 | apt_pkg.init() |
1486 | 112 | 121 | ||
1487 | 113 | # Tell apt to build an in-memory cache to prevent race conditions (if | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if |
1488 | @@ -226,31 +235,39 @@ | |||
1489 | 226 | sources_var='install_sources', | 235 | sources_var='install_sources', |
1490 | 227 | keys_var='install_keys'): | 236 | keys_var='install_keys'): |
1491 | 228 | """ | 237 | """ |
1493 | 229 | Configure multiple sources from charm configuration | 238 | Configure multiple sources from charm configuration. |
1494 | 239 | |||
1495 | 240 | The lists are encoded as yaml fragments in the configuration. | ||
1496 | 241 | The fragment needs to be included as a string. | ||
1497 | 230 | 242 | ||
1498 | 231 | Example config: | 243 | Example config: |
1500 | 232 | install_sources: | 244 | install_sources: | |
1501 | 233 | - "ppa:foo" | 245 | - "ppa:foo" |
1502 | 234 | - "http://example.com/repo precise main" | 246 | - "http://example.com/repo precise main" |
1504 | 235 | install_keys: | 247 | install_keys: | |
1505 | 236 | - null | 248 | - null |
1506 | 237 | - "a1b2c3d4" | 249 | - "a1b2c3d4" |
1507 | 238 | 250 | ||
1508 | 239 | Note that 'null' (a.k.a. None) should not be quoted. | 251 | Note that 'null' (a.k.a. None) should not be quoted. |
1509 | 240 | """ | 252 | """ |
1517 | 241 | sources = safe_load(config(sources_var)) | 253 | sources = safe_load((config(sources_var) or '').strip()) or [] |
1518 | 242 | keys = config(keys_var) | 254 | keys = safe_load((config(keys_var) or '').strip()) or None |
1519 | 243 | if keys is not None: | 255 | |
1520 | 244 | keys = safe_load(keys) | 256 | if isinstance(sources, basestring): |
1521 | 245 | if isinstance(sources, basestring) and ( | 257 | sources = [sources] |
1522 | 246 | keys is None or isinstance(keys, basestring)): | 258 | |
1523 | 247 | add_source(sources, keys) | 259 | if keys is None: |
1524 | 260 | for source in sources: | ||
1525 | 261 | add_source(source, None) | ||
1526 | 248 | else: | 262 | else: |
1532 | 249 | if not len(sources) == len(keys): | 263 | if isinstance(keys, basestring): |
1533 | 250 | msg = 'Install sources and keys lists are different lengths' | 264 | keys = [keys] |
1534 | 251 | raise SourceConfigError(msg) | 265 | |
1535 | 252 | for src_num in range(len(sources)): | 266 | if len(sources) != len(keys): |
1536 | 253 | add_source(sources[src_num], keys[src_num]) | 267 | raise SourceConfigError( |
1537 | 268 | 'Install sources and keys lists are different lengths') | ||
1538 | 269 | for source, key in zip(sources, keys): | ||
1539 | 270 | add_source(source, key) | ||
1540 | 254 | if update: | 271 | if update: |
1541 | 255 | apt_update(fatal=True) | 272 | apt_update(fatal=True) |
1542 | 256 | 273 | ||
1543 | 257 | 274 | ||
1544 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
1545 | --- hooks/charmhelpers/fetch/bzrurl.py 2013-11-06 03:48:26 +0000 | |||
1546 | +++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-29 13:07:23 +0000 | |||
1547 | @@ -39,7 +39,8 @@ | |||
1548 | 39 | def install(self, source): | 39 | def install(self, source): |
1549 | 40 | url_parts = self.parse_url(source) | 40 | url_parts = self.parse_url(source) |
1550 | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] |
1552 | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
1553 | 43 | branch_name) | ||
1554 | 43 | if not os.path.exists(dest_dir): | 44 | if not os.path.exists(dest_dir): |
1555 | 44 | mkdir(dest_dir, perms=0755) | 45 | mkdir(dest_dir, perms=0755) |
1556 | 45 | try: | 46 | try: |
1557 | 46 | 47 | ||
1558 | === added symlink 'hooks/neutron-api-relation-broken' | |||
1559 | === target is u'nova_cc_hooks.py' | |||
1560 | === added symlink 'hooks/neutron-api-relation-changed' | |||
1561 | === target is u'nova_cc_hooks.py' | |||
1562 | === added symlink 'hooks/neutron-api-relation-departed' | |||
1563 | === target is u'nova_cc_hooks.py' | |||
1564 | === added symlink 'hooks/neutron-api-relation-joined' | |||
1565 | === target is u'nova_cc_hooks.py' | |||
1566 | === modified file 'hooks/nova_cc_context.py' | |||
1567 | --- hooks/nova_cc_context.py 2014-06-17 10:01:21 +0000 | |||
1568 | +++ hooks/nova_cc_context.py 2014-07-29 13:07:23 +0000 | |||
1569 | @@ -1,7 +1,7 @@ | |||
1570 | 1 | 1 | ||
1571 | 2 | from charmhelpers.core.hookenv import ( | 2 | from charmhelpers.core.hookenv import ( |
1572 | 3 | config, relation_ids, relation_set, log, ERROR, | 3 | config, relation_ids, relation_set, log, ERROR, |
1574 | 4 | unit_get) | 4 | unit_get, related_units, relation_get) |
1575 | 5 | 5 | ||
1576 | 6 | from charmhelpers.fetch import apt_install, filter_installed_packages | 6 | from charmhelpers.fetch import apt_install, filter_installed_packages |
1577 | 7 | from charmhelpers.contrib.openstack import context, neutron, utils | 7 | from charmhelpers.contrib.openstack import context, neutron, utils |
1578 | @@ -14,6 +14,17 @@ | |||
1579 | 14 | ) | 14 | ) |
1580 | 15 | 15 | ||
1581 | 16 | 16 | ||
1582 | 17 | def context_complete(ctxt): | ||
1583 | 18 | _missing = [] | ||
1584 | 19 | for k, v in ctxt.iteritems(): | ||
1585 | 20 | if v is None or v == '': | ||
1586 | 21 | _missing.append(k) | ||
1587 | 22 | if _missing: | ||
1588 | 23 | log('Missing required data: %s' % ' '.join(_missing), level='INFO') | ||
1589 | 24 | return False | ||
1590 | 25 | return True | ||
1591 | 26 | |||
1592 | 27 | |||
1593 | 17 | class ApacheSSLContext(context.ApacheSSLContext): | 28 | class ApacheSSLContext(context.ApacheSSLContext): |
1594 | 18 | 29 | ||
1595 | 19 | interfaces = ['https'] | 30 | interfaces = ['https'] |
1596 | @@ -27,6 +38,26 @@ | |||
1597 | 27 | return super(ApacheSSLContext, self).__call__() | 38 | return super(ApacheSSLContext, self).__call__() |
1598 | 28 | 39 | ||
1599 | 29 | 40 | ||
1600 | 41 | class NeutronAPIContext(context.OSContextGenerator): | ||
1601 | 42 | |||
1602 | 43 | def __call__(self): | ||
1603 | 44 | log('Generating template context from neutron api relation') | ||
1604 | 45 | ctxt = {} | ||
1605 | 46 | for rid in relation_ids('neutron-api'): | ||
1606 | 47 | for unit in related_units(rid): | ||
1607 | 48 | rdata = relation_get(rid=rid, unit=unit) | ||
1608 | 49 | ctxt = { | ||
1609 | 50 | 'neutron_url': rdata.get('neutron-url'), | ||
1610 | 51 | 'neutron_plugin': rdata.get('neutron-plugin'), | ||
1611 | 52 | 'neutron_security_groups': | ||
1612 | 53 | rdata.get('neutron-security-groups'), | ||
1613 | 54 | 'network_manager': 'neutron', | ||
1614 | 55 | } | ||
1615 | 56 | if context_complete(ctxt): | ||
1616 | 57 | return ctxt | ||
1617 | 58 | return {} | ||
1618 | 59 | |||
1619 | 60 | |||
1620 | 30 | class VolumeServiceContext(context.OSContextGenerator): | 61 | class VolumeServiceContext(context.OSContextGenerator): |
1621 | 31 | interfaces = [] | 62 | interfaces = [] |
1622 | 32 | 63 | ||
1623 | 33 | 64 | ||
1624 | === modified file 'hooks/nova_cc_hooks.py' | |||
1625 | --- hooks/nova_cc_hooks.py 2014-04-11 16:41:42 +0000 | |||
1626 | +++ hooks/nova_cc_hooks.py 2014-07-29 13:07:23 +0000 | |||
1627 | @@ -19,12 +19,15 @@ | |||
1628 | 19 | relation_get, | 19 | relation_get, |
1629 | 20 | relation_ids, | 20 | relation_ids, |
1630 | 21 | relation_set, | 21 | relation_set, |
1631 | 22 | related_units, | ||
1632 | 22 | open_port, | 23 | open_port, |
1633 | 23 | unit_get, | 24 | unit_get, |
1634 | 24 | ) | 25 | ) |
1635 | 25 | 26 | ||
1636 | 26 | from charmhelpers.core.host import ( | 27 | from charmhelpers.core.host import ( |
1638 | 27 | restart_on_change | 28 | restart_on_change, |
1639 | 29 | service_running, | ||
1640 | 30 | service_stop, | ||
1641 | 28 | ) | 31 | ) |
1642 | 29 | 32 | ||
1643 | 30 | from charmhelpers.fetch import ( | 33 | from charmhelpers.fetch import ( |
1644 | @@ -41,6 +44,10 @@ | |||
1645 | 41 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
1646 | 42 | ) | 45 | ) |
1647 | 43 | 46 | ||
1648 | 47 | from nova_cc_context import ( | ||
1649 | 48 | NeutronAPIContext | ||
1650 | 49 | ) | ||
1651 | 50 | |||
1652 | 44 | from nova_cc_utils import ( | 51 | from nova_cc_utils import ( |
1653 | 45 | api_port, | 52 | api_port, |
1654 | 46 | auth_token_config, | 53 | auth_token_config, |
1655 | @@ -54,8 +61,8 @@ | |||
1656 | 54 | save_script_rc, | 61 | save_script_rc, |
1657 | 55 | ssh_compute_add, | 62 | ssh_compute_add, |
1658 | 56 | ssh_compute_remove, | 63 | ssh_compute_remove, |
1661 | 57 | ssh_known_hosts_b64, | 64 | ssh_known_hosts_lines, |
1662 | 58 | ssh_authorized_keys_b64, | 65 | ssh_authorized_keys_lines, |
1663 | 59 | register_configs, | 66 | register_configs, |
1664 | 60 | restart_map, | 67 | restart_map, |
1665 | 61 | volume_service, | 68 | volume_service, |
1666 | @@ -63,11 +70,12 @@ | |||
1667 | 63 | NOVA_CONF, | 70 | NOVA_CONF, |
1668 | 64 | QUANTUM_CONF, | 71 | QUANTUM_CONF, |
1669 | 65 | NEUTRON_CONF, | 72 | NEUTRON_CONF, |
1671 | 66 | QUANTUM_API_PASTE | 73 | QUANTUM_API_PASTE, |
1672 | 74 | service_guard, | ||
1673 | 75 | guard_map, | ||
1674 | 67 | ) | 76 | ) |
1675 | 68 | 77 | ||
1676 | 69 | from charmhelpers.contrib.hahelpers.cluster import ( | 78 | from charmhelpers.contrib.hahelpers.cluster import ( |
1677 | 70 | canonical_url, | ||
1678 | 71 | eligible_leader, | 79 | eligible_leader, |
1679 | 72 | get_hacluster_config, | 80 | get_hacluster_config, |
1680 | 73 | is_leader, | 81 | is_leader, |
1681 | @@ -75,6 +83,16 @@ | |||
1682 | 75 | 83 | ||
1683 | 76 | from charmhelpers.payload.execd import execd_preinstall | 84 | from charmhelpers.payload.execd import execd_preinstall |
1684 | 77 | 85 | ||
1685 | 86 | from charmhelpers.contrib.openstack.ip import ( | ||
1686 | 87 | canonical_url, | ||
1687 | 88 | PUBLIC, INTERNAL, ADMIN | ||
1688 | 89 | ) | ||
1689 | 90 | |||
1690 | 91 | from charmhelpers.contrib.network.ip import ( | ||
1691 | 92 | get_iface_for_address, | ||
1692 | 93 | get_netmask_for_address | ||
1693 | 94 | ) | ||
1694 | 95 | |||
1695 | 78 | hooks = Hooks() | 96 | hooks = Hooks() |
1696 | 79 | CONFIGS = register_configs() | 97 | CONFIGS = register_configs() |
1697 | 80 | 98 | ||
1698 | @@ -96,6 +114,8 @@ | |||
1699 | 96 | 114 | ||
1700 | 97 | 115 | ||
1701 | 98 | @hooks.hook('config-changed') | 116 | @hooks.hook('config-changed') |
1702 | 117 | @service_guard(guard_map(), CONFIGS, | ||
1703 | 118 | active=config('service-guard')) | ||
1704 | 99 | @restart_on_change(restart_map(), stopstart=True) | 119 | @restart_on_change(restart_map(), stopstart=True) |
1705 | 100 | def config_changed(): | 120 | def config_changed(): |
1706 | 101 | global CONFIGS | 121 | global CONFIGS |
1707 | @@ -104,6 +124,8 @@ | |||
1708 | 104 | save_script_rc() | 124 | save_script_rc() |
1709 | 105 | configure_https() | 125 | configure_https() |
1710 | 106 | CONFIGS.write_all() | 126 | CONFIGS.write_all() |
1711 | 127 | for r_id in relation_ids('identity-service'): | ||
1712 | 128 | identity_joined(rid=r_id) | ||
1713 | 107 | 129 | ||
1714 | 108 | 130 | ||
1715 | 109 | @hooks.hook('amqp-relation-joined') | 131 | @hooks.hook('amqp-relation-joined') |
1716 | @@ -114,16 +136,19 @@ | |||
1717 | 114 | 136 | ||
1718 | 115 | @hooks.hook('amqp-relation-changed') | 137 | @hooks.hook('amqp-relation-changed') |
1719 | 116 | @hooks.hook('amqp-relation-departed') | 138 | @hooks.hook('amqp-relation-departed') |
1720 | 139 | @service_guard(guard_map(), CONFIGS, | ||
1721 | 140 | active=config('service-guard')) | ||
1722 | 117 | @restart_on_change(restart_map()) | 141 | @restart_on_change(restart_map()) |
1723 | 118 | def amqp_changed(): | 142 | def amqp_changed(): |
1724 | 119 | if 'amqp' not in CONFIGS.complete_contexts(): | 143 | if 'amqp' not in CONFIGS.complete_contexts(): |
1725 | 120 | log('amqp relation incomplete. Peer not ready?') | 144 | log('amqp relation incomplete. Peer not ready?') |
1726 | 121 | return | 145 | return |
1727 | 122 | CONFIGS.write(NOVA_CONF) | 146 | CONFIGS.write(NOVA_CONF) |
1732 | 123 | if network_manager() == 'quantum': | 147 | if not is_relation_made('neutron-api'): |
1733 | 124 | CONFIGS.write(QUANTUM_CONF) | 148 | if network_manager() == 'quantum': |
1734 | 125 | if network_manager() == 'neutron': | 149 | CONFIGS.write(QUANTUM_CONF) |
1735 | 126 | CONFIGS.write(NEUTRON_CONF) | 150 | if network_manager() == 'neutron': |
1736 | 151 | CONFIGS.write(NEUTRON_CONF) | ||
1737 | 127 | 152 | ||
1738 | 128 | 153 | ||
1739 | 129 | @hooks.hook('shared-db-relation-joined') | 154 | @hooks.hook('shared-db-relation-joined') |
1740 | @@ -171,6 +196,8 @@ | |||
1741 | 171 | 196 | ||
1742 | 172 | 197 | ||
1743 | 173 | @hooks.hook('shared-db-relation-changed') | 198 | @hooks.hook('shared-db-relation-changed') |
1744 | 199 | @service_guard(guard_map(), CONFIGS, | ||
1745 | 200 | active=config('service-guard')) | ||
1746 | 174 | @restart_on_change(restart_map()) | 201 | @restart_on_change(restart_map()) |
1747 | 175 | def db_changed(): | 202 | def db_changed(): |
1748 | 176 | if 'shared-db' not in CONFIGS.complete_contexts(): | 203 | if 'shared-db' not in CONFIGS.complete_contexts(): |
1749 | @@ -186,6 +213,8 @@ | |||
1750 | 186 | 213 | ||
1751 | 187 | 214 | ||
1752 | 188 | @hooks.hook('pgsql-nova-db-relation-changed') | 215 | @hooks.hook('pgsql-nova-db-relation-changed') |
1753 | 216 | @service_guard(guard_map(), CONFIGS, | ||
1754 | 217 | active=config('service-guard')) | ||
1755 | 189 | @restart_on_change(restart_map()) | 218 | @restart_on_change(restart_map()) |
1756 | 190 | def postgresql_nova_db_changed(): | 219 | def postgresql_nova_db_changed(): |
1757 | 191 | if 'pgsql-nova-db' not in CONFIGS.complete_contexts(): | 220 | if 'pgsql-nova-db' not in CONFIGS.complete_contexts(): |
1758 | @@ -201,6 +230,8 @@ | |||
1759 | 201 | 230 | ||
1760 | 202 | 231 | ||
1761 | 203 | @hooks.hook('pgsql-neutron-db-relation-changed') | 232 | @hooks.hook('pgsql-neutron-db-relation-changed') |
1762 | 233 | @service_guard(guard_map(), CONFIGS, | ||
1763 | 234 | active=config('service-guard')) | ||
1764 | 204 | @restart_on_change(restart_map()) | 235 | @restart_on_change(restart_map()) |
1765 | 205 | def postgresql_neutron_db_changed(): | 236 | def postgresql_neutron_db_changed(): |
1766 | 206 | if network_manager() in ['neutron', 'quantum']: | 237 | if network_manager() in ['neutron', 'quantum']: |
1767 | @@ -210,6 +241,8 @@ | |||
1768 | 210 | 241 | ||
1769 | 211 | 242 | ||
1770 | 212 | @hooks.hook('image-service-relation-changed') | 243 | @hooks.hook('image-service-relation-changed') |
1771 | 244 | @service_guard(guard_map(), CONFIGS, | ||
1772 | 245 | active=config('service-guard')) | ||
1773 | 213 | @restart_on_change(restart_map()) | 246 | @restart_on_change(restart_map()) |
1774 | 214 | def image_service_changed(): | 247 | def image_service_changed(): |
1775 | 215 | if 'image-service' not in CONFIGS.complete_contexts(): | 248 | if 'image-service' not in CONFIGS.complete_contexts(): |
1776 | @@ -223,11 +256,17 @@ | |||
1777 | 223 | def identity_joined(rid=None): | 256 | def identity_joined(rid=None): |
1778 | 224 | if not eligible_leader(CLUSTER_RES): | 257 | if not eligible_leader(CLUSTER_RES): |
1779 | 225 | return | 258 | return |
1782 | 226 | base_url = canonical_url(CONFIGS) | 259 | public_url = canonical_url(CONFIGS, PUBLIC) |
1783 | 227 | relation_set(relation_id=rid, **determine_endpoints(base_url)) | 260 | internal_url = canonical_url(CONFIGS, INTERNAL) |
1784 | 261 | admin_url = canonical_url(CONFIGS, ADMIN) | ||
1785 | 262 | relation_set(relation_id=rid, **determine_endpoints(public_url, | ||
1786 | 263 | internal_url, | ||
1787 | 264 | admin_url)) | ||
1788 | 228 | 265 | ||
1789 | 229 | 266 | ||
1790 | 230 | @hooks.hook('identity-service-relation-changed') | 267 | @hooks.hook('identity-service-relation-changed') |
1791 | 268 | @service_guard(guard_map(), CONFIGS, | ||
1792 | 269 | active=config('service-guard')) | ||
1793 | 231 | @restart_on_change(restart_map()) | 270 | @restart_on_change(restart_map()) |
1794 | 232 | def identity_changed(): | 271 | def identity_changed(): |
1795 | 233 | if 'identity-service' not in CONFIGS.complete_contexts(): | 272 | if 'identity-service' not in CONFIGS.complete_contexts(): |
1796 | @@ -235,20 +274,24 @@ | |||
1797 | 235 | return | 274 | return |
1798 | 236 | CONFIGS.write('/etc/nova/api-paste.ini') | 275 | CONFIGS.write('/etc/nova/api-paste.ini') |
1799 | 237 | CONFIGS.write(NOVA_CONF) | 276 | CONFIGS.write(NOVA_CONF) |
1806 | 238 | if network_manager() == 'quantum': | 277 | if not is_relation_made('neutron-api'): |
1807 | 239 | CONFIGS.write(QUANTUM_API_PASTE) | 278 | if network_manager() == 'quantum': |
1808 | 240 | CONFIGS.write(QUANTUM_CONF) | 279 | CONFIGS.write(QUANTUM_API_PASTE) |
1809 | 241 | save_novarc() | 280 | CONFIGS.write(QUANTUM_CONF) |
1810 | 242 | if network_manager() == 'neutron': | 281 | save_novarc() |
1811 | 243 | CONFIGS.write(NEUTRON_CONF) | 282 | if network_manager() == 'neutron': |
1812 | 283 | CONFIGS.write(NEUTRON_CONF) | ||
1813 | 244 | [compute_joined(rid) for rid in relation_ids('cloud-compute')] | 284 | [compute_joined(rid) for rid in relation_ids('cloud-compute')] |
1814 | 245 | [quantum_joined(rid) for rid in relation_ids('quantum-network-service')] | 285 | [quantum_joined(rid) for rid in relation_ids('quantum-network-service')] |
1815 | 246 | [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')] | 286 | [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')] |
1816 | 287 | [neutron_api_relation_joined(rid) for rid in relation_ids('neutron-api')] | ||
1817 | 247 | configure_https() | 288 | configure_https() |
1818 | 248 | 289 | ||
1819 | 249 | 290 | ||
1820 | 250 | @hooks.hook('nova-volume-service-relation-joined', | 291 | @hooks.hook('nova-volume-service-relation-joined', |
1821 | 251 | 'cinder-volume-service-relation-joined') | 292 | 'cinder-volume-service-relation-joined') |
1822 | 293 | @service_guard(guard_map(), CONFIGS, | ||
1823 | 294 | active=config('service-guard')) | ||
1824 | 252 | @restart_on_change(restart_map()) | 295 | @restart_on_change(restart_map()) |
1825 | 253 | def volume_joined(): | 296 | def volume_joined(): |
1826 | 254 | CONFIGS.write(NOVA_CONF) | 297 | CONFIGS.write(NOVA_CONF) |
1827 | @@ -293,6 +336,33 @@ | |||
1828 | 293 | out.write('export OS_REGION_NAME=%s\n' % config('region')) | 336 | out.write('export OS_REGION_NAME=%s\n' % config('region')) |
1829 | 294 | 337 | ||
1830 | 295 | 338 | ||
1831 | 339 | def neutron_settings(): | ||
1832 | 340 | neutron_settings = {} | ||
1833 | 341 | if is_relation_made('neutron-api', 'neutron-plugin'): | ||
1834 | 342 | neutron_api_info = NeutronAPIContext()() | ||
1835 | 343 | neutron_settings.update({ | ||
1836 | 344 | # XXX: Rename these relations settings? | ||
1837 | 345 | 'quantum_plugin': neutron_api_info['neutron_plugin'], | ||
1838 | 346 | 'region': config('region'), | ||
1839 | 347 | 'quantum_security_groups': | ||
1840 | 348 | neutron_api_info['neutron_security_groups'], | ||
1841 | 349 | 'quantum_url': neutron_api_info['neutron_url'], | ||
1842 | 350 | }) | ||
1843 | 351 | else: | ||
1844 | 352 | neutron_settings.update({ | ||
1845 | 353 | # XXX: Rename these relations settings? | ||
1846 | 354 | 'quantum_plugin': neutron_plugin(), | ||
1847 | 355 | 'region': config('region'), | ||
1848 | 356 | 'quantum_security_groups': config('quantum-security-groups'), | ||
1849 | 357 | 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL), | ||
1850 | 358 | str(api_port('neutron-server'))), | ||
1851 | 359 | }) | ||
1852 | 360 | neutron_url = urlparse(neutron_settings['quantum_url']) | ||
1853 | 361 | neutron_settings['quantum_host'] = neutron_url.hostname | ||
1854 | 362 | neutron_settings['quantum_port'] = neutron_url.port | ||
1855 | 363 | return neutron_settings | ||
1856 | 364 | |||
1857 | 365 | |||
1858 | 296 | def keystone_compute_settings(): | 366 | def keystone_compute_settings(): |
1859 | 297 | ks_auth_config = _auth_config() | 367 | ks_auth_config = _auth_config() |
1860 | 298 | rel_settings = {} | 368 | rel_settings = {} |
1861 | @@ -300,20 +370,10 @@ | |||
1862 | 300 | if network_manager() in ['quantum', 'neutron']: | 370 | if network_manager() in ['quantum', 'neutron']: |
1863 | 301 | if ks_auth_config: | 371 | if ks_auth_config: |
1864 | 302 | rel_settings.update(ks_auth_config) | 372 | rel_settings.update(ks_auth_config) |
1875 | 303 | 373 | rel_settings.update(neutron_settings()) | |
1866 | 304 | rel_settings.update({ | ||
1867 | 305 | # XXX: Rename these relations settings? | ||
1868 | 306 | 'quantum_plugin': neutron_plugin(), | ||
1869 | 307 | 'region': config('region'), | ||
1870 | 308 | 'quantum_security_groups': config('quantum-security-groups'), | ||
1871 | 309 | 'quantum_url': (canonical_url(CONFIGS) + ':' + | ||
1872 | 310 | str(api_port('neutron-server'))), | ||
1873 | 311 | }) | ||
1874 | 312 | |||
1876 | 313 | ks_ca = keystone_ca_cert_b64() | 374 | ks_ca = keystone_ca_cert_b64() |
1877 | 314 | if ks_auth_config and ks_ca: | 375 | if ks_auth_config and ks_ca: |
1878 | 315 | rel_settings['ca_cert'] = ks_ca | 376 | rel_settings['ca_cert'] = ks_ca |
1879 | 316 | |||
1880 | 317 | return rel_settings | 377 | return rel_settings |
1881 | 318 | 378 | ||
1882 | 319 | 379 | ||
1883 | @@ -328,7 +388,6 @@ | |||
1884 | 328 | # this may not even be needed. | 388 | # this may not even be needed. |
1885 | 329 | 'ec2_host': unit_get('private-address'), | 389 | 'ec2_host': unit_get('private-address'), |
1886 | 330 | } | 390 | } |
1887 | 331 | |||
1888 | 332 | # update relation setting if we're attempting to restart remote | 391 | # update relation setting if we're attempting to restart remote |
1889 | 333 | # services | 392 | # services |
1890 | 334 | if remote_restart: | 393 | if remote_restart: |
1891 | @@ -339,21 +398,63 @@ | |||
1892 | 339 | 398 | ||
1893 | 340 | 399 | ||
1894 | 341 | @hooks.hook('cloud-compute-relation-changed') | 400 | @hooks.hook('cloud-compute-relation-changed') |
1899 | 342 | def compute_changed(): | 401 | def compute_changed(rid=None, unit=None): |
1900 | 343 | migration_auth = relation_get('migration_auth_type') | 402 | rel_settings = relation_get(rid=rid, unit=unit) |
1901 | 344 | if migration_auth == 'ssh': | 403 | if 'migration_auth_type' not in rel_settings: |
1902 | 345 | key = relation_get('ssh_public_key') | 404 | return |
1903 | 405 | if rel_settings['migration_auth_type'] == 'ssh': | ||
1904 | 406 | key = rel_settings.get('ssh_public_key') | ||
1905 | 346 | if not key: | 407 | if not key: |
1906 | 347 | log('SSH migration set but peer did not publish key.') | 408 | log('SSH migration set but peer did not publish key.') |
1907 | 348 | return | 409 | return |
1916 | 349 | ssh_compute_add(key) | 410 | ssh_compute_add(key, rid=rid, unit=unit) |
1917 | 350 | relation_set(known_hosts=ssh_known_hosts_b64(), | 411 | index = 0 |
1918 | 351 | authorized_keys=ssh_authorized_keys_b64()) | 412 | for line in ssh_known_hosts_lines(unit=unit): |
1919 | 352 | if relation_get('nova_ssh_public_key'): | 413 | relation_set( |
1920 | 353 | key = relation_get('nova_ssh_public_key') | 414 | relation_id=rid, |
1921 | 354 | ssh_compute_add(key, user='nova') | 415 | relation_settings={ |
1922 | 355 | relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'), | 416 | 'known_hosts_{}'.format(index): line}) |
1923 | 356 | nova_authorized_keys=ssh_authorized_keys_b64(user='nova')) | 417 | index += 1 |
1924 | 418 | relation_set(relation_id=rid, known_hosts_max_index=index) | ||
1925 | 419 | index = 0 | ||
1926 | 420 | for line in ssh_authorized_keys_lines(unit=unit): | ||
1927 | 421 | relation_set( | ||
1928 | 422 | relation_id=rid, | ||
1929 | 423 | relation_settings={ | ||
1930 | 424 | 'authorized_keys_{}'.format(index): line}) | ||
1931 | 425 | index += 1 | ||
1932 | 426 | relation_set(relation_id=rid, authorized_keys_max_index=index) | ||
1933 | 427 | if 'nova_ssh_public_key' not in rel_settings: | ||
1934 | 428 | return | ||
1935 | 429 | if rel_settings['nova_ssh_public_key']: | ||
1936 | 430 | ssh_compute_add(rel_settings['nova_ssh_public_key'], | ||
1937 | 431 | rid=rid, unit=unit, user='nova') | ||
1938 | 432 | index = 0 | ||
1939 | 433 | for line in ssh_known_hosts_lines(unit=unit, user='nova'): | ||
1940 | 434 | relation_set( | ||
1941 | 435 | relation_id=rid, | ||
1942 | 436 | relation_settings={ | ||
1943 | 437 | '{}_known_hosts_{}'.format( | ||
1944 | 438 | 'nova', | ||
1945 | 439 | index): line}) | ||
1946 | 440 | index += 1 | ||
1947 | 441 | relation_set( | ||
1948 | 442 | relation_id=rid, | ||
1949 | 443 | relation_settings={ | ||
1950 | 444 | '{}_known_hosts_max_index'.format('nova'): index}) | ||
1951 | 445 | index = 0 | ||
1952 | 446 | for line in ssh_authorized_keys_lines(unit=unit, user='nova'): | ||
1953 | 447 | relation_set( | ||
1954 | 448 | relation_id=rid, | ||
1955 | 449 | relation_settings={ | ||
1956 | 450 | '{}_authorized_keys_{}'.format( | ||
1957 | 451 | 'nova', | ||
1958 | 452 | index): line}) | ||
1959 | 453 | index += 1 | ||
1960 | 454 | relation_set( | ||
1961 | 455 | relation_id=rid, | ||
1962 | 456 | relation_settings={ | ||
1963 | 457 | '{}_authorized_keys_max_index'.format('nova'): index}) | ||
1964 | 357 | 458 | ||
1965 | 358 | 459 | ||
1966 | 359 | @hooks.hook('cloud-compute-relation-departed') | 460 | @hooks.hook('cloud-compute-relation-departed') |
1967 | @@ -367,15 +468,7 @@ | |||
1968 | 367 | if not eligible_leader(CLUSTER_RES): | 468 | if not eligible_leader(CLUSTER_RES): |
1969 | 368 | return | 469 | return |
1970 | 369 | 470 | ||
1980 | 370 | url = canonical_url(CONFIGS) + ':9696' | 471 | rel_settings = neutron_settings() |
1972 | 371 | # XXX: Can we rename to neutron_*? | ||
1973 | 372 | rel_settings = { | ||
1974 | 373 | 'quantum_host': urlparse(url).hostname, | ||
1975 | 374 | 'quantum_url': url, | ||
1976 | 375 | 'quantum_port': 9696, | ||
1977 | 376 | 'quantum_plugin': neutron_plugin(), | ||
1978 | 377 | 'region': config('region') | ||
1979 | 378 | } | ||
1981 | 379 | 472 | ||
1982 | 380 | # inform quantum about local keystone auth config | 473 | # inform quantum about local keystone auth config |
1983 | 381 | ks_auth_config = _auth_config() | 474 | ks_auth_config = _auth_config() |
1984 | @@ -385,12 +478,13 @@ | |||
1985 | 385 | ks_ca = keystone_ca_cert_b64() | 478 | ks_ca = keystone_ca_cert_b64() |
1986 | 386 | if ks_auth_config and ks_ca: | 479 | if ks_auth_config and ks_ca: |
1987 | 387 | rel_settings['ca_cert'] = ks_ca | 480 | rel_settings['ca_cert'] = ks_ca |
1988 | 388 | |||
1989 | 389 | relation_set(relation_id=rid, **rel_settings) | 481 | relation_set(relation_id=rid, **rel_settings) |
1990 | 390 | 482 | ||
1991 | 391 | 483 | ||
1992 | 392 | @hooks.hook('cluster-relation-changed', | 484 | @hooks.hook('cluster-relation-changed', |
1993 | 393 | 'cluster-relation-departed') | 485 | 'cluster-relation-departed') |
1994 | 486 | @service_guard(guard_map(), CONFIGS, | ||
1995 | 487 | active=config('service-guard')) | ||
1996 | 394 | @restart_on_change(restart_map(), stopstart=True) | 488 | @restart_on_change(restart_map(), stopstart=True) |
1997 | 395 | def cluster_changed(): | 489 | def cluster_changed(): |
1998 | 396 | CONFIGS.write_all() | 490 | CONFIGS.write_all() |
1999 | @@ -400,15 +494,28 @@ | |||
2000 | 400 | def ha_joined(): | 494 | def ha_joined(): |
2001 | 401 | config = get_hacluster_config() | 495 | config = get_hacluster_config() |
2002 | 402 | resources = { | 496 | resources = { |
2003 | 403 | 'res_nova_vip': 'ocf:heartbeat:IPaddr2', | ||
2004 | 404 | 'res_nova_haproxy': 'lsb:haproxy', | 497 | 'res_nova_haproxy': 'lsb:haproxy', |
2005 | 405 | } | 498 | } |
2006 | 406 | vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ | ||
2007 | 407 | (config['vip'], config['vip_cidr'], config['vip_iface']) | ||
2008 | 408 | resource_params = { | 499 | resource_params = { |
2009 | 409 | 'res_nova_vip': vip_params, | ||
2010 | 410 | 'res_nova_haproxy': 'op monitor interval="5s"' | 500 | 'res_nova_haproxy': 'op monitor interval="5s"' |
2011 | 411 | } | 501 | } |
2012 | 502 | vip_group = [] | ||
2013 | 503 | for vip in config['vip'].split(): | ||
2014 | 504 | iface = get_iface_for_address(vip) | ||
2015 | 505 | if iface is not None: | ||
2016 | 506 | vip_key = 'res_nova_{}_vip'.format(iface) | ||
2017 | 507 | resources[vip_key] = 'ocf:heartbeat:IPaddr2' | ||
2018 | 508 | resource_params[vip_key] = ( | ||
2019 | 509 | 'params ip="{vip}" cidr_netmask="{netmask}"' | ||
2020 | 510 | ' nic="{iface}"'.format(vip=vip, | ||
2021 | 511 | iface=iface, | ||
2022 | 512 | netmask=get_netmask_for_address(vip)) | ||
2023 | 513 | ) | ||
2024 | 514 | vip_group.append(vip_key) | ||
2025 | 515 | |||
2026 | 516 | if len(vip_group) > 1: | ||
2027 | 517 | relation_set(groups={'grp_nova_vips': ' '.join(vip_group)}) | ||
2028 | 518 | |||
2029 | 412 | init_services = { | 519 | init_services = { |
2030 | 413 | 'res_nova_haproxy': 'haproxy' | 520 | 'res_nova_haproxy': 'haproxy' |
2031 | 414 | } | 521 | } |
2032 | @@ -447,6 +554,8 @@ | |||
2033 | 447 | 'pgsql-nova-db-relation-broken', | 554 | 'pgsql-nova-db-relation-broken', |
2034 | 448 | 'pgsql-neutron-db-relation-broken', | 555 | 'pgsql-neutron-db-relation-broken', |
2035 | 449 | 'quantum-network-service-relation-broken') | 556 | 'quantum-network-service-relation-broken') |
2036 | 557 | @service_guard(guard_map(), CONFIGS, | ||
2037 | 558 | active=config('service-guard')) | ||
2038 | 450 | def relation_broken(): | 559 | def relation_broken(): |
2039 | 451 | CONFIGS.write_all() | 560 | CONFIGS.write_all() |
2040 | 452 | 561 | ||
2041 | @@ -480,13 +589,15 @@ | |||
2042 | 480 | rel_settings.update({ | 589 | rel_settings.update({ |
2043 | 481 | 'quantum_plugin': neutron_plugin(), | 590 | 'quantum_plugin': neutron_plugin(), |
2044 | 482 | 'quantum_security_groups': config('quantum-security-groups'), | 591 | 'quantum_security_groups': config('quantum-security-groups'), |
2047 | 483 | 'quantum_url': (canonical_url(CONFIGS) + ':' + | 592 | 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL), |
2048 | 484 | str(api_port('neutron-server')))}) | 593 | str(api_port('neutron-server')))}) |
2049 | 485 | 594 | ||
2050 | 486 | relation_set(relation_id=rid, **rel_settings) | 595 | relation_set(relation_id=rid, **rel_settings) |
2051 | 487 | 596 | ||
2052 | 488 | 597 | ||
2053 | 489 | @hooks.hook('nova-vmware-relation-changed') | 598 | @hooks.hook('nova-vmware-relation-changed') |
2054 | 599 | @service_guard(guard_map(), CONFIGS, | ||
2055 | 600 | active=config('service-guard')) | ||
2056 | 490 | @restart_on_change(restart_map()) | 601 | @restart_on_change(restart_map()) |
2057 | 491 | def nova_vmware_relation_changed(): | 602 | def nova_vmware_relation_changed(): |
2058 | 492 | CONFIGS.write('/etc/nova/nova.conf') | 603 | CONFIGS.write('/etc/nova/nova.conf') |
2059 | @@ -498,6 +609,49 @@ | |||
2060 | 498 | amqp_joined(relation_id=r_id) | 609 | amqp_joined(relation_id=r_id) |
2061 | 499 | for r_id in relation_ids('identity-service'): | 610 | for r_id in relation_ids('identity-service'): |
2062 | 500 | identity_joined(rid=r_id) | 611 | identity_joined(rid=r_id) |
2063 | 612 | for r_id in relation_ids('cloud-compute'): | ||
2064 | 613 | for unit in related_units(r_id): | ||
2065 | 614 | compute_changed(r_id, unit) | ||
2066 | 615 | |||
2067 | 616 | |||
2068 | 617 | @hooks.hook('neutron-api-relation-joined') | ||
2069 | 618 | def neutron_api_relation_joined(rid=None): | ||
2070 | 619 | with open('/etc/init/neutron-server.override', 'wb') as out: | ||
2071 | 620 | out.write('manual\n') | ||
2072 | 621 | if os.path.isfile(NEUTRON_CONF): | ||
2073 | 622 | os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused') | ||
2074 | 623 | if service_running('neutron-server'): | ||
2075 | 624 | service_stop('neutron-server') | ||
2076 | 625 | for id_rid in relation_ids('identity-service'): | ||
2077 | 626 | identity_joined(rid=id_rid) | ||
2078 | 627 | nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2" | ||
2079 | 628 | relation_set(relation_id=rid, nova_url=nova_url) | ||
2080 | 629 | |||
2081 | 630 | |||
2082 | 631 | @hooks.hook('neutron-api-relation-changed') | ||
2083 | 632 | @service_guard(guard_map(), CONFIGS, | ||
2084 | 633 | active=config('service-guard')) | ||
2085 | 634 | @restart_on_change(restart_map()) | ||
2086 | 635 | def neutron_api_relation_changed(): | ||
2087 | 636 | CONFIGS.write(NOVA_CONF) | ||
2088 | 637 | for rid in relation_ids('cloud-compute'): | ||
2089 | 638 | compute_joined(rid=rid) | ||
2090 | 639 | for rid in relation_ids('quantum-network-service'): | ||
2091 | 640 | quantum_joined(rid=rid) | ||
2092 | 641 | |||
2093 | 642 | |||
2094 | 643 | @hooks.hook('neutron-api-relation-broken') | ||
2095 | 644 | @service_guard(guard_map(), CONFIGS, | ||
2096 | 645 | active=config('service-guard')) | ||
2097 | 646 | @restart_on_change(restart_map()) | ||
2098 | 647 | def neutron_api_relation_broken(): | ||
2099 | 648 | if os.path.isfile('/etc/init/neutron-server.override'): | ||
2100 | 649 | os.remove('/etc/init/neutron-server.override') | ||
2101 | 650 | CONFIGS.write_all() | ||
2102 | 651 | for rid in relation_ids('cloud-compute'): | ||
2103 | 652 | compute_joined(rid=rid) | ||
2104 | 653 | for rid in relation_ids('quantum-network-service'): | ||
2105 | 654 | quantum_joined(rid=rid) | ||
2106 | 501 | 655 | ||
2107 | 502 | 656 | ||
2108 | 503 | def main(): | 657 | def main(): |
2109 | 504 | 658 | ||
2110 | === modified file 'hooks/nova_cc_utils.py' | |||
2111 | --- hooks/nova_cc_utils.py 2014-05-21 10:03:01 +0000 | |||
2112 | +++ hooks/nova_cc_utils.py 2014-07-29 13:07:23 +0000 | |||
2113 | @@ -33,20 +33,22 @@ | |||
2114 | 33 | relation_get, | 33 | relation_get, |
2115 | 34 | relation_ids, | 34 | relation_ids, |
2116 | 35 | remote_unit, | 35 | remote_unit, |
2117 | 36 | is_relation_made, | ||
2118 | 36 | INFO, | 37 | INFO, |
2119 | 37 | ERROR, | 38 | ERROR, |
2120 | 38 | ) | 39 | ) |
2121 | 39 | 40 | ||
2122 | 40 | from charmhelpers.core.host import ( | 41 | from charmhelpers.core.host import ( |
2124 | 41 | service_start | 42 | service_start, |
2125 | 43 | service_stop, | ||
2126 | 44 | service_running | ||
2127 | 42 | ) | 45 | ) |
2128 | 43 | 46 | ||
2129 | 44 | |||
2130 | 45 | import nova_cc_context | 47 | import nova_cc_context |
2131 | 46 | 48 | ||
2132 | 47 | TEMPLATES = 'templates/' | 49 | TEMPLATES = 'templates/' |
2133 | 48 | 50 | ||
2135 | 49 | CLUSTER_RES = 'res_nova_vip' | 51 | CLUSTER_RES = 'grp_nova_vips' |
2136 | 50 | 52 | ||
2137 | 51 | # removed from original: charm-helper-sh | 53 | # removed from original: charm-helper-sh |
2138 | 52 | BASE_PACKAGES = [ | 54 | BASE_PACKAGES = [ |
2139 | @@ -106,8 +108,7 @@ | |||
2140 | 106 | context.SyslogContext(), | 108 | context.SyslogContext(), |
2141 | 107 | nova_cc_context.HAProxyContext(), | 109 | nova_cc_context.HAProxyContext(), |
2142 | 108 | nova_cc_context.IdentityServiceContext(), | 110 | nova_cc_context.IdentityServiceContext(), |
2145 | 109 | nova_cc_context.VolumeServiceContext(), | 111 | nova_cc_context.VolumeServiceContext()], |
2144 | 110 | nova_cc_context.NeutronCCContext()], | ||
2146 | 111 | }), | 112 | }), |
2147 | 112 | (NOVA_API_PASTE, { | 113 | (NOVA_API_PASTE, { |
2148 | 113 | 'services': [s for s in BASE_SERVICES if 'api' in s], | 114 | 'services': [s for s in BASE_SERVICES if 'api' in s], |
2149 | @@ -188,39 +189,47 @@ | |||
2150 | 188 | 189 | ||
2151 | 189 | net_manager = network_manager() | 190 | net_manager = network_manager() |
2152 | 190 | 191 | ||
2153 | 191 | # pop out irrelevant resources from the OrderedDict (easier than adding | ||
2154 | 192 | # them late) | ||
2155 | 193 | if net_manager != 'quantum': | ||
2156 | 194 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) | ||
2157 | 195 | if 'quantum' in k] | ||
2158 | 196 | if net_manager != 'neutron': | ||
2159 | 197 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) | ||
2160 | 198 | if 'neutron' in k] | ||
2161 | 199 | |||
2162 | 200 | if os.path.exists('/etc/apache2/conf-available'): | 192 | if os.path.exists('/etc/apache2/conf-available'): |
2163 | 201 | resource_map.pop(APACHE_CONF) | 193 | resource_map.pop(APACHE_CONF) |
2164 | 202 | else: | 194 | else: |
2165 | 203 | resource_map.pop(APACHE_24_CONF) | 195 | resource_map.pop(APACHE_24_CONF) |
2166 | 204 | 196 | ||
2182 | 205 | # add neutron plugin requirements. nova-c-c only needs the neutron-server | 197 | if is_relation_made('neutron-api'): |
2183 | 206 | # associated with configs, not the plugin agent. | 198 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
2184 | 207 | if net_manager in ['quantum', 'neutron']: | 199 | if 'quantum' in k or 'neutron' in k] |
2185 | 208 | plugin = neutron_plugin() | 200 | resource_map[NOVA_CONF]['contexts'].append( |
2186 | 209 | if plugin: | 201 | nova_cc_context.NeutronAPIContext()) |
2187 | 210 | conf = neutron_plugin_attribute(plugin, 'config', net_manager) | 202 | else: |
2188 | 211 | ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager) | 203 | resource_map[NOVA_CONF]['contexts'].append( |
2189 | 212 | or []) | 204 | nova_cc_context.NeutronCCContext()) |
2190 | 213 | services = neutron_plugin_attribute(plugin, 'server_services', | 205 | # pop out irrelevant resources from the OrderedDict (easier than adding |
2191 | 214 | net_manager) | 206 | # them late) |
2192 | 215 | resource_map[conf] = {} | 207 | if net_manager != 'quantum': |
2193 | 216 | resource_map[conf]['services'] = services | 208 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
2194 | 217 | resource_map[conf]['contexts'] = ctxts | 209 | if 'quantum' in k] |
2195 | 218 | resource_map[conf]['contexts'].append( | 210 | if net_manager != 'neutron': |
2196 | 219 | nova_cc_context.NeutronCCContext()) | 211 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
2197 | 212 | if 'neutron' in k] | ||
2198 | 213 | # add neutron plugin requirements. nova-c-c only needs the | ||
2199 | 214 | # neutron-server associated with configs, not the plugin agent. | ||
2200 | 215 | if net_manager in ['quantum', 'neutron']: | ||
2201 | 216 | plugin = neutron_plugin() | ||
2202 | 217 | if plugin: | ||
2203 | 218 | conf = neutron_plugin_attribute(plugin, 'config', net_manager) | ||
2204 | 219 | ctxts = (neutron_plugin_attribute(plugin, 'contexts', | ||
2205 | 220 | net_manager) | ||
2206 | 221 | or []) | ||
2207 | 222 | services = neutron_plugin_attribute(plugin, 'server_services', | ||
2208 | 223 | net_manager) | ||
2209 | 224 | resource_map[conf] = {} | ||
2210 | 225 | resource_map[conf]['services'] = services | ||
2211 | 226 | resource_map[conf]['contexts'] = ctxts | ||
2212 | 227 | resource_map[conf]['contexts'].append( | ||
2213 | 228 | nova_cc_context.NeutronCCContext()) | ||
2214 | 220 | 229 | ||
2218 | 221 | # update for postgres | 230 | # update for postgres |
2219 | 222 | resource_map[conf]['contexts'].append( | 231 | resource_map[conf]['contexts'].append( |
2220 | 223 | nova_cc_context.NeutronPostgresqlDBContext()) | 232 | nova_cc_context.NeutronPostgresqlDBContext()) |
2221 | 224 | 233 | ||
2222 | 225 | # nova-conductor for releases >= G. | 234 | # nova-conductor for releases >= G. |
2223 | 226 | if os_release('nova-common') not in ['essex', 'folsom']: | 235 | if os_release('nova-common') not in ['essex', 'folsom']: |
2224 | @@ -235,6 +244,7 @@ | |||
2225 | 235 | for s in vmware_ctxt['services']: | 244 | for s in vmware_ctxt['services']: |
2226 | 236 | if s not in resource_map[NOVA_CONF]['services']: | 245 | if s not in resource_map[NOVA_CONF]['services']: |
2227 | 237 | resource_map[NOVA_CONF]['services'].append(s) | 246 | resource_map[NOVA_CONF]['services'].append(s) |
2228 | 247 | |||
2229 | 238 | return resource_map | 248 | return resource_map |
2230 | 239 | 249 | ||
2231 | 240 | 250 | ||
2232 | @@ -509,8 +519,11 @@ | |||
2233 | 509 | return b64encode(_in.read()) | 519 | return b64encode(_in.read()) |
2234 | 510 | 520 | ||
2235 | 511 | 521 | ||
2238 | 512 | def ssh_directory_for_unit(user=None): | 522 | def ssh_directory_for_unit(unit=None, user=None): |
2239 | 513 | remote_service = remote_unit().split('/')[0] | 523 | if unit: |
2240 | 524 | remote_service = unit.split('/')[0] | ||
2241 | 525 | else: | ||
2242 | 526 | remote_service = remote_unit().split('/')[0] | ||
2243 | 514 | if user: | 527 | if user: |
2244 | 515 | remote_service = "{}_{}".format(remote_service, user) | 528 | remote_service = "{}_{}".format(remote_service, user) |
2245 | 516 | _dir = os.path.join(NOVA_SSH_DIR, remote_service) | 529 | _dir = os.path.join(NOVA_SSH_DIR, remote_service) |
2246 | @@ -524,29 +537,29 @@ | |||
2247 | 524 | return _dir | 537 | return _dir |
2248 | 525 | 538 | ||
2249 | 526 | 539 | ||
2260 | 527 | def known_hosts(user=None): | 540 | def known_hosts(unit=None, user=None): |
2261 | 528 | return os.path.join(ssh_directory_for_unit(user), 'known_hosts') | 541 | return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts') |
2262 | 529 | 542 | ||
2263 | 530 | 543 | ||
2264 | 531 | def authorized_keys(user=None): | 544 | def authorized_keys(unit=None, user=None): |
2265 | 532 | return os.path.join(ssh_directory_for_unit(user), 'authorized_keys') | 545 | return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys') |
2266 | 533 | 546 | ||
2267 | 534 | 547 | ||
2268 | 535 | def ssh_known_host_key(host, user=None): | 548 | def ssh_known_host_key(host, unit=None, user=None): |
2269 | 536 | cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host] | 549 | cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host] |
2270 | 537 | try: | 550 | try: |
2271 | 538 | return subprocess.check_output(cmd).strip() | 551 | return subprocess.check_output(cmd).strip() |
2272 | 539 | except subprocess.CalledProcessError: | 552 | except subprocess.CalledProcessError: |
2273 | 540 | return None | 553 | return None |
2274 | 541 | 554 | ||
2275 | 542 | 555 | ||
2277 | 543 | def remove_known_host(host, user=None): | 556 | def remove_known_host(host, unit=None, user=None): |
2278 | 544 | log('Removing SSH known host entry for compute host at %s' % host) | 557 | log('Removing SSH known host entry for compute host at %s' % host) |
2280 | 545 | cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host] | 558 | cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host] |
2281 | 546 | subprocess.check_call(cmd) | 559 | subprocess.check_call(cmd) |
2282 | 547 | 560 | ||
2283 | 548 | 561 | ||
2285 | 549 | def add_known_host(host, user=None): | 562 | def add_known_host(host, unit=None, user=None): |
2286 | 550 | '''Add variations of host to a known hosts file.''' | 563 | '''Add variations of host to a known hosts file.''' |
2287 | 551 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] | 564 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] |
2288 | 552 | try: | 565 | try: |
2289 | @@ -555,34 +568,37 @@ | |||
2290 | 555 | log('Could not obtain SSH host key from %s' % host, level=ERROR) | 568 | log('Could not obtain SSH host key from %s' % host, level=ERROR) |
2291 | 556 | raise e | 569 | raise e |
2292 | 557 | 570 | ||
2294 | 558 | current_key = ssh_known_host_key(host, user) | 571 | current_key = ssh_known_host_key(host, unit, user) |
2295 | 559 | if current_key: | 572 | if current_key: |
2296 | 560 | if remote_key == current_key: | 573 | if remote_key == current_key: |
2297 | 561 | log('Known host key for compute host %s up to date.' % host) | 574 | log('Known host key for compute host %s up to date.' % host) |
2298 | 562 | return | 575 | return |
2299 | 563 | else: | 576 | else: |
2301 | 564 | remove_known_host(host, user) | 577 | remove_known_host(host, unit, user) |
2302 | 565 | 578 | ||
2303 | 566 | log('Adding SSH host key to known hosts for compute node at %s.' % host) | 579 | log('Adding SSH host key to known hosts for compute node at %s.' % host) |
2305 | 567 | with open(known_hosts(user), 'a') as out: | 580 | with open(known_hosts(unit, user), 'a') as out: |
2306 | 568 | out.write(remote_key + '\n') | 581 | out.write(remote_key + '\n') |
2307 | 569 | 582 | ||
2308 | 570 | 583 | ||
2311 | 571 | def ssh_authorized_key_exists(public_key, user=None): | 584 | def ssh_authorized_key_exists(public_key, unit=None, user=None): |
2312 | 572 | with open(authorized_keys(user)) as keys: | 585 | with open(authorized_keys(unit, user)) as keys: |
2313 | 573 | return (' %s ' % public_key) in keys.read() | 586 | return (' %s ' % public_key) in keys.read() |
2314 | 574 | 587 | ||
2315 | 575 | 588 | ||
2318 | 576 | def add_authorized_key(public_key, user=None): | 589 | def add_authorized_key(public_key, unit=None, user=None): |
2319 | 577 | with open(authorized_keys(user), 'a') as keys: | 590 | with open(authorized_keys(unit, user), 'a') as keys: |
2320 | 578 | keys.write(public_key + '\n') | 591 | keys.write(public_key + '\n') |
2321 | 579 | 592 | ||
2322 | 580 | 593 | ||
2324 | 581 | def ssh_compute_add(public_key, user=None): | 594 | def ssh_compute_add(public_key, rid=None, unit=None, user=None): |
2325 | 582 | # If remote compute node hands us a hostname, ensure we have a | 595 | # If remote compute node hands us a hostname, ensure we have a |
2326 | 583 | # known hosts entry for its IP, hostname and FQDN. | 596 | # known hosts entry for its IP, hostname and FQDN. |
2328 | 584 | private_address = relation_get('private-address') | 597 | private_address = relation_get(rid=rid, unit=unit, |
2329 | 598 | attribute='private-address') | ||
2330 | 585 | hosts = [private_address] | 599 | hosts = [private_address] |
2331 | 600 | if relation_get('hostname'): | ||
2332 | 601 | hosts.append(relation_get('hostname')) | ||
2333 | 586 | 602 | ||
2334 | 587 | if not is_ip(private_address): | 603 | if not is_ip(private_address): |
2335 | 588 | hosts.append(get_host_ip(private_address)) | 604 | hosts.append(get_host_ip(private_address)) |
2336 | @@ -593,31 +609,41 @@ | |||
2337 | 593 | hosts.append(hn.split('.')[0]) | 609 | hosts.append(hn.split('.')[0]) |
2338 | 594 | 610 | ||
2339 | 595 | for host in list(set(hosts)): | 611 | for host in list(set(hosts)): |
2342 | 596 | if not ssh_known_host_key(host, user): | 612 | if not ssh_known_host_key(host, unit, user): |
2343 | 597 | add_known_host(host, user) | 613 | add_known_host(host, unit, user) |
2344 | 598 | 614 | ||
2346 | 599 | if not ssh_authorized_key_exists(public_key, user): | 615 | if not ssh_authorized_key_exists(public_key, unit, user): |
2347 | 600 | log('Saving SSH authorized key for compute host at %s.' % | 616 | log('Saving SSH authorized key for compute host at %s.' % |
2348 | 601 | private_address) | 617 | private_address) |
2365 | 602 | add_authorized_key(public_key, user) | 618 | add_authorized_key(public_key, unit, user) |
2366 | 603 | 619 | ||
2367 | 604 | 620 | ||
2368 | 605 | def ssh_known_hosts_b64(user=None): | 621 | def ssh_known_hosts_lines(unit=None, user=None): |
2369 | 606 | with open(known_hosts(user)) as hosts: | 622 | known_hosts_list = [] |
2370 | 607 | return b64encode(hosts.read()) | 623 | |
2371 | 608 | 624 | with open(known_hosts(unit, user)) as hosts: | |
2372 | 609 | 625 | for hosts_line in hosts: | |
2373 | 610 | def ssh_authorized_keys_b64(user=None): | 626 | if hosts_line.rstrip(): |
2374 | 611 | with open(authorized_keys(user)) as keys: | 627 | known_hosts_list.append(hosts_line.rstrip()) |
2375 | 612 | return b64encode(keys.read()) | 628 | return(known_hosts_list) |
2376 | 613 | 629 | ||
2377 | 614 | 630 | ||
2378 | 615 | def ssh_compute_remove(public_key, user=None): | 631 | def ssh_authorized_keys_lines(unit=None, user=None): |
2379 | 616 | if not (os.path.isfile(authorized_keys(user)) or | 632 | authorized_keys_list = [] |
2380 | 617 | os.path.isfile(known_hosts(user))): | 633 | |
2381 | 634 | with open(authorized_keys(unit, user)) as keys: | ||
2382 | 635 | for authkey_line in keys: | ||
2383 | 636 | if authkey_line.rstrip(): | ||
2384 | 637 | authorized_keys_list.append(authkey_line.rstrip()) | ||
2385 | 638 | return(authorized_keys_list) | ||
2386 | 639 | |||
2387 | 640 | |||
2388 | 641 | def ssh_compute_remove(public_key, unit=None, user=None): | ||
2389 | 642 | if not (os.path.isfile(authorized_keys(unit, user)) or | ||
2390 | 643 | os.path.isfile(known_hosts(unit, user))): | ||
2391 | 618 | return | 644 | return |
2392 | 619 | 645 | ||
2394 | 620 | with open(authorized_keys(user)) as _keys: | 646 | with open(authorized_keys(unit, user)) as _keys: |
2395 | 621 | keys = [k.strip() for k in _keys.readlines()] | 647 | keys = [k.strip() for k in _keys.readlines()] |
2396 | 622 | 648 | ||
2397 | 623 | if public_key not in keys: | 649 | if public_key not in keys: |
2398 | @@ -625,67 +651,101 @@ | |||
2399 | 625 | 651 | ||
2400 | 626 | [keys.remove(key) for key in keys if key == public_key] | 652 | [keys.remove(key) for key in keys if key == public_key] |
2401 | 627 | 653 | ||
2403 | 628 | with open(authorized_keys(user), 'w') as _keys: | 654 | with open(authorized_keys(unit, user), 'w') as _keys: |
2404 | 629 | keys = '\n'.join(keys) | 655 | keys = '\n'.join(keys) |
2405 | 630 | if not keys.endswith('\n'): | 656 | if not keys.endswith('\n'): |
2406 | 631 | keys += '\n' | 657 | keys += '\n' |
2407 | 632 | _keys.write(keys) | 658 | _keys.write(keys) |
2408 | 633 | 659 | ||
2409 | 634 | 660 | ||
2411 | 635 | def determine_endpoints(url): | 661 | def determine_endpoints(public_url, internal_url, admin_url): |
2412 | 636 | '''Generates a dictionary containing all relevant endpoints to be | 662 | '''Generates a dictionary containing all relevant endpoints to be |
2413 | 637 | passed to keystone as relation settings.''' | 663 | passed to keystone as relation settings.''' |
2414 | 638 | region = config('region') | 664 | region = config('region') |
2415 | 639 | os_rel = os_release('nova-common') | 665 | os_rel = os_release('nova-common') |
2416 | 640 | 666 | ||
2417 | 641 | if os_rel >= 'grizzly': | 667 | if os_rel >= 'grizzly': |
2420 | 642 | nova_url = ('%s:%s/v2/$(tenant_id)s' % | 668 | nova_public_url = ('%s:%s/v2/$(tenant_id)s' % |
2421 | 643 | (url, api_port('nova-api-os-compute'))) | 669 | (public_url, api_port('nova-api-os-compute'))) |
2422 | 670 | nova_internal_url = ('%s:%s/v2/$(tenant_id)s' % | ||
2423 | 671 | (internal_url, api_port('nova-api-os-compute'))) | ||
2424 | 672 | nova_admin_url = ('%s:%s/v2/$(tenant_id)s' % | ||
2425 | 673 | (admin_url, api_port('nova-api-os-compute'))) | ||
2426 | 644 | else: | 674 | else: |
2434 | 645 | nova_url = ('%s:%s/v1.1/$(tenant_id)s' % | 675 | nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' % |
2435 | 646 | (url, api_port('nova-api-os-compute'))) | 676 | (public_url, api_port('nova-api-os-compute'))) |
2436 | 647 | ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2')) | 677 | nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' % |
2437 | 648 | nova_volume_url = ('%s:%s/v1/$(tenant_id)s' % | 678 | (internal_url, api_port('nova-api-os-compute'))) |
2438 | 649 | (url, api_port('nova-api-os-compute'))) | 679 | nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' % |
2439 | 650 | neutron_url = '%s:%s' % (url, api_port('neutron-server')) | 680 | (admin_url, api_port('nova-api-os-compute'))) |
2440 | 651 | s3_url = '%s:%s' % (url, api_port('nova-objectstore')) | 681 | |
2441 | 682 | ec2_public_url = '%s:%s/services/Cloud' % ( | ||
2442 | 683 | public_url, api_port('nova-api-ec2')) | ||
2443 | 684 | ec2_internal_url = '%s:%s/services/Cloud' % ( | ||
2444 | 685 | internal_url, api_port('nova-api-ec2')) | ||
2445 | 686 | ec2_admin_url = '%s:%s/services/Cloud' % (admin_url, | ||
2446 | 687 | api_port('nova-api-ec2')) | ||
2447 | 688 | |||
2448 | 689 | nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' % | ||
2449 | 690 | (public_url, api_port('nova-api-os-compute'))) | ||
2450 | 691 | nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' % | ||
2451 | 692 | (internal_url, | ||
2452 | 693 | api_port('nova-api-os-compute'))) | ||
2453 | 694 | nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' % | ||
2454 | 695 | (admin_url, api_port('nova-api-os-compute'))) | ||
2455 | 696 | |||
2456 | 697 | neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server')) | ||
2457 | 698 | neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server')) | ||
2458 | 699 | neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server')) | ||
2459 | 700 | |||
2460 | 701 | s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore')) | ||
2461 | 702 | s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore')) | ||
2462 | 703 | s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore')) | ||
2463 | 652 | 704 | ||
2464 | 653 | # the base endpoints | 705 | # the base endpoints |
2465 | 654 | endpoints = { | 706 | endpoints = { |
2466 | 655 | 'nova_service': 'nova', | 707 | 'nova_service': 'nova', |
2467 | 656 | 'nova_region': region, | 708 | 'nova_region': region, |
2471 | 657 | 'nova_public_url': nova_url, | 709 | 'nova_public_url': nova_public_url, |
2472 | 658 | 'nova_admin_url': nova_url, | 710 | 'nova_admin_url': nova_admin_url, |
2473 | 659 | 'nova_internal_url': nova_url, | 711 | 'nova_internal_url': nova_internal_url, |
2474 | 660 | 'ec2_service': 'ec2', | 712 | 'ec2_service': 'ec2', |
2475 | 661 | 'ec2_region': region, | 713 | 'ec2_region': region, |
2479 | 662 | 'ec2_public_url': ec2_url, | 714 | 'ec2_public_url': ec2_public_url, |
2480 | 663 | 'ec2_admin_url': ec2_url, | 715 | 'ec2_admin_url': ec2_admin_url, |
2481 | 664 | 'ec2_internal_url': ec2_url, | 716 | 'ec2_internal_url': ec2_internal_url, |
2482 | 665 | 's3_service': 's3', | 717 | 's3_service': 's3', |
2483 | 666 | 's3_region': region, | 718 | 's3_region': region, |
2487 | 667 | 's3_public_url': s3_url, | 719 | 's3_public_url': s3_public_url, |
2488 | 668 | 's3_admin_url': s3_url, | 720 | 's3_admin_url': s3_admin_url, |
2489 | 669 | 's3_internal_url': s3_url, | 721 | 's3_internal_url': s3_internal_url, |
2490 | 670 | } | 722 | } |
2491 | 671 | 723 | ||
2492 | 672 | if relation_ids('nova-volume-service'): | 724 | if relation_ids('nova-volume-service'): |
2493 | 673 | endpoints.update({ | 725 | endpoints.update({ |
2494 | 674 | 'nova-volume_service': 'nova-volume', | 726 | 'nova-volume_service': 'nova-volume', |
2495 | 675 | 'nova-volume_region': region, | 727 | 'nova-volume_region': region, |
2499 | 676 | 'nova-volume_public_url': nova_volume_url, | 728 | 'nova-volume_public_url': nova_volume_public_url, |
2500 | 677 | 'nova-volume_admin_url': nova_volume_url, | 729 | 'nova-volume_admin_url': nova_volume_admin_url, |
2501 | 678 | 'nova-volume_internal_url': nova_volume_url, | 730 | 'nova-volume_internal_url': nova_volume_internal_url, |
2502 | 679 | }) | 731 | }) |
2503 | 680 | 732 | ||
2504 | 681 | # XXX: Keep these relations named quantum_*?? | 733 | # XXX: Keep these relations named quantum_*?? |
2506 | 682 | if network_manager() in ['quantum', 'neutron']: | 734 | if is_relation_made('neutron-api'): |
2507 | 735 | endpoints.update({ | ||
2508 | 736 | 'quantum_service': None, | ||
2509 | 737 | 'quantum_region': None, | ||
2510 | 738 | 'quantum_public_url': None, | ||
2511 | 739 | 'quantum_admin_url': None, | ||
2512 | 740 | 'quantum_internal_url': None, | ||
2513 | 741 | }) | ||
2514 | 742 | elif network_manager() in ['quantum', 'neutron']: | ||
2515 | 683 | endpoints.update({ | 743 | endpoints.update({ |
2516 | 684 | 'quantum_service': 'quantum', | 744 | 'quantum_service': 'quantum', |
2517 | 685 | 'quantum_region': region, | 745 | 'quantum_region': region, |
2521 | 686 | 'quantum_public_url': neutron_url, | 746 | 'quantum_public_url': neutron_public_url, |
2522 | 687 | 'quantum_admin_url': neutron_url, | 747 | 'quantum_admin_url': neutron_admin_url, |
2523 | 688 | 'quantum_internal_url': neutron_url, | 748 | 'quantum_internal_url': neutron_internal_url, |
2524 | 689 | }) | 749 | }) |
2525 | 690 | 750 | ||
2526 | 691 | return endpoints | 751 | return endpoints |
2527 | @@ -695,3 +755,58 @@ | |||
2528 | 695 | # quantum-plugin config setting can be safely overriden | 755 | # quantum-plugin config setting can be safely overriden |
2529 | 696 | # as we only supported OVS in G/neutron | 756 | # as we only supported OVS in G/neutron |
2530 | 697 | return config('neutron-plugin') or config('quantum-plugin') | 757 | return config('neutron-plugin') or config('quantum-plugin') |
2531 | 758 | |||
2532 | 759 | |||
2533 | 760 | def guard_map(): | ||
2534 | 761 | '''Map of services and required interfaces that must be present before | ||
2535 | 762 | the service should be allowed to start''' | ||
2536 | 763 | gmap = {} | ||
2537 | 764 | nova_services = deepcopy(BASE_SERVICES) | ||
2538 | 765 | if os_release('nova-common') not in ['essex', 'folsom']: | ||
2539 | 766 | nova_services.append('nova-conductor') | ||
2540 | 767 | |||
2541 | 768 | nova_interfaces = ['identity-service', 'amqp'] | ||
2542 | 769 | if relation_ids('pgsql-nova-db'): | ||
2543 | 770 | nova_interfaces.append('pgsql-nova-db') | ||
2544 | 771 | else: | ||
2545 | 772 | nova_interfaces.append('shared-db') | ||
2546 | 773 | |||
2547 | 774 | for svc in nova_services: | ||
2548 | 775 | gmap[svc] = nova_interfaces | ||
2549 | 776 | |||
2550 | 777 | net_manager = network_manager() | ||
2551 | 778 | if net_manager in ['neutron', 'quantum']: | ||
2552 | 779 | neutron_interfaces = ['identity-service', 'amqp'] | ||
2553 | 780 | if relation_ids('pgsql-neutron-db'): | ||
2554 | 781 | neutron_interfaces.append('pgsql-neutron-db') | ||
2555 | 782 | else: | ||
2556 | 783 | neutron_interfaces.append('shared-db') | ||
2557 | 784 | if network_manager() == 'quantum': | ||
2558 | 785 | gmap['quantum-server'] = neutron_interfaces | ||
2559 | 786 | else: | ||
2560 | 787 | gmap['neutron-server'] = neutron_interfaces | ||
2561 | 788 | |||
2562 | 789 | return gmap | ||
2563 | 790 | |||
2564 | 791 | |||
2565 | 792 | def service_guard(guard_map, contexts, active=False): | ||
2566 | 793 | '''Inhibit services in guard_map from running unless | ||
2567 | 794 | required interfaces are found complete in contexts.''' | ||
2568 | 795 | def wrap(f): | ||
2569 | 796 | def wrapped_f(*args): | ||
2570 | 797 | if active is True: | ||
2571 | 798 | incomplete_services = [] | ||
2572 | 799 | for svc in guard_map: | ||
2573 | 800 | for interface in guard_map[svc]: | ||
2574 | 801 | if interface not in contexts.complete_contexts(): | ||
2575 | 802 | incomplete_services.append(svc) | ||
2576 | 803 | f(*args) | ||
2577 | 804 | for svc in incomplete_services: | ||
2578 | 805 | if service_running(svc): | ||
2579 | 806 | log('Service {} has unfulfilled ' | ||
2580 | 807 | 'interface requirements, stopping.'.format(svc)) | ||
2581 | 808 | service_stop(svc) | ||
2582 | 809 | else: | ||
2583 | 810 | f(*args) | ||
2584 | 811 | return wrapped_f | ||
2585 | 812 | return wrap | ||
2586 | 698 | 813 | ||
2587 | === modified file 'metadata.yaml' | |||
2588 | --- metadata.yaml 2014-03-31 11:56:09 +0000 | |||
2589 | +++ metadata.yaml 2014-07-29 13:07:23 +0000 | |||
2590 | @@ -30,6 +30,8 @@ | |||
2591 | 30 | interface: nova-volume | 30 | interface: nova-volume |
2592 | 31 | quantum-network-service: | 31 | quantum-network-service: |
2593 | 32 | interface: quantum | 32 | interface: quantum |
2594 | 33 | neutron-api: | ||
2595 | 34 | interface: neutron-api | ||
2596 | 33 | ha: | 35 | ha: |
2597 | 34 | interface: hacluster | 36 | interface: hacluster |
2598 | 35 | scope: container | 37 | scope: container |
2599 | 36 | 38 | ||
2600 | === modified file 'revision' | |||
2601 | --- revision 2014-04-16 08:25:14 +0000 | |||
2602 | +++ revision 2014-07-29 13:07:23 +0000 | |||
2603 | @@ -1,1 +1,1 @@ | |||
2605 | 1 | 315 | 1 | 500 |
2606 | 2 | 2 | ||
2607 | === added directory 'tests' | |||
2608 | === added file 'tests/00-setup' | |||
2609 | --- tests/00-setup 1970-01-01 00:00:00 +0000 | |||
2610 | +++ tests/00-setup 2014-07-29 13:07:23 +0000 | |||
2611 | @@ -0,0 +1,10 @@ | |||
2612 | 1 | #!/bin/bash | ||
2613 | 2 | |||
2614 | 3 | set -ex | ||
2615 | 4 | |||
2616 | 5 | sudo add-apt-repository --yes ppa:juju/stable | ||
2617 | 6 | sudo apt-get update --yes | ||
2618 | 7 | sudo apt-get install --yes python-amulet | ||
2619 | 8 | sudo apt-get install --yes python-glanceclient | ||
2620 | 9 | sudo apt-get install --yes python-keystoneclient | ||
2621 | 10 | sudo apt-get install --yes python-novaclient | ||
2622 | 0 | 11 | ||
2623 | === added file 'tests/10-basic-precise-essex' | |||
2624 | --- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000 | |||
2625 | +++ tests/10-basic-precise-essex 2014-07-29 13:07:23 +0000 | |||
2626 | @@ -0,0 +1,10 @@ | |||
2627 | 1 | #!/usr/bin/python | ||
2628 | 2 | |||
2629 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2630 | 4 | precise-essex.""" | ||
2631 | 5 | |||
2632 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
2633 | 7 | |||
2634 | 8 | if __name__ == '__main__': | ||
2635 | 9 | deployment = NovaCCBasicDeployment(series='precise') | ||
2636 | 10 | deployment.run_tests() | ||
2637 | 0 | 11 | ||
2638 | === added file 'tests/11-basic-precise-folsom' | |||
2639 | --- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000 | |||
2640 | +++ tests/11-basic-precise-folsom 2014-07-29 13:07:23 +0000 | |||
2641 | @@ -0,0 +1,18 @@ | |||
2642 | 1 | #!/usr/bin/python | ||
2643 | 2 | |||
2644 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2645 | 4 | precise-folsom.""" | ||
2646 | 5 | |||
2647 | 6 | import amulet | ||
2648 | 7 | from basic_deployment import NovaCCBasicDeployment | ||
2649 | 8 | |||
2650 | 9 | if __name__ == '__main__': | ||
2651 | 10 | # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync' | ||
2652 | 11 | # fails in shared-db-relation-changed (only fails on folsom) | ||
2653 | 12 | message = "Skipping failing test until resolved" | ||
2654 | 13 | amulet.raise_status(amulet.SKIP, msg=message) | ||
2655 | 14 | |||
2656 | 15 | deployment = NovaCCBasicDeployment(series='precise', | ||
2657 | 16 | openstack='cloud:precise-folsom', | ||
2658 | 17 | source='cloud:precise-updates/folsom') | ||
2659 | 18 | deployment.run_tests() | ||
2660 | 0 | 19 | ||
2661 | === added file 'tests/12-basic-precise-grizzly' | |||
2662 | --- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000 | |||
2663 | +++ tests/12-basic-precise-grizzly 2014-07-29 13:07:23 +0000 | |||
2664 | @@ -0,0 +1,12 @@ | |||
2665 | 1 | #!/usr/bin/python | ||
2666 | 2 | |||
2667 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2668 | 4 | precise-grizzly.""" | ||
2669 | 5 | |||
2670 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
2671 | 7 | |||
2672 | 8 | if __name__ == '__main__': | ||
2673 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
2674 | 10 | openstack='cloud:precise-grizzly', | ||
2675 | 11 | source='cloud:precise-updates/grizzly') | ||
2676 | 12 | deployment.run_tests() | ||
2677 | 0 | 13 | ||
2678 | === added file 'tests/13-basic-precise-havana' | |||
2679 | --- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000 | |||
2680 | +++ tests/13-basic-precise-havana 2014-07-29 13:07:23 +0000 | |||
2681 | @@ -0,0 +1,12 @@ | |||
2682 | 1 | #!/usr/bin/python | ||
2683 | 2 | |||
2684 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2685 | 4 | precise-havana.""" | ||
2686 | 5 | |||
2687 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
2688 | 7 | |||
2689 | 8 | if __name__ == '__main__': | ||
2690 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
2691 | 10 | openstack='cloud:precise-havana', | ||
2692 | 11 | source='cloud:precise-updates/havana') | ||
2693 | 12 | deployment.run_tests() | ||
2694 | 0 | 13 | ||
2695 | === added file 'tests/14-basic-precise-icehouse' | |||
2696 | --- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000 | |||
2697 | +++ tests/14-basic-precise-icehouse 2014-07-29 13:07:23 +0000 | |||
2698 | @@ -0,0 +1,12 @@ | |||
2699 | 1 | #!/usr/bin/python | ||
2700 | 2 | |||
2701 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2702 | 4 | precise-icehouse.""" | ||
2703 | 5 | |||
2704 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
2705 | 7 | |||
2706 | 8 | if __name__ == '__main__': | ||
2707 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
2708 | 10 | openstack='cloud:precise-icehouse', | ||
2709 | 11 | source='cloud:precise-updates/icehouse') | ||
2710 | 12 | deployment.run_tests() | ||
2711 | 0 | 13 | ||
2712 | === added file 'tests/15-basic-trusty-icehouse' | |||
2713 | --- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000 | |||
2714 | +++ tests/15-basic-trusty-icehouse 2014-07-29 13:07:23 +0000 | |||
2715 | @@ -0,0 +1,10 @@ | |||
2716 | 1 | #!/usr/bin/python | ||
2717 | 2 | |||
2718 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
2719 | 4 | trusty-icehouse.""" | ||
2720 | 5 | |||
2721 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
2722 | 7 | |||
2723 | 8 | if __name__ == '__main__': | ||
2724 | 9 | deployment = NovaCCBasicDeployment(series='trusty') | ||
2725 | 10 | deployment.run_tests() | ||
2726 | 0 | 11 | ||
2727 | === added file 'tests/README' | |||
2728 | --- tests/README 1970-01-01 00:00:00 +0000 | |||
2729 | +++ tests/README 2014-07-29 13:07:23 +0000 | |||
2730 | @@ -0,0 +1,47 @@ | |||
2731 | 1 | This directory provides Amulet tests that focus on verification of Nova Cloud | ||
2732 | 2 | Controller deployments. | ||
2733 | 3 | |||
2734 | 4 | If you use a web proxy server to access the web, you'll need to set the | ||
2735 | 5 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. | ||
2736 | 6 | |||
2737 | 7 | The following examples demonstrate different ways that tests can be executed. | ||
2738 | 8 | All examples are run from the charm's root directory. | ||
2739 | 9 | |||
2740 | 10 | * To run all tests (starting with 00-setup): | ||
2741 | 11 | |||
2742 | 12 | make test | ||
2743 | 13 | |||
2744 | 14 | * To run a specific test module (or modules): | ||
2745 | 15 | |||
2746 | 16 | juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
2747 | 17 | |||
2748 | 18 | * To run a specific test module (or modules), and keep the environment | ||
2749 | 19 | deployed after a failure: | ||
2750 | 20 | |||
2751 | 21 | juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
2752 | 22 | |||
2753 | 23 | * To re-run a test module against an already deployed environment (one | ||
2754 | 24 | that was deployed by a previous call to 'juju test --set-e'): | ||
2755 | 25 | |||
2756 | 26 | ./tests/15-basic-trusty-icehouse | ||
2757 | 27 | |||
2758 | 28 | For debugging and test development purposes, all code should be idempotent. | ||
2759 | 29 | In other words, the code should have the ability to be re-run without changing | ||
2760 | 30 | the results beyond the initial run. This enables editing and re-running of a | ||
2761 | 31 | test module against an already deployed environment, as described above. | ||
2762 | 32 | |||
2763 | 33 | Manual debugging tips: | ||
2764 | 34 | |||
2765 | 35 | * Set the following env vars before using the OpenStack CLI as admin: | ||
2766 | 36 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
2767 | 37 | export OS_TENANT_NAME=admin | ||
2768 | 38 | export OS_USERNAME=admin | ||
2769 | 39 | export OS_PASSWORD=openstack | ||
2770 | 40 | export OS_REGION_NAME=RegionOne | ||
2771 | 41 | |||
2772 | 42 | * Set the following env vars before using the OpenStack CLI as demoUser: | ||
2773 | 43 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
2774 | 44 | export OS_TENANT_NAME=demoTenant | ||
2775 | 45 | export OS_USERNAME=demoUser | ||
2776 | 46 | export OS_PASSWORD=password | ||
2777 | 47 | export OS_REGION_NAME=RegionOne | ||
2778 | 0 | 48 | ||
2779 | === added file 'tests/basic_deployment.py' | |||
2780 | --- tests/basic_deployment.py 1970-01-01 00:00:00 +0000 | |||
2781 | +++ tests/basic_deployment.py 2014-07-29 13:07:23 +0000 | |||
2782 | @@ -0,0 +1,520 @@ | |||
2783 | 1 | #!/usr/bin/python | ||
2784 | 2 | |||
2785 | 3 | import amulet | ||
2786 | 4 | |||
2787 | 5 | from charmhelpers.contrib.openstack.amulet.deployment import ( | ||
2788 | 6 | OpenStackAmuletDeployment | ||
2789 | 7 | ) | ||
2790 | 8 | |||
2791 | 9 | from charmhelpers.contrib.openstack.amulet.utils import ( | ||
2792 | 10 | OpenStackAmuletUtils, | ||
2793 | 11 | DEBUG, # flake8: noqa | ||
2794 | 12 | ERROR | ||
2795 | 13 | ) | ||
2796 | 14 | |||
2797 | 15 | # Use DEBUG to turn on debug logging | ||
2798 | 16 | u = OpenStackAmuletUtils(ERROR) | ||
2799 | 17 | |||
2800 | 18 | |||
2801 | 19 | class NovaCCBasicDeployment(OpenStackAmuletDeployment): | ||
2802 | 20 | """Amulet tests on a basic nova cloud controller deployment.""" | ||
2803 | 21 | |||
2804 | 22 | def __init__(self, series=None, openstack=None, source=None): | ||
2805 | 23 | """Deploy the entire test environment.""" | ||
2806 | 24 | super(NovaCCBasicDeployment, self).__init__(series, openstack, source) | ||
2807 | 25 | self._add_services() | ||
2808 | 26 | self._add_relations() | ||
2809 | 27 | self._configure_services() | ||
2810 | 28 | self._deploy() | ||
2811 | 29 | self._initialize_tests() | ||
2812 | 30 | |||
2813 | 31 | def _add_services(self): | ||
2814 | 32 | """Add the service that we're testing, including the number of units, | ||
2815 | 33 | where nova-cloud-controller is local, and the other charms are from | ||
2816 | 34 | the charm store.""" | ||
2817 | 35 | this_service = ('nova-cloud-controller', 1) | ||
2818 | 36 | other_services = [('mysql', 1), ('rabbitmq-server', 1), | ||
2819 | 37 | ('nova-compute', 2), ('keystone', 1), ('glance', 1)] | ||
2820 | 38 | super(NovaCCBasicDeployment, self)._add_services(this_service, | ||
2821 | 39 | other_services) | ||
2822 | 40 | |||
2823 | 41 | def _add_relations(self): | ||
2824 | 42 | """Add all of the relations for the services.""" | ||
2825 | 43 | relations = { | ||
2826 | 44 | 'nova-cloud-controller:shared-db': 'mysql:shared-db', | ||
2827 | 45 | 'nova-cloud-controller:identity-service': 'keystone:identity-service', | ||
2828 | 46 | 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', | ||
2829 | 47 | 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute', | ||
2830 | 48 | 'nova-cloud-controller:image-service': 'glance:image-service', | ||
2831 | 49 | 'nova-compute:image-service': 'glance:image-service', | ||
2832 | 50 | 'nova-compute:shared-db': 'mysql:shared-db', | ||
2833 | 51 | 'nova-compute:amqp': 'rabbitmq-server:amqp', | ||
2834 | 52 | 'keystone:shared-db': 'mysql:shared-db', | ||
2835 | 53 | 'glance:identity-service': 'keystone:identity-service', | ||
2836 | 54 | 'glance:shared-db': 'mysql:shared-db', | ||
2837 | 55 | 'glance:amqp': 'rabbitmq-server:amqp' | ||
2838 | 56 | } | ||
2839 | 57 | super(NovaCCBasicDeployment, self)._add_relations(relations) | ||
2840 | 58 | |||
2841 | 59 | def _configure_services(self): | ||
2842 | 60 | """Configure all of the services.""" | ||
2843 | 61 | keystone_config = {'admin-password': 'openstack', | ||
2844 | 62 | 'admin-token': 'ubuntutesting'} | ||
2845 | 63 | configs = {'keystone': keystone_config} | ||
2846 | 64 | super(NovaCCBasicDeployment, self)._configure_services(configs) | ||
2847 | 65 | |||
2848 | 66 | def _initialize_tests(self): | ||
2849 | 67 | """Perform final initialization before tests get run.""" | ||
2850 | 68 | # Access the sentries for inspecting service units | ||
2851 | 69 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] | ||
2852 | 70 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] | ||
2853 | 71 | self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] | ||
2854 | 72 | self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0'] | ||
2855 | 73 | self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] | ||
2856 | 74 | self.glance_sentry = self.d.sentry.unit['glance/0'] | ||
2857 | 75 | |||
2858 | 76 | # Authenticate admin with keystone | ||
2859 | 77 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | ||
2860 | 78 | user='admin', | ||
2861 | 79 | password='openstack', | ||
2862 | 80 | tenant='admin') | ||
2863 | 81 | |||
2864 | 82 | # Authenticate admin with glance endpoint | ||
2865 | 83 | self.glance = u.authenticate_glance_admin(self.keystone) | ||
2866 | 84 | |||
2867 | 85 | # Create a demo tenant/role/user | ||
2868 | 86 | self.demo_tenant = 'demoTenant' | ||
2869 | 87 | self.demo_role = 'demoRole' | ||
2870 | 88 | self.demo_user = 'demoUser' | ||
2871 | 89 | if not u.tenant_exists(self.keystone, self.demo_tenant): | ||
2872 | 90 | tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, | ||
2873 | 91 | description='demo tenant', | ||
2874 | 92 | enabled=True) | ||
2875 | 93 | self.keystone.roles.create(name=self.demo_role) | ||
2876 | 94 | self.keystone.users.create(name=self.demo_user, | ||
2877 | 95 | password='password', | ||
2878 | 96 | tenant_id=tenant.id, | ||
2879 | 97 | email='demo@demo.com') | ||
2880 | 98 | |||
2881 | 99 | # Authenticate demo user with keystone | ||
2882 | 100 | self.keystone_demo = \ | ||
2883 | 101 | u.authenticate_keystone_user(self.keystone, user=self.demo_user, | ||
2884 | 102 | password='password', | ||
2885 | 103 | tenant=self.demo_tenant) | ||
2886 | 104 | |||
2887 | 105 | # Authenticate demo user with nova-api | ||
2888 | 106 | self.nova_demo = u.authenticate_nova_user(self.keystone, | ||
2889 | 107 | user=self.demo_user, | ||
2890 | 108 | password='password', | ||
2891 | 109 | tenant=self.demo_tenant) | ||
2892 | 110 | |||
2893 | 111 | def test_services(self): | ||
2894 | 112 | """Verify the expected services are running on the corresponding | ||
2895 | 113 | service units.""" | ||
2896 | 114 | commands = { | ||
2897 | 115 | self.mysql_sentry: ['status mysql'], | ||
2898 | 116 | self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], | ||
2899 | 117 | self.nova_cc_sentry: ['status nova-api-ec2', | ||
2900 | 118 | 'status nova-api-os-compute', | ||
2901 | 119 | 'status nova-objectstore', | ||
2902 | 120 | 'status nova-cert', | ||
2903 | 121 | 'status nova-scheduler'], | ||
2904 | 122 | self.nova_compute_sentry: ['status nova-compute', | ||
2905 | 123 | 'status nova-network', | ||
2906 | 124 | 'status nova-api'], | ||
2907 | 125 | self.keystone_sentry: ['status keystone'], | ||
2908 | 126 | self.glance_sentry: ['status glance-registry', 'status glance-api'] | ||
2909 | 127 | } | ||
2910 | 128 | if self._get_openstack_release() >= self.precise_grizzly: | ||
2911 | 129 | commands[self.nova_cc_sentry] = ['status nova-conductor'] | ||
2912 | 130 | |||
2913 | 131 | ret = u.validate_services(commands) | ||
2914 | 132 | if ret: | ||
2915 | 133 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
2916 | 134 | |||
2917 | 135 | def test_service_catalog(self): | ||
2918 | 136 | """Verify that the service catalog endpoint data is valid.""" | ||
2919 | 137 | endpoint_vol = {'adminURL': u.valid_url, | ||
2920 | 138 | 'region': 'RegionOne', | ||
2921 | 139 | 'publicURL': u.valid_url, | ||
2922 | 140 | 'internalURL': u.valid_url} | ||
2923 | 141 | endpoint_id = {'adminURL': u.valid_url, | ||
2924 | 142 | 'region': 'RegionOne', | ||
2925 | 143 | 'publicURL': u.valid_url, | ||
2926 | 144 | 'internalURL': u.valid_url} | ||
2927 | 145 | if self._get_openstack_release() >= self.precise_folsom: | ||
2928 | 146 | endpoint_vol['id'] = u.not_null | ||
2929 | 147 | endpoint_id['id'] = u.not_null | ||
2930 | 148 | expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol], | ||
2931 | 149 | 'ec2': [endpoint_vol], 'identity': [endpoint_id]} | ||
2932 | 150 | actual = self.keystone_demo.service_catalog.get_endpoints() | ||
2933 | 151 | |||
2934 | 152 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | ||
2935 | 153 | if ret: | ||
2936 | 154 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
2937 | 155 | |||
2938 | 156 | def test_openstack_compute_api_endpoint(self): | ||
2939 | 157 | """Verify the openstack compute api (osapi) endpoint data.""" | ||
2940 | 158 | endpoints = self.keystone.endpoints.list() | ||
2941 | 159 | admin_port = internal_port = public_port = '8774' | ||
2942 | 160 | expected = {'id': u.not_null, | ||
2943 | 161 | 'region': 'RegionOne', | ||
2944 | 162 | 'adminurl': u.valid_url, | ||
2945 | 163 | 'internalurl': u.valid_url, | ||
2946 | 164 | 'publicurl': u.valid_url, | ||
2947 | 165 | 'service_id': u.not_null} | ||
2948 | 166 | |||
2949 | 167 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
2950 | 168 | public_port, expected) | ||
2951 | 169 | if ret: | ||
2952 | 170 | message = 'osapi endpoint: {}'.format(ret) | ||
2953 | 171 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2954 | 172 | |||
2955 | 173 | def test_ec2_api_endpoint(self): | ||
2956 | 174 | """Verify the EC2 api endpoint data.""" | ||
2957 | 175 | endpoints = self.keystone.endpoints.list() | ||
2958 | 176 | admin_port = internal_port = public_port = '8773' | ||
2959 | 177 | expected = {'id': u.not_null, | ||
2960 | 178 | 'region': 'RegionOne', | ||
2961 | 179 | 'adminurl': u.valid_url, | ||
2962 | 180 | 'internalurl': u.valid_url, | ||
2963 | 181 | 'publicurl': u.valid_url, | ||
2964 | 182 | 'service_id': u.not_null} | ||
2965 | 183 | |||
2966 | 184 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
2967 | 185 | public_port, expected) | ||
2968 | 186 | if ret: | ||
2969 | 187 | message = 'EC2 endpoint: {}'.format(ret) | ||
2970 | 188 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2971 | 189 | |||
2972 | 190 | def test_s3_api_endpoint(self): | ||
2973 | 191 | """Verify the S3 api endpoint data.""" | ||
2974 | 192 | endpoints = self.keystone.endpoints.list() | ||
2975 | 193 | admin_port = internal_port = public_port = '3333' | ||
2976 | 194 | expected = {'id': u.not_null, | ||
2977 | 195 | 'region': 'RegionOne', | ||
2978 | 196 | 'adminurl': u.valid_url, | ||
2979 | 197 | 'internalurl': u.valid_url, | ||
2980 | 198 | 'publicurl': u.valid_url, | ||
2981 | 199 | 'service_id': u.not_null} | ||
2982 | 200 | |||
2983 | 201 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
2984 | 202 | public_port, expected) | ||
2985 | 203 | if ret: | ||
2986 | 204 | message = 'S3 endpoint: {}'.format(ret) | ||
2987 | 205 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2988 | 206 | |||
2989 | 207 | def test_nova_cc_shared_db_relation(self): | ||
2990 | 208 | """Verify the nova-cc to mysql shared-db relation data""" | ||
2991 | 209 | unit = self.nova_cc_sentry | ||
2992 | 210 | relation = ['shared-db', 'mysql:shared-db'] | ||
2993 | 211 | expected = { | ||
2994 | 212 | 'private-address': u.valid_ip, | ||
2995 | 213 | 'nova_database': 'nova', | ||
2996 | 214 | 'nova_username': 'nova', | ||
2997 | 215 | 'nova_hostname': u.valid_ip | ||
2998 | 216 | } | ||
2999 | 217 | |||
3000 | 218 | ret = u.validate_relation_data(unit, relation, expected) | ||
3001 | 219 | if ret: | ||
3002 | 220 | message = u.relation_error('nova-cc shared-db', ret) | ||
3003 | 221 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3004 | 222 | |||
3005 | 223 | def test_mysql_shared_db_relation(self): | ||
3006 | 224 | """Verify the mysql to nova-cc shared-db relation data""" | ||
3007 | 225 | unit = self.mysql_sentry | ||
3008 | 226 | relation = ['shared-db', 'nova-cloud-controller:shared-db'] | ||
3009 | 227 | expected = { | ||
3010 | 228 | 'private-address': u.valid_ip, | ||
3011 | 229 | 'nova_password': u.not_null, | ||
3012 | 230 | 'db_host': u.valid_ip | ||
3013 | 231 | } | ||
3014 | 232 | |||
3015 | 233 | ret = u.validate_relation_data(unit, relation, expected) | ||
3016 | 234 | if ret: | ||
3017 | 235 | message = u.relation_error('mysql shared-db', ret) | ||
3018 | 236 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3019 | 237 | |||
3020 | 238 | def test_nova_cc_identity_service_relation(self): | ||
3021 | 239 | """Verify the nova-cc to keystone identity-service relation data""" | ||
3022 | 240 | unit = self.nova_cc_sentry | ||
3023 | 241 | relation = ['identity-service', 'keystone:identity-service'] | ||
3024 | 242 | expected = { | ||
3025 | 243 | 'nova_internal_url': u.valid_url, | ||
3026 | 244 | 'nova_public_url': u.valid_url, | ||
3027 | 245 | 's3_public_url': u.valid_url, | ||
3028 | 246 | 's3_service': 's3', | ||
3029 | 247 | 'ec2_admin_url': u.valid_url, | ||
3030 | 248 | 'ec2_internal_url': u.valid_url, | ||
3031 | 249 | 'nova_service': 'nova', | ||
3032 | 250 | 's3_region': 'RegionOne', | ||
3033 | 251 | 'private-address': u.valid_ip, | ||
3034 | 252 | 'nova_region': 'RegionOne', | ||
3035 | 253 | 'ec2_public_url': u.valid_url, | ||
3036 | 254 | 'ec2_region': 'RegionOne', | ||
3037 | 255 | 's3_internal_url': u.valid_url, | ||
3038 | 256 | 's3_admin_url': u.valid_url, | ||
3039 | 257 | 'nova_admin_url': u.valid_url, | ||
3040 | 258 | 'ec2_service': 'ec2' | ||
3041 | 259 | } | ||
3042 | 260 | |||
3043 | 261 | ret = u.validate_relation_data(unit, relation, expected) | ||
3044 | 262 | if ret: | ||
3045 | 263 | message = u.relation_error('nova-cc identity-service', ret) | ||
3046 | 264 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3047 | 265 | |||
3048 | 266 | def test_keystone_identity_service_relation(self): | ||
3049 | 267 | """Verify the keystone to nova-cc identity-service relation data""" | ||
3050 | 268 | unit = self.keystone_sentry | ||
3051 | 269 | relation = ['identity-service', | ||
3052 | 270 | 'nova-cloud-controller:identity-service'] | ||
3053 | 271 | expected = { | ||
3054 | 272 | 'service_protocol': 'http', | ||
3055 | 273 | 'service_tenant': 'services', | ||
3056 | 274 | 'admin_token': 'ubuntutesting', | ||
3057 | 275 | 'service_password': u.not_null, | ||
3058 | 276 | 'service_port': '5000', | ||
3059 | 277 | 'auth_port': '35357', | ||
3060 | 278 | 'auth_protocol': 'http', | ||
3061 | 279 | 'private-address': u.valid_ip, | ||
3062 | 280 | 'https_keystone': 'False', | ||
3063 | 281 | 'auth_host': u.valid_ip, | ||
3064 | 282 | 'service_username': 's3_ec2_nova', | ||
3065 | 283 | 'service_tenant_id': u.not_null, | ||
3066 | 284 | 'service_host': u.valid_ip | ||
3067 | 285 | } | ||
3068 | 286 | |||
3069 | 287 | ret = u.validate_relation_data(unit, relation, expected) | ||
3070 | 288 | if ret: | ||
3071 | 289 | message = u.relation_error('keystone identity-service', ret) | ||
3072 | 290 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3073 | 291 | |||
3074 | 292 | def test_nova_cc_amqp_relation(self): | ||
3075 | 293 | """Verify the nova-cc to rabbitmq-server amqp relation data""" | ||
3076 | 294 | unit = self.nova_cc_sentry | ||
3077 | 295 | relation = ['amqp', 'rabbitmq-server:amqp'] | ||
3078 | 296 | expected = { | ||
3079 | 297 | 'username': 'nova', | ||
3080 | 298 | 'private-address': u.valid_ip, | ||
3081 | 299 | 'vhost': 'openstack' | ||
3082 | 300 | } | ||
3083 | 301 | |||
3084 | 302 | ret = u.validate_relation_data(unit, relation, expected) | ||
3085 | 303 | if ret: | ||
3086 | 304 | message = u.relation_error('nova-cc amqp', ret) | ||
3087 | 305 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3088 | 306 | |||
3089 | 307 | def test_rabbitmq_amqp_relation(self): | ||
3090 | 308 | """Verify the rabbitmq-server to nova-cc amqp relation data""" | ||
3091 | 309 | unit = self.rabbitmq_sentry | ||
3092 | 310 | relation = ['amqp', 'nova-cloud-controller:amqp'] | ||
3093 | 311 | expected = { | ||
3094 | 312 | 'private-address': u.valid_ip, | ||
3095 | 313 | 'password': u.not_null, | ||
3096 | 314 | 'hostname': u.valid_ip | ||
3097 | 315 | } | ||
3098 | 316 | |||
3099 | 317 | ret = u.validate_relation_data(unit, relation, expected) | ||
3100 | 318 | if ret: | ||
3101 | 319 | message = u.relation_error('rabbitmq amqp', ret) | ||
3102 | 320 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3103 | 321 | |||
3104 | 322 | def test_nova_cc_cloud_compute_relation(self): | ||
3105 | 323 | """Verify the nova-cc to nova-compute cloud-compute relation data""" | ||
3106 | 324 | unit = self.nova_cc_sentry | ||
3107 | 325 | relation = ['cloud-compute', 'nova-compute:cloud-compute'] | ||
3108 | 326 | expected = { | ||
3109 | 327 | 'volume_service': 'cinder', | ||
3110 | 328 | 'network_manager': 'flatdhcpmanager', | ||
3111 | 329 | 'ec2_host': u.valid_ip, | ||
3112 | 330 | 'private-address': u.valid_ip, | ||
3113 | 331 | 'restart_trigger': u.not_null | ||
3114 | 332 | } | ||
3115 | 333 | if self._get_openstack_release() == self.precise_essex: | ||
3116 | 334 | expected['volume_service'] = 'nova-volume' | ||
3117 | 335 | |||
3118 | 336 | ret = u.validate_relation_data(unit, relation, expected) | ||
3119 | 337 | if ret: | ||
3120 | 338 | message = u.relation_error('nova-cc cloud-compute', ret) | ||
3121 | 339 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3122 | 340 | |||
3123 | 341 | def test_nova_cloud_compute_relation(self): | ||
3124 | 342 | """Verify the nova-compute to nova-cc cloud-compute relation data""" | ||
3125 | 343 | unit = self.nova_compute_sentry | ||
3126 | 344 | relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute'] | ||
3127 | 345 | expected = { | ||
3128 | 346 | 'private-address': u.valid_ip, | ||
3129 | 347 | } | ||
3130 | 348 | |||
3131 | 349 | ret = u.validate_relation_data(unit, relation, expected) | ||
3132 | 350 | if ret: | ||
3133 | 351 | message = u.relation_error('nova-compute cloud-compute', ret) | ||
3134 | 352 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3135 | 353 | |||
3136 | 354 | def test_nova_cc_image_service_relation(self): | ||
3137 | 355 | """Verify the nova-cc to glance image-service relation data""" | ||
3138 | 356 | unit = self.nova_cc_sentry | ||
3139 | 357 | relation = ['image-service', 'glance:image-service'] | ||
3140 | 358 | expected = { | ||
3141 | 359 | 'private-address': u.valid_ip, | ||
3142 | 360 | } | ||
3143 | 361 | |||
3144 | 362 | ret = u.validate_relation_data(unit, relation, expected) | ||
3145 | 363 | if ret: | ||
3146 | 364 | message = u.relation_error('nova-cc image-service', ret) | ||
3147 | 365 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3148 | 366 | |||
3149 | 367 | def test_glance_image_service_relation(self): | ||
3150 | 368 | """Verify the glance to nova-cc image-service relation data""" | ||
3151 | 369 | unit = self.glance_sentry | ||
3152 | 370 | relation = ['image-service', 'nova-cloud-controller:image-service'] | ||
3153 | 371 | expected = { | ||
3154 | 372 | 'private-address': u.valid_ip, | ||
3155 | 373 | 'glance-api-server': u.valid_url | ||
3156 | 374 | } | ||
3157 | 375 | |||
3158 | 376 | ret = u.validate_relation_data(unit, relation, expected) | ||
3159 | 377 | if ret: | ||
3160 | 378 | message = u.relation_error('glance image-service', ret) | ||
3161 | 379 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3162 | 380 | |||
3163 | 381 | def test_restart_on_config_change(self): | ||
3164 | 382 | """Verify that the specified services are restarted when the config | ||
3165 | 383 | is changed.""" | ||
3166 | 384 | # NOTE(coreycb): Skipping failing test on essex until resolved. | ||
3167 | 385 | # config-flags don't take effect on essex. | ||
3168 | 386 | if self._get_openstack_release() == self.precise_essex: | ||
3169 | 387 | u.log.error("Skipping failing test until resolved") | ||
3170 | 388 | return | ||
3171 | 389 | |||
3172 | 390 | services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore', | ||
3173 | 391 | 'nova-cert', 'nova-scheduler', 'nova-conductor'] | ||
3174 | 392 | self.d.configure('nova-cloud-controller', | ||
3175 | 393 | {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'}) | ||
3176 | 394 | pgrep_full = True | ||
3177 | 395 | |||
3178 | 396 | time = 20 | ||
3179 | 397 | conf = '/etc/nova/nova.conf' | ||
3180 | 398 | for s in services: | ||
3181 | 399 | if not u.service_restarted(self.nova_cc_sentry, s, conf, | ||
3182 | 400 | pgrep_full=True, sleep_time=time): | ||
3183 | 401 | msg = "service {} didn't restart after config change".format(s) | ||
3184 | 402 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
3185 | 403 | time = 0 | ||
3186 | 404 | |||
3187 | 405 | def test_nova_default_config(self): | ||
3188 | 406 | """Verify the data in the nova config file's default section.""" | ||
3189 | 407 | # NOTE(coreycb): Currently no way to test on essex because config file | ||
3190 | 408 | # has no section headers. | ||
3191 | 409 | if self._get_openstack_release() == self.precise_essex: | ||
3192 | 410 | return | ||
3193 | 411 | |||
3194 | 412 | unit = self.nova_cc_sentry | ||
3195 | 413 | conf = '/etc/nova/nova.conf' | ||
3196 | 414 | rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', | ||
3197 | 415 | 'nova-cloud-controller:amqp') | ||
3198 | 416 | glance_relation = self.glance_sentry.relation('image-service', | ||
3199 | 417 | 'nova-cloud-controller:image-service') | ||
3200 | 418 | mysql_relation = self.mysql_sentry.relation('shared-db', | ||
3201 | 419 | 'nova-cloud-controller:shared-db') | ||
3202 | 420 | db_uri = "mysql://{}:{}@{}/{}".format('nova', | ||
3203 | 421 | mysql_relation['nova_password'], | ||
3204 | 422 | mysql_relation['db_host'], | ||
3205 | 423 | 'nova') | ||
3206 | 424 | keystone_ep = self.keystone_demo.service_catalog.url_for(\ | ||
3207 | 425 | service_type='identity', | ||
3208 | 426 | endpoint_type='publicURL') | ||
3209 | 427 | keystone_ec2 = "{}/ec2tokens".format(keystone_ep) | ||
3210 | 428 | |||
3211 | 429 | expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf', | ||
3212 | 430 | 'dhcpbridge': '/usr/bin/nova-dhcpbridge', | ||
3213 | 431 | 'logdir': '/var/log/nova', | ||
3214 | 432 | 'state_path': '/var/lib/nova', | ||
3215 | 433 | 'lock_path': '/var/lock/nova', | ||
3216 | 434 | 'force_dhcp_release': 'True', | ||
3217 | 435 | 'iscsi_helper': 'tgtadm', | ||
3218 | 436 | 'libvirt_use_virtio_for_bridges': 'True', | ||
3219 | 437 | 'connection_type': 'libvirt', | ||
3220 | 438 | 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf', | ||
3221 | 439 | 'verbose': 'True', | ||
3222 | 440 | 'ec2_private_dns_show_ip': 'True', | ||
3223 | 441 | 'api_paste_config': '/etc/nova/api-paste.ini', | ||
3224 | 442 | 'volumes_path': '/var/lib/nova/volumes', | ||
3225 | 443 | 'enabled_apis': 'ec2,osapi_compute,metadata', | ||
3226 | 444 | 'auth_strategy': 'keystone', | ||
3227 | 445 | 'compute_driver': 'libvirt.LibvirtDriver', | ||
3228 | 446 | 'keystone_ec2_url': keystone_ec2, | ||
3229 | 447 | 'sql_connection': db_uri, | ||
3230 | 448 | 'rabbit_userid': 'nova', | ||
3231 | 449 | 'rabbit_virtual_host': 'openstack', | ||
3232 | 450 | 'rabbit_password': rabbitmq_relation['password'], | ||
3233 | 451 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
3234 | 452 | 'glance_api_servers': glance_relation['glance-api-server'], | ||
3235 | 453 | 'network_manager': 'nova.network.manager.FlatDHCPManager', | ||
3236 | 454 | 's3_listen_port': '3333', | ||
3237 | 455 | 'osapi_compute_listen_port': '8774', | ||
3238 | 456 | 'ec2_listen_port': '8773'} | ||
3239 | 457 | |||
3240 | 458 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
3241 | 459 | if ret: | ||
3242 | 460 | message = "nova config error: {}".format(ret) | ||
3243 | 461 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3244 | 462 | |||
3245 | 463 | |||
3246 | 464 | def test_nova_keystone_authtoken_config(self): | ||
3247 | 465 | """Verify the data in the nova config file's keystone_authtoken | ||
3248 | 466 | section. This data only exists since icehouse.""" | ||
3249 | 467 | if self._get_openstack_release() < self.precise_icehouse: | ||
3250 | 468 | return | ||
3251 | 469 | |||
3252 | 470 | unit = self.nova_cc_sentry | ||
3253 | 471 | conf = '/etc/nova/nova.conf' | ||
3254 | 472 | keystone_relation = self.keystone_sentry.relation('identity-service', | ||
3255 | 473 | 'nova-cloud-controller:identity-service') | ||
3256 | 474 | keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'], | ||
3257 | 475 | keystone_relation['service_port']) | ||
3258 | 476 | expected = {'auth_uri': keystone_uri, | ||
3259 | 477 | 'auth_host': keystone_relation['service_host'], | ||
3260 | 478 | 'auth_port': keystone_relation['auth_port'], | ||
3261 | 479 | 'auth_protocol': keystone_relation['auth_protocol'], | ||
3262 | 480 | 'admin_tenant_name': keystone_relation['service_tenant'], | ||
3263 | 481 | 'admin_user': keystone_relation['service_username'], | ||
3264 | 482 | 'admin_password': keystone_relation['service_password']} | ||
3265 | 483 | |||
3266 | 484 | ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected) | ||
3267 | 485 | if ret: | ||
3268 | 486 | message = "nova config error: {}".format(ret) | ||
3269 | 487 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3270 | 488 | |||
3271 | 489 | def test_image_instance_create(self): | ||
3272 | 490 | """Create an image/instance, verify they exist, and delete them.""" | ||
3273 | 491 | # NOTE(coreycb): Skipping failing test on essex until resolved. essex | ||
3274 | 492 | # nova API calls are getting "Malformed request url (HTTP | ||
3275 | 493 | # 400)". | ||
3276 | 494 | if self._get_openstack_release() == self.precise_essex: | ||
3277 | 495 | u.log.error("Skipping failing test until resolved") | ||
3278 | 496 | return | ||
3279 | 497 | |||
3280 | 498 | image = u.create_cirros_image(self.glance, "cirros-image") | ||
3281 | 499 | if not image: | ||
3282 | 500 | amulet.raise_status(amulet.FAIL, msg="Image create failed") | ||
3283 | 501 | |||
3284 | 502 | instance = u.create_instance(self.nova_demo, "cirros-image", "cirros", | ||
3285 | 503 | "m1.tiny") | ||
3286 | 504 | if not instance: | ||
3287 | 505 | amulet.raise_status(amulet.FAIL, msg="Instance create failed") | ||
3288 | 506 | |||
3289 | 507 | found = False | ||
3290 | 508 | for instance in self.nova_demo.servers.list(): | ||
3291 | 509 | if instance.name == 'cirros': | ||
3292 | 510 | found = True | ||
3293 | 511 | if instance.status != 'ACTIVE': | ||
3294 | 512 | msg = "cirros instance is not active" | ||
3295 | 513 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3296 | 514 | |||
3297 | 515 | if not found: | ||
3298 | 516 | message = "nova cirros instance does not exist" | ||
3299 | 517 | amulet.raise_status(amulet.FAIL, msg=message) | ||
3300 | 518 | |||
3301 | 519 | u.delete_image(self.glance, image) | ||
3302 | 520 | u.delete_instance(self.nova_demo, instance) | ||
3303 | 0 | 521 | ||
3304 | === added directory 'tests/charmhelpers' | |||
3305 | === added file 'tests/charmhelpers/__init__.py' | |||
3306 | === added directory 'tests/charmhelpers/contrib' | |||
3307 | === added file 'tests/charmhelpers/contrib/__init__.py' | |||
3308 | === added directory 'tests/charmhelpers/contrib/amulet' | |||
3309 | === added file 'tests/charmhelpers/contrib/amulet/__init__.py' | |||
3310 | === added file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
3311 | --- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
3312 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-29 13:07:23 +0000 | |||
3313 | @@ -0,0 +1,58 @@ | |||
3314 | 1 | import amulet | ||
3315 | 2 | |||
3316 | 3 | |||
class AmuletDeployment(object):
    """Generic Amulet deployment and test-runner helper.

    Subclasses describe a deployment (services, relations, config) and
    implement test_* methods which run_tests() discovers and executes.
    """

    def __init__(self, series=None):
        """Initialize the deployment environment.

        :param series: optional Ubuntu series (e.g. 'precise', 'trusty');
            when given, store charms are deployed from that series.
        """
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm under test; other_services come
        from the charm store.  Both are (name, num_units) tuples.
        """
        name, units = range(2)
        self.this_service = this_service[name]
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            if self.series:
                self.d.add(svc[name],
                           charm='cs:{}/{}'.format(self.series, svc[name]),
                           units=svc[units])
            else:
                self.d.add(svc[name], units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        # items() works on both Python 2 and 3; iteritems() is py2-only.
        for k, v in relations.items():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.items():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        # The no-op 'except: raise' clause that used to follow has been
        # removed; unexpected exceptions propagate regardless.
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
3372 | 0 | 59 | ||
3373 | === added file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
3374 | --- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
3375 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-29 13:07:23 +0000 | |||
3376 | @@ -0,0 +1,157 @@ | |||
3377 | 1 | import ConfigParser | ||
3378 | 2 | import io | ||
3379 | 3 | import logging | ||
3380 | 4 | import re | ||
3381 | 5 | import sys | ||
3382 | 6 | from time import sleep | ||
3383 | 7 | |||
3384 | 8 | |||
class AmuletUtils(object):
    """Common utility functions used by Amulet tests: simple validators
    for IPs/URLs, config-file and relation-data checkers, and service
    restart detection."""

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address.

        NOTE(review): octets are not range-checked (e.g. '999.1.1.1'
        passes) — kept as-is to preserve behavior.
        """
        if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
            return True
        else:
            return False

    def valid_url(self, url):
        """Return True if url is a well-formed http/https/ftp/ftps URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        if p.match(url):
            return True
        else:
            return False

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units.

        :param commands: dict of sentry unit -> list of status commands.
        :returns: error string on first failing command, else None.
        """
        for k, v in commands.items():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section,
                             expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs.

        :returns: error string on first mismatch, else None.
        """
        config = self._get_config(sentry_unit, config_file)

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.
        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluate a variable and returns a
        bool.

        :returns: "key:actual_value" string on mismatch, else None.
        """
        for k, v in expected.items():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    # v is a validator callable; it rejected the value.
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None (PEP 8: identity check,
        previously 'string != None')."""
        if string is not None:
            return True
        else:
            return False

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=10):
        """Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.

        :param sleep_time: seconds to wait before sampling, giving the
            service time to restart (default 10, the previous fixed wait).
            Added so callers that pass sleep_time= no longer raise
            TypeError.
        """
        sleep(sleep_time)
        if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
                self._get_file_mtime(sentry_unit, filename):
            return True
        else:
            return False

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
3534 | 0 | 158 | ||
3535 | === added directory 'tests/charmhelpers/contrib/openstack' | |||
3536 | === added file 'tests/charmhelpers/contrib/openstack/__init__.py' | |||
3537 | === added directory 'tests/charmhelpers/contrib/openstack/amulet' | |||
3538 | === added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
3539 | === added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
3540 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
3541 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-29 13:07:23 +0000 | |||
3542 | @@ -0,0 +1,55 @@ | |||
3543 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
3544 | 2 | AmuletDeployment | ||
3545 | 3 | ) | ||
3546 | 4 | |||
3547 | 5 | |||
3548 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
3549 | 7 | """This class inherits from AmuletDeployment and has additional support | ||
3550 | 8 | that is specifically for use by OpenStack charms.""" | ||
3551 | 9 | |||
3552 | 10 | def __init__(self, series=None, openstack=None, source=None): | ||
3553 | 11 | """Initialize the deployment environment.""" | ||
3554 | 12 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
3555 | 13 | self.openstack = openstack | ||
3556 | 14 | self.source = source | ||
3557 | 15 | |||
3558 | 16 | def _add_services(self, this_service, other_services): | ||
3559 | 17 | """Add services to the deployment and set openstack-origin.""" | ||
3560 | 18 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
3561 | 19 | other_services) | ||
3562 | 20 | name = 0 | ||
3563 | 21 | services = other_services | ||
3564 | 22 | services.append(this_service) | ||
3565 | 23 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
3566 | 24 | |||
3567 | 25 | if self.openstack: | ||
3568 | 26 | for svc in services: | ||
3569 | 27 | if svc[name] not in use_source: | ||
3570 | 28 | config = {'openstack-origin': self.openstack} | ||
3571 | 29 | self.d.configure(svc[name], config) | ||
3572 | 30 | |||
3573 | 31 | if self.source: | ||
3574 | 32 | for svc in services: | ||
3575 | 33 | if svc[name] in use_source: | ||
3576 | 34 | config = {'source': self.source} | ||
3577 | 35 | self.d.configure(svc[name], config) | ||
3578 | 36 | |||
3579 | 37 | def _configure_services(self, configs): | ||
3580 | 38 | """Configure all of the services.""" | ||
3581 | 39 | for service, config in configs.iteritems(): | ||
3582 | 40 | self.d.configure(service, config) | ||
3583 | 41 | |||
3584 | 42 | def _get_openstack_release(self): | ||
3585 | 43 | """Return an integer representing the enum value of the openstack | ||
3586 | 44 | release.""" | ||
3587 | 45 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | ||
3588 | 46 | self.precise_havana, self.precise_icehouse, \ | ||
3589 | 47 | self.trusty_icehouse = range(6) | ||
3590 | 48 | releases = { | ||
3591 | 49 | ('precise', None): self.precise_essex, | ||
3592 | 50 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
3593 | 51 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
3594 | 52 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
3595 | 53 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
3596 | 54 | ('trusty', None): self.trusty_icehouse} | ||
3597 | 55 | return releases[(self.series, self.openstack)] | ||
3598 | 0 | 56 | ||
3599 | === added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
3600 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
3601 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-29 13:07:23 +0000 | |||
3602 | @@ -0,0 +1,209 @@ | |||
3603 | 1 | import logging | ||
3604 | 2 | import os | ||
3605 | 3 | import time | ||
3606 | 4 | import urllib | ||
3607 | 5 | |||
3608 | 6 | import glanceclient.v1.client as glance_client | ||
3609 | 7 | import keystoneclient.v2_0 as keystone_client | ||
3610 | 8 | import novaclient.v1_1.client as nova_client | ||
3611 | 9 | |||
3612 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
3613 | 11 | AmuletUtils | ||
3614 | 12 | ) | ||
3615 | 13 | |||
3616 | 14 | DEBUG = logging.DEBUG | ||
3617 | 15 | ERROR = logging.ERROR | ||
3618 | 16 | |||
3619 | 17 | |||
3620 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
3621 | 19 | """This class inherits from AmuletUtils and has additional support | ||
3622 | 20 | that is specifically for use by OpenStack charms.""" | ||
3623 | 21 | |||
    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment.

        :param log_level: logging level forwarded to AmuletUtils
            (default ERROR).
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
3627 | 25 | |||
3628 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
3629 | 27 | public_port, expected): | ||
3630 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | ||
3631 | 29 | are used to find the matching endpoint.""" | ||
3632 | 30 | found = False | ||
3633 | 31 | for ep in endpoints: | ||
3634 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
3635 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | ||
3636 | 34 | and public_port in ep.publicurl: | ||
3637 | 35 | found = True | ||
3638 | 36 | actual = {'id': ep.id, | ||
3639 | 37 | 'region': ep.region, | ||
3640 | 38 | 'adminurl': ep.adminurl, | ||
3641 | 39 | 'internalurl': ep.internalurl, | ||
3642 | 40 | 'publicurl': ep.publicurl, | ||
3643 | 41 | 'service_id': ep.service_id} | ||
3644 | 42 | ret = self._validate_dict_data(expected, actual) | ||
3645 | 43 | if ret: | ||
3646 | 44 | return 'unexpected endpoint data - {}'.format(ret) | ||
3647 | 45 | |||
3648 | 46 | if not found: | ||
3649 | 47 | return 'endpoint not found' | ||
3650 | 48 | |||
3651 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
3652 | 50 | """Validate a list of actual service catalog endpoints vs a list of | ||
3653 | 51 | expected service catalog endpoints.""" | ||
3654 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | ||
3655 | 53 | for k, v in expected.iteritems(): | ||
3656 | 54 | if k in actual: | ||
3657 | 55 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
3658 | 56 | if ret: | ||
3659 | 57 | return self.endpoint_error(k, ret) | ||
3660 | 58 | else: | ||
3661 | 59 | return "endpoint {} does not exist".format(k) | ||
3662 | 60 | return ret | ||
3663 | 61 | |||
3664 | 62 | def validate_tenant_data(self, expected, actual): | ||
3665 | 63 | """Validate a list of actual tenant data vs list of expected tenant | ||
3666 | 64 | data.""" | ||
3667 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | ||
3668 | 66 | for e in expected: | ||
3669 | 67 | found = False | ||
3670 | 68 | for act in actual: | ||
3671 | 69 | a = {'enabled': act.enabled, 'description': act.description, | ||
3672 | 70 | 'name': act.name, 'id': act.id} | ||
3673 | 71 | if e['name'] == a['name']: | ||
3674 | 72 | found = True | ||
3675 | 73 | ret = self._validate_dict_data(e, a) | ||
3676 | 74 | if ret: | ||
3677 | 75 | return "unexpected tenant data - {}".format(ret) | ||
3678 | 76 | if not found: | ||
3679 | 77 | return "tenant {} does not exist".format(e['name']) | ||
3680 | 78 | return ret | ||
3681 | 79 | |||
3682 | 80 | def validate_role_data(self, expected, actual): | ||
3683 | 81 | """Validate a list of actual role data vs a list of expected role | ||
3684 | 82 | data.""" | ||
3685 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | ||
3686 | 84 | for e in expected: | ||
3687 | 85 | found = False | ||
3688 | 86 | for act in actual: | ||
3689 | 87 | a = {'name': act.name, 'id': act.id} | ||
3690 | 88 | if e['name'] == a['name']: | ||
3691 | 89 | found = True | ||
3692 | 90 | ret = self._validate_dict_data(e, a) | ||
3693 | 91 | if ret: | ||
3694 | 92 | return "unexpected role data - {}".format(ret) | ||
3695 | 93 | if not found: | ||
3696 | 94 | return "role {} does not exist".format(e['name']) | ||
3697 | 95 | return ret | ||
3698 | 96 | |||
3699 | 97 | def validate_user_data(self, expected, actual): | ||
3700 | 98 | """Validate a list of actual user data vs a list of expected user | ||
3701 | 99 | data.""" | ||
3702 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | ||
3703 | 101 | for e in expected: | ||
3704 | 102 | found = False | ||
3705 | 103 | for act in actual: | ||
3706 | 104 | a = {'enabled': act.enabled, 'name': act.name, | ||
3707 | 105 | 'email': act.email, 'tenantId': act.tenantId, | ||
3708 | 106 | 'id': act.id} | ||
3709 | 107 | if e['name'] == a['name']: | ||
3710 | 108 | found = True | ||
3711 | 109 | ret = self._validate_dict_data(e, a) | ||
3712 | 110 | if ret: | ||
3713 | 111 | return "unexpected user data - {}".format(ret) | ||
3714 | 112 | if not found: | ||
3715 | 113 | return "user {} does not exist".format(e['name']) | ||
3716 | 114 | return ret | ||
3717 | 115 | |||
3718 | 116 | def validate_flavor_data(self, expected, actual): | ||
3719 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | ||
3720 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | ||
3721 | 119 | act = [a.name for a in actual] | ||
3722 | 120 | return self._validate_list_data(expected, act) | ||
3723 | 121 | |||
3724 | 122 | def tenant_exists(self, keystone, tenant): | ||
3725 | 123 | """Return True if tenant exists""" | ||
3726 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | ||
3727 | 125 | |||
3728 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
3729 | 127 | tenant): | ||
3730 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | ||
3731 | 129 | service_ip = \ | ||
3732 | 130 | keystone_sentry.relation('shared-db', | ||
3733 | 131 | 'mysql:shared-db')['private-address'] | ||
3734 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
3735 | 133 | return keystone_client.Client(username=user, password=password, | ||
3736 | 134 | tenant_name=tenant, auth_url=ep) | ||
3737 | 135 | |||
3738 | 136 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
3739 | 137 | """Authenticates a regular user with the keystone public endpoint.""" | ||
3740 | 138 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
3741 | 139 | endpoint_type='publicURL') | ||
3742 | 140 | return keystone_client.Client(username=user, password=password, | ||
3743 | 141 | tenant_name=tenant, auth_url=ep) | ||
3744 | 142 | |||
3745 | 143 | def authenticate_glance_admin(self, keystone): | ||
3746 | 144 | """Authenticates admin user with glance.""" | ||
3747 | 145 | ep = keystone.service_catalog.url_for(service_type='image', | ||
3748 | 146 | endpoint_type='adminURL') | ||
3749 | 147 | return glance_client.Client(ep, token=keystone.auth_token) | ||
3750 | 148 | |||
3751 | 149 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
3752 | 150 | """Authenticates a regular user with nova-api.""" | ||
3753 | 151 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
3754 | 152 | endpoint_type='publicURL') | ||
3755 | 153 | return nova_client.Client(username=user, api_key=password, | ||
3756 | 154 | project_id=tenant, auth_url=ep) | ||
3757 | 155 | |||
3758 | 156 | def create_cirros_image(self, glance, image_name): | ||
3759 | 157 | """Download the latest cirros image and upload it to glance.""" | ||
3760 | 158 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
3761 | 159 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
3762 | 160 | if http_proxy: | ||
3763 | 161 | proxies = {'http': http_proxy} | ||
3764 | 162 | opener = urllib.FancyURLopener(proxies) | ||
3765 | 163 | else: | ||
3766 | 164 | opener = urllib.FancyURLopener() | ||
3767 | 165 | |||
3768 | 166 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
3769 | 167 | version = f.read().strip() | ||
3770 | 168 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | ||
3771 | 169 | |||
3772 | 170 | if not os.path.exists(cirros_img): | ||
3773 | 171 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
3774 | 172 | version, cirros_img) | ||
3775 | 173 | opener.retrieve(cirros_url, cirros_img) | ||
3776 | 174 | f.close() | ||
3777 | 175 | |||
3778 | 176 | with open(cirros_img) as f: | ||
3779 | 177 | image = glance.images.create(name=image_name, is_public=True, | ||
3780 | 178 | disk_format='qcow2', | ||
3781 | 179 | container_format='bare', data=f) | ||
3782 | 180 | return image | ||
3783 | 181 | |||
3784 | 182 | def delete_image(self, glance, image): | ||
3785 | 183 | """Delete the specified image.""" | ||
3786 | 184 | glance.images.delete(image) | ||
3787 | 185 | |||
3788 | 186 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
3789 | 187 | """Create the specified instance.""" | ||
3790 | 188 | image = nova.images.find(name=image_name) | ||
3791 | 189 | flavor = nova.flavors.find(name=flavor) | ||
3792 | 190 | instance = nova.servers.create(name=instance_name, image=image, | ||
3793 | 191 | flavor=flavor) | ||
3794 | 192 | |||
3795 | 193 | count = 1 | ||
3796 | 194 | status = instance.status | ||
3797 | 195 | while status != 'ACTIVE' and count < 60: | ||
3798 | 196 | time.sleep(3) | ||
3799 | 197 | instance = nova.servers.get(instance.id) | ||
3800 | 198 | status = instance.status | ||
3801 | 199 | self.log.debug('instance status: {}'.format(status)) | ||
3802 | 200 | count += 1 | ||
3803 | 201 | |||
3804 | 202 | if status == 'BUILD': | ||
3805 | 203 | return None | ||
3806 | 204 | |||
3807 | 205 | return instance | ||
3808 | 206 | |||
3809 | 207 | def delete_instance(self, nova, instance): | ||
3810 | 208 | """Delete the specified instance.""" | ||
3811 | 209 | nova.servers.delete(instance) | ||
3812 | 0 | 210 | ||
3813 | === modified file 'unit_tests/test_nova_cc_hooks.py' | |||
3814 | --- unit_tests/test_nova_cc_hooks.py 2014-05-21 10:03:01 +0000 | |||
3815 | +++ unit_tests/test_nova_cc_hooks.py 2014-07-29 13:07:23 +0000 | |||
3816 | @@ -1,6 +1,6 @@ | |||
3820 | 1 | from mock import MagicMock, patch | 1 | from mock import MagicMock, patch, call |
3821 | 2 | from test_utils import CharmTestCase | 2 | from test_utils import CharmTestCase, patch_open |
3822 | 3 | 3 | import os | |
3823 | 4 | with patch('charmhelpers.core.hookenv.config') as config: | 4 | with patch('charmhelpers.core.hookenv.config') as config: |
3824 | 5 | config.return_value = 'neutron' | 5 | config.return_value = 'neutron' |
3825 | 6 | import nova_cc_utils as utils | 6 | import nova_cc_utils as utils |
3826 | @@ -11,7 +11,11 @@ | |||
3827 | 11 | utils.register_configs = MagicMock() | 11 | utils.register_configs = MagicMock() |
3828 | 12 | utils.restart_map = MagicMock() | 12 | utils.restart_map = MagicMock() |
3829 | 13 | 13 | ||
3831 | 14 | import nova_cc_hooks as hooks | 14 | with patch('nova_cc_utils.guard_map') as gmap: |
3832 | 15 | with patch('charmhelpers.core.hookenv.config') as config: | ||
3833 | 16 | config.return_value = False | ||
3834 | 17 | gmap.return_value = {} | ||
3835 | 18 | import nova_cc_hooks as hooks | ||
3836 | 15 | 19 | ||
3837 | 16 | utils.register_configs = _reg | 20 | utils.register_configs = _reg |
3838 | 17 | utils.restart_map = _map | 21 | utils.restart_map = _map |
3839 | @@ -35,9 +39,11 @@ | |||
3840 | 35 | 'relation_set', | 39 | 'relation_set', |
3841 | 36 | 'relation_ids', | 40 | 'relation_ids', |
3842 | 37 | 'ssh_compute_add', | 41 | 'ssh_compute_add', |
3845 | 38 | 'ssh_known_hosts_b64', | 42 | 'ssh_known_hosts_lines', |
3846 | 39 | 'ssh_authorized_keys_b64', | 43 | 'ssh_authorized_keys_lines', |
3847 | 40 | 'save_script_rc', | 44 | 'save_script_rc', |
3848 | 45 | 'service_running', | ||
3849 | 46 | 'service_stop', | ||
3850 | 41 | 'execd_preinstall', | 47 | 'execd_preinstall', |
3851 | 42 | 'network_manager', | 48 | 'network_manager', |
3852 | 43 | 'volume_service', | 49 | 'volume_service', |
3853 | @@ -98,15 +104,64 @@ | |||
3854 | 98 | self.test_relation.set({ | 104 | self.test_relation.set({ |
3855 | 99 | 'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey', | 105 | 'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey', |
3856 | 100 | 'private-address': '10.0.0.1'}) | 106 | 'private-address': '10.0.0.1'}) |
3863 | 101 | self.ssh_known_hosts_b64.return_value = 'hosts' | 107 | self.ssh_known_hosts_lines.return_value = [ |
3864 | 102 | self.ssh_authorized_keys_b64.return_value = 'keys' | 108 | 'k_h_0', 'k_h_1', 'k_h_2'] |
3865 | 103 | hooks.compute_changed() | 109 | self.ssh_authorized_keys_lines.return_value = [ |
3866 | 104 | self.ssh_compute_add.assert_called_with('fookey') | 110 | 'auth_0', 'auth_1', 'auth_2'] |
3867 | 105 | self.relation_set.assert_called_with(known_hosts='hosts', | 111 | hooks.compute_changed() |
3868 | 106 | authorized_keys='keys') | 112 | self.ssh_compute_add.assert_called_with('fookey', rid=None, unit=None) |
3869 | 113 | expected_relations = [ | ||
3870 | 114 | call(relation_settings={'authorized_keys_0': 'auth_0'}, | ||
3871 | 115 | relation_id=None), | ||
3872 | 116 | call(relation_settings={'authorized_keys_1': 'auth_1'}, | ||
3873 | 117 | relation_id=None), | ||
3874 | 118 | call(relation_settings={'authorized_keys_2': 'auth_2'}, | ||
3875 | 119 | relation_id=None), | ||
3876 | 120 | call(relation_settings={'known_hosts_0': 'k_h_0'}, | ||
3877 | 121 | relation_id=None), | ||
3878 | 122 | call(relation_settings={'known_hosts_1': 'k_h_1'}, | ||
3879 | 123 | relation_id=None), | ||
3880 | 124 | call(relation_settings={'known_hosts_2': 'k_h_2'}, | ||
3881 | 125 | relation_id=None), | ||
3882 | 126 | call(authorized_keys_max_index=3, relation_id=None), | ||
3883 | 127 | call(known_hosts_max_index=3, relation_id=None)] | ||
3884 | 128 | self.assertEquals(sorted(self.relation_set.call_args_list), | ||
3885 | 129 | sorted(expected_relations)) | ||
3886 | 130 | |||
3887 | 131 | def test_compute_changed_nova_public_key(self): | ||
3888 | 132 | self.test_relation.set({ | ||
3889 | 133 | 'migration_auth_type': 'sasl', 'nova_ssh_public_key': 'fookey', | ||
3890 | 134 | 'private-address': '10.0.0.1'}) | ||
3891 | 135 | self.ssh_known_hosts_lines.return_value = [ | ||
3892 | 136 | 'k_h_0', 'k_h_1', 'k_h_2'] | ||
3893 | 137 | self.ssh_authorized_keys_lines.return_value = [ | ||
3894 | 138 | 'auth_0', 'auth_1', 'auth_2'] | ||
3895 | 139 | hooks.compute_changed() | ||
3896 | 140 | self.ssh_compute_add.assert_called_with('fookey', user='nova', | ||
3897 | 141 | rid=None, unit=None) | ||
3898 | 142 | expected_relations = [ | ||
3899 | 143 | call(relation_settings={'nova_authorized_keys_0': 'auth_0'}, | ||
3900 | 144 | relation_id=None), | ||
3901 | 145 | call(relation_settings={'nova_authorized_keys_1': 'auth_1'}, | ||
3902 | 146 | relation_id=None), | ||
3903 | 147 | call(relation_settings={'nova_authorized_keys_2': 'auth_2'}, | ||
3904 | 148 | relation_id=None), | ||
3905 | 149 | call(relation_settings={'nova_known_hosts_0': 'k_h_0'}, | ||
3906 | 150 | relation_id=None), | ||
3907 | 151 | call(relation_settings={'nova_known_hosts_1': 'k_h_1'}, | ||
3908 | 152 | relation_id=None), | ||
3909 | 153 | call(relation_settings={'nova_known_hosts_2': 'k_h_2'}, | ||
3910 | 154 | relation_id=None), | ||
3911 | 155 | call(relation_settings={'nova_known_hosts_max_index': 3}, | ||
3912 | 156 | relation_id=None), | ||
3913 | 157 | call(relation_settings={'nova_authorized_keys_max_index': 3}, | ||
3914 | 158 | relation_id=None)] | ||
3915 | 159 | self.assertEquals(sorted(self.relation_set.call_args_list), | ||
3916 | 160 | sorted(expected_relations)) | ||
3917 | 107 | 161 | ||
3918 | 108 | @patch.object(hooks, '_auth_config') | 162 | @patch.object(hooks, '_auth_config') |
3919 | 109 | def test_compute_joined_neutron(self, auth_config): | 163 | def test_compute_joined_neutron(self, auth_config): |
3920 | 164 | self.is_relation_made.return_value = False | ||
3921 | 110 | self.network_manager.return_value = 'neutron' | 165 | self.network_manager.return_value = 'neutron' |
3922 | 111 | self.eligible_leader = True | 166 | self.eligible_leader = True |
3923 | 112 | self.keystone_ca_cert_b64.return_value = 'foocert64' | 167 | self.keystone_ca_cert_b64.return_value = 'foocert64' |
3924 | @@ -122,6 +177,8 @@ | |||
3925 | 122 | relation_id=None, | 177 | relation_id=None, |
3926 | 123 | quantum_url='http://nova-cc-host1:9696', | 178 | quantum_url='http://nova-cc-host1:9696', |
3927 | 124 | ca_cert='foocert64', | 179 | ca_cert='foocert64', |
3928 | 180 | quantum_port=9696, | ||
3929 | 181 | quantum_host='nova-cc-host1', | ||
3930 | 125 | quantum_security_groups='no', | 182 | quantum_security_groups='no', |
3931 | 126 | region='RegionOne', | 183 | region='RegionOne', |
3932 | 127 | volume_service='cinder', | 184 | volume_service='cinder', |
3933 | @@ -129,6 +186,40 @@ | |||
3934 | 129 | quantum_plugin='nvp', | 186 | quantum_plugin='nvp', |
3935 | 130 | network_manager='neutron', **FAKE_KS_AUTH_CFG) | 187 | network_manager='neutron', **FAKE_KS_AUTH_CFG) |
3936 | 131 | 188 | ||
3937 | 189 | @patch.object(hooks, 'NeutronAPIContext') | ||
3938 | 190 | @patch.object(hooks, '_auth_config') | ||
3939 | 191 | def test_compute_joined_neutron_api_rel(self, auth_config, napi): | ||
3940 | 192 | def mock_NeutronAPIContext(): | ||
3941 | 193 | return { | ||
3942 | 194 | 'neutron_plugin': 'bob', | ||
3943 | 195 | 'neutron_security_groups': 'yes', | ||
3944 | 196 | 'neutron_url': 'http://nova-cc-host1:9696', | ||
3945 | 197 | } | ||
3946 | 198 | napi.return_value = mock_NeutronAPIContext | ||
3947 | 199 | self.is_relation_made.return_value = True | ||
3948 | 200 | self.network_manager.return_value = 'neutron' | ||
3949 | 201 | self.eligible_leader = True | ||
3950 | 202 | self.keystone_ca_cert_b64.return_value = 'foocert64' | ||
3951 | 203 | self.volume_service.return_value = 'cinder' | ||
3952 | 204 | self.unit_get.return_value = 'nova-cc-host1' | ||
3953 | 205 | self.canonical_url.return_value = 'http://nova-cc-host1' | ||
3954 | 206 | self.api_port.return_value = '9696' | ||
3955 | 207 | self.neutron_plugin.return_value = 'nvp' | ||
3956 | 208 | auth_config.return_value = FAKE_KS_AUTH_CFG | ||
3957 | 209 | hooks.compute_joined() | ||
3958 | 210 | self.relation_set.assert_called_with( | ||
3959 | 211 | relation_id=None, | ||
3960 | 212 | quantum_url='http://nova-cc-host1:9696', | ||
3961 | 213 | ca_cert='foocert64', | ||
3962 | 214 | quantum_port=9696, | ||
3963 | 215 | quantum_host='nova-cc-host1', | ||
3964 | 216 | quantum_security_groups='yes', | ||
3965 | 217 | region='RegionOne', | ||
3966 | 218 | volume_service='cinder', | ||
3967 | 219 | ec2_host='nova-cc-host1', | ||
3968 | 220 | quantum_plugin='bob', | ||
3969 | 221 | network_manager='neutron', **FAKE_KS_AUTH_CFG) | ||
3970 | 222 | |||
3971 | 132 | @patch.object(hooks, '_auth_config') | 223 | @patch.object(hooks, '_auth_config') |
3972 | 133 | def test_nova_vmware_joined(self, auth_config): | 224 | def test_nova_vmware_joined(self, auth_config): |
3973 | 134 | auth_config.return_value = FAKE_KS_AUTH_CFG | 225 | auth_config.return_value = FAKE_KS_AUTH_CFG |
3974 | @@ -231,3 +322,46 @@ | |||
3975 | 231 | self._postgresql_db_test(configs) | 322 | self._postgresql_db_test(configs) |
3976 | 232 | self.assertTrue(configs.write_all.called) | 323 | self.assertTrue(configs.write_all.called) |
3977 | 233 | self.migrate_database.assert_called_with() | 324 | self.migrate_database.assert_called_with() |
3978 | 325 | |||
3979 | 326 | @patch.object(os, 'rename') | ||
3980 | 327 | @patch.object(os.path, 'isfile') | ||
3981 | 328 | @patch.object(hooks, 'CONFIGS') | ||
3982 | 329 | def test_neutron_api_relation_joined(self, configs, isfile, rename): | ||
3983 | 330 | neutron_conf = '/etc/neutron/neutron.conf' | ||
3984 | 331 | nova_url = 'http://novaurl:8774/v2' | ||
3985 | 332 | isfile.return_value = True | ||
3986 | 333 | self.service_running.return_value = True | ||
3987 | 334 | _identity_joined = self.patch('identity_joined') | ||
3988 | 335 | self.relation_ids.side_effect = ['relid'] | ||
3989 | 336 | self.canonical_url.return_value = 'http://novaurl' | ||
3990 | 337 | with patch_open() as (_open, _file): | ||
3991 | 338 | hooks.neutron_api_relation_joined() | ||
3992 | 339 | self.service_stop.assert_called_with('neutron-server') | ||
3993 | 340 | rename.assert_called_with(neutron_conf, neutron_conf + '_unused') | ||
3994 | 341 | self.assertTrue(_identity_joined.called) | ||
3995 | 342 | self.relation_set.assert_called_with(relation_id=None, | ||
3996 | 343 | nova_url=nova_url) | ||
3997 | 344 | |||
3998 | 345 | @patch.object(hooks, 'CONFIGS') | ||
3999 | 346 | def test_neutron_api_relation_changed(self, configs): | ||
4000 | 347 | self.relation_ids.return_value = ['relid'] | ||
4001 | 348 | _compute_joined = self.patch('compute_joined') | ||
4002 | 349 | _quantum_joined = self.patch('quantum_joined') | ||
4003 | 350 | hooks.neutron_api_relation_changed() | ||
4004 | 351 | self.assertTrue(configs.write.called_with('/etc/nova/nova.conf')) | ||
4005 | 352 | self.assertTrue(_compute_joined.called) | ||
4006 | 353 | self.assertTrue(_quantum_joined.called) | ||
4007 | 354 | |||
4008 | 355 | @patch.object(os, 'remove') | ||
4009 | 356 | @patch.object(os.path, 'isfile') | ||
4010 | 357 | @patch.object(hooks, 'CONFIGS') | ||
4011 | 358 | def test_neutron_api_relation_broken(self, configs, isfile, remove): | ||
4012 | 359 | isfile.return_value = True | ||
4013 | 360 | self.relation_ids.return_value = ['relid'] | ||
4014 | 361 | _compute_joined = self.patch('compute_joined') | ||
4015 | 362 | _quantum_joined = self.patch('quantum_joined') | ||
4016 | 363 | hooks.neutron_api_relation_broken() | ||
4017 | 364 | remove.assert_called_with('/etc/init/neutron-server.override') | ||
4018 | 365 | self.assertTrue(configs.write_all.called) | ||
4019 | 366 | self.assertTrue(_compute_joined.called) | ||
4020 | 367 | self.assertTrue(_quantum_joined.called) | ||
4021 | 234 | 368 | ||
4022 | === modified file 'unit_tests/test_nova_cc_utils.py' | |||
4023 | --- unit_tests/test_nova_cc_utils.py 2014-05-02 10:06:23 +0000 | |||
4024 | +++ unit_tests/test_nova_cc_utils.py 2014-07-29 13:07:23 +0000 | |||
4025 | @@ -22,6 +22,7 @@ | |||
4026 | 22 | 'eligible_leader', | 22 | 'eligible_leader', |
4027 | 23 | 'enable_policy_rcd', | 23 | 'enable_policy_rcd', |
4028 | 24 | 'get_os_codename_install_source', | 24 | 'get_os_codename_install_source', |
4029 | 25 | 'is_relation_made', | ||
4030 | 25 | 'log', | 26 | 'log', |
4031 | 26 | 'ml2_migration', | 27 | 'ml2_migration', |
4032 | 27 | 'network_manager', | 28 | 'network_manager', |
4033 | @@ -34,7 +35,9 @@ | |||
4034 | 34 | 'remote_unit', | 35 | 'remote_unit', |
4035 | 35 | '_save_script_rc', | 36 | '_save_script_rc', |
4036 | 36 | 'service_start', | 37 | 'service_start', |
4038 | 37 | 'services' | 38 | 'services', |
4039 | 39 | 'service_running', | ||
4040 | 40 | 'service_stop' | ||
4041 | 38 | ] | 41 | ] |
4042 | 39 | 42 | ||
4043 | 40 | SCRIPTRC_ENV_VARS = { | 43 | SCRIPTRC_ENV_VARS = { |
4044 | @@ -151,6 +154,7 @@ | |||
4045 | 151 | 154 | ||
4046 | 152 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | 155 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') |
4047 | 153 | def test_resource_map_quantum(self, subcontext): | 156 | def test_resource_map_quantum(self, subcontext): |
4048 | 157 | self.is_relation_made.return_value = False | ||
4049 | 154 | self._resource_map(network_manager='quantum') | 158 | self._resource_map(network_manager='quantum') |
4050 | 155 | _map = utils.resource_map() | 159 | _map = utils.resource_map() |
4051 | 156 | confs = [ | 160 | confs = [ |
4052 | @@ -162,6 +166,7 @@ | |||
4053 | 162 | 166 | ||
4054 | 163 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | 167 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') |
4055 | 164 | def test_resource_map_neutron(self, subcontext): | 168 | def test_resource_map_neutron(self, subcontext): |
4056 | 169 | self.is_relation_made.return_value = False | ||
4057 | 165 | self._resource_map(network_manager='neutron') | 170 | self._resource_map(network_manager='neutron') |
4058 | 166 | _map = utils.resource_map() | 171 | _map = utils.resource_map() |
4059 | 167 | confs = [ | 172 | confs = [ |
4060 | @@ -170,6 +175,17 @@ | |||
4061 | 170 | [self.assertIn(q_conf, _map.keys()) for q_conf in confs] | 175 | [self.assertIn(q_conf, _map.keys()) for q_conf in confs] |
4062 | 171 | 176 | ||
4063 | 172 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | 177 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') |
4064 | 178 | def test_resource_map_neutron_api_rel(self, subcontext): | ||
4065 | 179 | self.is_relation_made.return_value = True | ||
4066 | 180 | self._resource_map(network_manager='neutron') | ||
4067 | 181 | _map = utils.resource_map() | ||
4068 | 182 | confs = [ | ||
4069 | 183 | '/etc/neutron/neutron.conf', | ||
4070 | 184 | ] | ||
4071 | 185 | for q_conf in confs: | ||
4072 | 186 | self.assertFalse(q_conf in _map.keys()) | ||
4073 | 187 | |||
4074 | 188 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | ||
4075 | 173 | def test_resource_map_vmware(self, subcontext): | 189 | def test_resource_map_vmware(self, subcontext): |
4076 | 174 | fake_context = MagicMock() | 190 | fake_context = MagicMock() |
4077 | 175 | fake_context.return_value = { | 191 | fake_context.return_value = { |
4078 | @@ -201,6 +217,7 @@ | |||
4079 | 201 | @patch('os.path.exists') | 217 | @patch('os.path.exists') |
4080 | 202 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | 218 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') |
4081 | 203 | def test_restart_map_api_before_frontends(self, subcontext, _exists): | 219 | def test_restart_map_api_before_frontends(self, subcontext, _exists): |
4082 | 220 | self.is_relation_made.return_value = False | ||
4083 | 204 | _exists.return_value = False | 221 | _exists.return_value = False |
4084 | 205 | self._resource_map(network_manager='neutron') | 222 | self._resource_map(network_manager='neutron') |
4085 | 206 | _map = utils.restart_map() | 223 | _map = utils.restart_map() |
4086 | @@ -226,6 +243,7 @@ | |||
4087 | 226 | 243 | ||
4088 | 227 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') | 244 | @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext') |
4089 | 228 | def test_determine_packages_neutron(self, subcontext): | 245 | def test_determine_packages_neutron(self, subcontext): |
4090 | 246 | self.is_relation_made.return_value = False | ||
4091 | 229 | self._resource_map(network_manager='neutron') | 247 | self._resource_map(network_manager='neutron') |
4092 | 230 | pkgs = utils.determine_packages() | 248 | pkgs = utils.determine_packages() |
4093 | 231 | self.assertIn('neutron-server', pkgs) | 249 | self.assertIn('neutron-server', pkgs) |
4094 | @@ -321,8 +339,8 @@ | |||
4095 | 321 | check_output.return_value = 'fookey' | 339 | check_output.return_value = 'fookey' |
4096 | 322 | host_key.return_value = 'fookey_old' | 340 | host_key.return_value = 'fookey_old' |
4097 | 323 | with patch_open() as (_open, _file): | 341 | with patch_open() as (_open, _file): |
4100 | 324 | utils.add_known_host('foohost') | 342 | utils.add_known_host('foohost', None, None) |
4101 | 325 | rm.assert_called_with('foohost', None) | 343 | rm.assert_called_with('foohost', None, None) |
4102 | 326 | 344 | ||
4103 | 327 | @patch.object(utils, 'known_hosts') | 345 | @patch.object(utils, 'known_hosts') |
4104 | 328 | @patch.object(utils, 'remove_known_host') | 346 | @patch.object(utils, 'remove_known_host') |
4105 | @@ -355,19 +373,19 @@ | |||
4106 | 355 | def test_known_hosts(self, ssh_dir): | 373 | def test_known_hosts(self, ssh_dir): |
4107 | 356 | ssh_dir.return_value = '/tmp/foo' | 374 | ssh_dir.return_value = '/tmp/foo' |
4108 | 357 | self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts') | 375 | self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts') |
4110 | 358 | ssh_dir.assert_called_with(None) | 376 | ssh_dir.assert_called_with(None, None) |
4111 | 359 | self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts') | 377 | self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts') |
4113 | 360 | ssh_dir.assert_called_with('bar') | 378 | ssh_dir.assert_called_with('bar', None) |
4114 | 361 | 379 | ||
4115 | 362 | @patch.object(utils, 'ssh_directory_for_unit') | 380 | @patch.object(utils, 'ssh_directory_for_unit') |
4116 | 363 | def test_authorized_keys(self, ssh_dir): | 381 | def test_authorized_keys(self, ssh_dir): |
4117 | 364 | ssh_dir.return_value = '/tmp/foo' | 382 | ssh_dir.return_value = '/tmp/foo' |
4118 | 365 | self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys') | 383 | self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys') |
4120 | 366 | ssh_dir.assert_called_with(None) | 384 | ssh_dir.assert_called_with(None, None) |
4121 | 367 | self.assertEquals( | 385 | self.assertEquals( |
4122 | 368 | utils.authorized_keys('bar'), | 386 | utils.authorized_keys('bar'), |
4123 | 369 | '/tmp/foo/authorized_keys') | 387 | '/tmp/foo/authorized_keys') |
4125 | 370 | ssh_dir.assert_called_with('bar') | 388 | ssh_dir.assert_called_with('bar', None) |
4126 | 371 | 389 | ||
4127 | 372 | @patch.object(utils, 'known_hosts') | 390 | @patch.object(utils, 'known_hosts') |
4128 | 373 | @patch('subprocess.check_call') | 391 | @patch('subprocess.check_call') |
4129 | @@ -421,11 +439,15 @@ | |||
4130 | 421 | self.os_release.return_value = 'folsom' | 439 | self.os_release.return_value = 'folsom' |
4131 | 422 | 440 | ||
4132 | 423 | def test_determine_endpoints_base(self): | 441 | def test_determine_endpoints_base(self): |
4133 | 442 | self.is_relation_made.return_value = False | ||
4134 | 424 | self.relation_ids.return_value = [] | 443 | self.relation_ids.return_value = [] |
4135 | 425 | self.assertEquals( | 444 | self.assertEquals( |
4137 | 426 | BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com')) | 445 | BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com', |
4138 | 446 | 'http://foohost.com', | ||
4139 | 447 | 'http://foohost.com')) | ||
4140 | 427 | 448 | ||
4141 | 428 | def test_determine_endpoints_nova_volume(self): | 449 | def test_determine_endpoints_nova_volume(self): |
4142 | 450 | self.is_relation_made.return_value = False | ||
4143 | 429 | self.relation_ids.return_value = ['nova-volume-service/0'] | 451 | self.relation_ids.return_value = ['nova-volume-service/0'] |
4144 | 430 | endpoints = deepcopy(BASE_ENDPOINTS) | 452 | endpoints = deepcopy(BASE_ENDPOINTS) |
4145 | 431 | endpoints.update({ | 453 | endpoints.update({ |
4146 | @@ -438,9 +460,12 @@ | |||
4147 | 438 | 'nova-volume_region': 'RegionOne', | 460 | 'nova-volume_region': 'RegionOne', |
4148 | 439 | 'nova-volume_service': 'nova-volume'}) | 461 | 'nova-volume_service': 'nova-volume'}) |
4149 | 440 | self.assertEquals( | 462 | self.assertEquals( |
4151 | 441 | endpoints, utils.determine_endpoints('http://foohost.com')) | 463 | endpoints, utils.determine_endpoints('http://foohost.com', |
4152 | 464 | 'http://foohost.com', | ||
4153 | 465 | 'http://foohost.com')) | ||
4154 | 442 | 466 | ||
4155 | 443 | def test_determine_endpoints_quantum_neutron(self): | 467 | def test_determine_endpoints_quantum_neutron(self): |
4156 | 468 | self.is_relation_made.return_value = False | ||
4157 | 444 | self.relation_ids.return_value = [] | 469 | self.relation_ids.return_value = [] |
4158 | 445 | self.network_manager.return_value = 'quantum' | 470 | self.network_manager.return_value = 'quantum' |
4159 | 446 | endpoints = deepcopy(BASE_ENDPOINTS) | 471 | endpoints = deepcopy(BASE_ENDPOINTS) |
4160 | @@ -451,7 +476,25 @@ | |||
4161 | 451 | 'quantum_region': 'RegionOne', | 476 | 'quantum_region': 'RegionOne', |
4162 | 452 | 'quantum_service': 'quantum'}) | 477 | 'quantum_service': 'quantum'}) |
4163 | 453 | self.assertEquals( | 478 | self.assertEquals( |
4165 | 454 | endpoints, utils.determine_endpoints('http://foohost.com')) | 479 | endpoints, utils.determine_endpoints('http://foohost.com', |
4166 | 480 | 'http://foohost.com', | ||
4167 | 481 | 'http://foohost.com')) | ||
4168 | 482 | |||
4169 | 483 | def test_determine_endpoints_neutron_api_rel(self): | ||
4170 | 484 | self.is_relation_made.return_value = True | ||
4171 | 485 | self.relation_ids.return_value = [] | ||
4172 | 486 | self.network_manager.return_value = 'quantum' | ||
4173 | 487 | endpoints = deepcopy(BASE_ENDPOINTS) | ||
4174 | 488 | endpoints.update({ | ||
4175 | 489 | 'quantum_admin_url': None, | ||
4176 | 490 | 'quantum_internal_url': None, | ||
4177 | 491 | 'quantum_public_url': None, | ||
4178 | 492 | 'quantum_region': None, | ||
4179 | 493 | 'quantum_service': None}) | ||
4180 | 494 | self.assertEquals( | ||
4181 | 495 | endpoints, utils.determine_endpoints('http://foohost.com', | ||
4182 | 496 | 'http://foohost.com', | ||
4183 | 497 | 'http://foohost.com')) | ||
4184 | 455 | 498 | ||
4185 | 456 | @patch.object(utils, 'known_hosts') | 499 | @patch.object(utils, 'known_hosts') |
4186 | 457 | @patch('subprocess.check_output') | 500 | @patch('subprocess.check_output') |
4187 | @@ -461,9 +504,9 @@ | |||
4188 | 461 | _check_output.assert_called_with( | 504 | _check_output.assert_called_with( |
4189 | 462 | ['ssh-keygen', '-f', '/foo/known_hosts', | 505 | ['ssh-keygen', '-f', '/foo/known_hosts', |
4190 | 463 | '-H', '-F', 'test']) | 506 | '-H', '-F', 'test']) |
4192 | 464 | _known_hosts.assert_called_with(None) | 507 | _known_hosts.assert_called_with(None, None) |
4193 | 465 | utils.ssh_known_host_key('test', 'bar') | 508 | utils.ssh_known_host_key('test', 'bar') |
4195 | 466 | _known_hosts.assert_called_with('bar') | 509 | _known_hosts.assert_called_with('bar', None) |
4196 | 467 | 510 | ||
4197 | 468 | @patch.object(utils, 'known_hosts') | 511 | @patch.object(utils, 'known_hosts') |
4198 | 469 | @patch('subprocess.check_call') | 512 | @patch('subprocess.check_call') |
4199 | @@ -473,9 +516,9 @@ | |||
4200 | 473 | _check_call.assert_called_with( | 516 | _check_call.assert_called_with( |
4201 | 474 | ['ssh-keygen', '-f', '/foo/known_hosts', | 517 | ['ssh-keygen', '-f', '/foo/known_hosts', |
4202 | 475 | '-R', 'test']) | 518 | '-R', 'test']) |
4204 | 476 | _known_hosts.assert_called_with(None) | 519 | _known_hosts.assert_called_with(None, None) |
4205 | 477 | utils.remove_known_host('test', 'bar') | 520 | utils.remove_known_host('test', 'bar') |
4207 | 478 | _known_hosts.assert_called_with('bar') | 521 | _known_hosts.assert_called_with('bar', None) |
4208 | 479 | 522 | ||
4209 | 480 | @patch('subprocess.check_output') | 523 | @patch('subprocess.check_output') |
4210 | 481 | def test_migrate_database(self, check_output): | 524 | def test_migrate_database(self, check_output): |
4211 | @@ -555,3 +598,113 @@ | |||
4212 | 555 | utils.do_openstack_upgrade() | 598 | utils.do_openstack_upgrade() |
4213 | 556 | expected = [call('cloud:precise-icehouse')] | 599 | expected = [call('cloud:precise-icehouse')] |
4214 | 557 | self.assertEquals(_do_openstack_upgrade.call_args_list, expected) | 600 | self.assertEquals(_do_openstack_upgrade.call_args_list, expected) |
4215 | 601 | |||
4216 | 602 | def test_guard_map_nova(self): | ||
4217 | 603 | self.relation_ids.return_value = [] | ||
4218 | 604 | self.os_release.return_value = 'havana' | ||
4219 | 605 | self.assertEqual( | ||
4220 | 606 | {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'], | ||
4221 | 607 | 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'], | ||
4222 | 608 | 'nova-cert': ['identity-service', 'amqp', 'shared-db'], | ||
4223 | 609 | 'nova-conductor': ['identity-service', 'amqp', 'shared-db'], | ||
4224 | 610 | 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'], | ||
4225 | 611 | 'nova-scheduler': ['identity-service', 'amqp', 'shared-db']}, | ||
4226 | 612 | utils.guard_map() | ||
4227 | 613 | ) | ||
4228 | 614 | self.os_release.return_value = 'essex' | ||
4229 | 615 | self.assertEqual( | ||
4230 | 616 | {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'], | ||
4231 | 617 | 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'], | ||
4232 | 618 | 'nova-cert': ['identity-service', 'amqp', 'shared-db'], | ||
4233 | 619 | 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'], | ||
4234 | 620 | 'nova-scheduler': ['identity-service', 'amqp', 'shared-db']}, | ||
4235 | 621 | utils.guard_map() | ||
4236 | 622 | ) | ||
4237 | 623 | |||
4238 | 624 | def test_guard_map_neutron(self): | ||
4239 | 625 | self.relation_ids.return_value = [] | ||
4240 | 626 | self.network_manager.return_value = 'neutron' | ||
4241 | 627 | self.os_release.return_value = 'icehouse' | ||
4242 | 628 | self.assertEqual( | ||
4243 | 629 | {'neutron-server': ['identity-service', 'amqp', 'shared-db'], | ||
4244 | 630 | 'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'], | ||
4245 | 631 | 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'], | ||
4246 | 632 | 'nova-cert': ['identity-service', 'amqp', 'shared-db'], | ||
4247 | 633 | 'nova-conductor': ['identity-service', 'amqp', 'shared-db'], | ||
4248 | 634 | 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'], | ||
4249 | 635 | 'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], }, | ||
4250 | 636 | utils.guard_map() | ||
4251 | 637 | ) | ||
4252 | 638 | self.network_manager.return_value = 'quantum' | ||
4253 | 639 | self.os_release.return_value = 'grizzly' | ||
4254 | 640 | self.assertEqual( | ||
4255 | 641 | {'quantum-server': ['identity-service', 'amqp', 'shared-db'], | ||
4256 | 642 | 'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'], | ||
4257 | 643 | 'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'], | ||
4258 | 644 | 'nova-cert': ['identity-service', 'amqp', 'shared-db'], | ||
4259 | 645 | 'nova-conductor': ['identity-service', 'amqp', 'shared-db'], | ||
4260 | 646 | 'nova-objectstore': ['identity-service', 'amqp', 'shared-db'], | ||
4261 | 647 | 'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], }, | ||
4262 | 648 | utils.guard_map() | ||
4263 | 649 | ) | ||
4264 | 650 | |||
4265 | 651 | def test_guard_map_pgsql(self): | ||
4266 | 652 | self.relation_ids.return_value = ['pgsql:1'] | ||
4267 | 653 | self.network_manager.return_value = 'neutron' | ||
4268 | 654 | self.os_release.return_value = 'icehouse' | ||
4269 | 655 | self.assertEqual( | ||
4270 | 656 | {'neutron-server': ['identity-service', 'amqp', | ||
4271 | 657 | 'pgsql-neutron-db'], | ||
4272 | 658 | 'nova-api-ec2': ['identity-service', 'amqp', 'pgsql-nova-db'], | ||
4273 | 659 | 'nova-api-os-compute': ['identity-service', 'amqp', | ||
4274 | 660 | 'pgsql-nova-db'], | ||
4275 | 661 | 'nova-cert': ['identity-service', 'amqp', 'pgsql-nova-db'], | ||
4276 | 662 | 'nova-conductor': ['identity-service', 'amqp', 'pgsql-nova-db'], | ||
4277 | 663 | 'nova-objectstore': ['identity-service', 'amqp', | ||
4278 | 664 | 'pgsql-nova-db'], | ||
4279 | 665 | 'nova-scheduler': ['identity-service', 'amqp', | ||
4280 | 666 | 'pgsql-nova-db'], }, | ||
4281 | 667 | utils.guard_map() | ||
4282 | 668 | ) | ||
4283 | 669 | |||
4284 | 670 | def test_service_guard_inactive(self): | ||
4285 | 671 | '''Ensure that if disabled, service guards nothing''' | ||
4286 | 672 | contexts = MagicMock() | ||
4287 | 673 | |||
4288 | 674 | @utils.service_guard({'test': ['interfacea', 'interfaceb']}, | ||
4289 | 675 | contexts, False) | ||
4290 | 676 | def dummy_func(): | ||
4291 | 677 | pass | ||
4292 | 678 | dummy_func() | ||
4293 | 679 | self.assertFalse(self.service_running.called) | ||
4294 | 680 | self.assertFalse(contexts.complete_contexts.called) | ||
4295 | 681 | |||
4296 | 682 | def test_service_guard_active_guard(self): | ||
4297 | 683 | '''Ensure services with incomplete interfaces are stopped''' | ||
4298 | 684 | contexts = MagicMock() | ||
4299 | 685 | contexts.complete_contexts.return_value = ['interfacea'] | ||
4300 | 686 | self.service_running.return_value = True | ||
4301 | 687 | |||
4302 | 688 | @utils.service_guard({'test': ['interfacea', 'interfaceb']}, | ||
4303 | 689 | contexts, True) | ||
4304 | 690 | def dummy_func(): | ||
4305 | 691 | pass | ||
4306 | 692 | dummy_func() | ||
4307 | 693 | self.service_running.assert_called_with('test') | ||
4308 | 694 | self.service_stop.assert_called_with('test') | ||
4309 | 695 | self.assertTrue(contexts.complete_contexts.called) | ||
4310 | 696 | |||
4311 | 697 | def test_service_guard_active_release(self): | ||
4312 | 698 | '''Ensure services with complete interfaces are not stopped''' | ||
4313 | 699 | contexts = MagicMock() | ||
4314 | 700 | contexts.complete_contexts.return_value = ['interfacea', | ||
4315 | 701 | 'interfaceb'] | ||
4316 | 702 | |||
4317 | 703 | @utils.service_guard({'test': ['interfacea', 'interfaceb']}, | ||
4318 | 704 | contexts, True) | ||
4319 | 705 | def dummy_func(): | ||
4320 | 706 | pass | ||
4321 | 707 | dummy_func() | ||
4322 | 708 | self.assertFalse(self.service_running.called) | ||
4323 | 709 | self.assertFalse(self.service_stop.called) | ||
4324 | 710 | self.assertTrue(contexts.complete_contexts.called) | ||
4325 | 558 | 711 | ||
4326 | === modified file 'unit_tests/test_utils.py' | |||
4327 | --- unit_tests/test_utils.py 2013-11-08 05:41:39 +0000 | |||
4328 | +++ unit_tests/test_utils.py 2014-07-29 13:07:23 +0000 | |||
4329 | @@ -82,9 +82,9 @@ | |||
4330 | 82 | return self.config | 82 | return self.config |
4331 | 83 | 83 | ||
4332 | 84 | def set(self, attr, value): | 84 | def set(self, attr, value): |
4336 | 85 | if attr not in self.config: | 85 | if attr not in self.config: |
4337 | 86 | raise KeyError | 86 | raise KeyError |
4338 | 87 | self.config[attr] = value | 87 | self.config[attr] = value |
4339 | 88 | 88 | ||
4340 | 89 | 89 | ||
4341 | 90 | class TestRelation(object): | 90 | class TestRelation(object): |