Merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk
- Trusty Tahr (14.04)
- ram-allocation-ratio
- Merge into trunk
Proposed by
James Page
Status: | Superseded |
---|---|
Proposed branch: | lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/trunk |
Diff against target: |
5890 lines (+4147/-361) (has conflicts) 49 files modified
.bzrignore (+2/-0) Makefile (+24/-1) README.txt (+10/-0) charm-helpers-hooks.yaml (+12/-0) charm-helpers-tests.yaml (+5/-0) config.yaml (+100/-0) hooks/charmhelpers/contrib/hahelpers/cluster.py (+3/-2) hooks/charmhelpers/contrib/network/ip.py (+174/-0) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0) hooks/charmhelpers/contrib/openstack/context.py (+121/-25) hooks/charmhelpers/contrib/openstack/ip.py (+79/-0) hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+9/-4) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/openstack/utils.py (+18/-5) hooks/charmhelpers/contrib/peerstorage/__init__.py (+83/-0) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/core/fstab.py (+116/-0) hooks/charmhelpers/core/hookenv.py (+7/-5) hooks/charmhelpers/core/host.py (+47/-8) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+310/-0) hooks/charmhelpers/core/services/helpers.py (+125/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+97/-28) hooks/nova_cc_context.py (+51/-2) hooks/nova_cc_hooks.py (+286/-61) hooks/nova_cc_utils.py (+369/-163) metadata.yaml (+2/-0) revision (+1/-1) templates/havana/nova.conf (+11/-2) templates/icehouse/neutron.conf (+5/-0) templates/icehouse/nova.conf (+18/-2) tests/00-setup (+10/-0) tests/10-basic-precise-essex (+10/-0) tests/11-basic-precise-folsom (+18/-0) tests/12-basic-precise-grizzly (+12/-0) tests/13-basic-precise-havana (+12/-0) tests/14-basic-precise-icehouse (+12/-0) tests/15-basic-trusty-icehouse (+10/-0) tests/README (+47/-0) tests/basic_deployment.py (+520/-0) tests/charmhelpers/contrib/amulet/deployment.py (+71/-0) tests/charmhelpers/contrib/amulet/utils.py (+176/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0) 
tests/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0) unit_tests/test_nova_cc_hooks.py (+262/-13) unit_tests/test_nova_cc_utils.py (+140/-15) Conflict adding file .bzrignore. Moved existing file to .bzrignore.moved. Text conflict in Makefile Contents conflict in charm-helpers.yaml Text conflict in config.yaml Text conflict in hooks/charmhelpers/contrib/openstack/context.py Text conflict in hooks/charmhelpers/contrib/openstack/utils.py Conflict adding file hooks/charmhelpers/core/fstab.py. Moved existing file to hooks/charmhelpers/core/fstab.py.moved. Text conflict in hooks/charmhelpers/core/host.py Text conflict in hooks/charmhelpers/fetch/__init__.py Text conflict in hooks/nova_cc_hooks.py Text conflict in hooks/nova_cc_utils.py Text conflict in templates/havana/nova.conf Text conflict in templates/icehouse/neutron.conf Text conflict in templates/icehouse/nova.conf |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/nova-cloud-controller/ram-allocation-ratio |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+234781@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Unmerged revisions
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' | |||
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 | |||
3 | +++ .bzrignore 2014-09-16 09:08:32 +0000 | |||
4 | @@ -0,0 +1,2 @@ | |||
5 | 1 | bin | ||
6 | 2 | .coverage | ||
7 | 0 | 3 | ||
8 | === renamed file '.bzrignore' => '.bzrignore.moved' | |||
9 | === modified file 'Makefile' | |||
10 | --- Makefile 2014-09-09 23:43:43 +0000 | |||
11 | +++ Makefile 2014-09-16 09:08:32 +0000 | |||
12 | @@ -2,9 +2,10 @@ | |||
13 | 2 | PYTHON := /usr/bin/env python | 2 | PYTHON := /usr/bin/env python |
14 | 3 | 3 | ||
15 | 4 | lint: | 4 | lint: |
17 | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests tests |
18 | 6 | @charm proof | 6 | @charm proof |
19 | 7 | 7 | ||
20 | 8 | <<<<<<< TREE | ||
21 | 8 | test: .venv | 9 | test: .venv |
22 | 9 | @echo Starting tests... | 10 | @echo Starting tests... |
23 | 10 | .venv/bin/nosetests --nologcapture --with-coverage unit_tests | 11 | .venv/bin/nosetests --nologcapture --with-coverage unit_tests |
24 | @@ -18,6 +19,28 @@ | |||
25 | 18 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml | 19 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml |
26 | 19 | 20 | ||
27 | 20 | publish: lint test | 21 | publish: lint test |
28 | 22 | ======= | ||
29 | 23 | unit_test: | ||
30 | 24 | @echo Starting unit tests... | ||
31 | 25 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | ||
32 | 26 | |||
33 | 27 | bin/charm_helpers_sync.py: | ||
34 | 28 | @mkdir -p bin | ||
35 | 29 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ | ||
36 | 30 | > bin/charm_helpers_sync.py | ||
37 | 31 | test: | ||
38 | 32 | @echo Starting Amulet tests... | ||
39 | 33 | # coreycb note: The -v should only be temporary until Amulet sends | ||
40 | 34 | # raise_status() messages to stderr: | ||
41 | 35 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
42 | 36 | @juju test -v -p AMULET_HTTP_PROXY | ||
43 | 37 | |||
44 | 38 | sync: bin/charm_helpers_sync.py | ||
45 | 39 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml | ||
46 | 40 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml | ||
47 | 41 | |||
48 | 42 | publish: lint unit_test | ||
49 | 43 | >>>>>>> MERGE-SOURCE | ||
50 | 21 | bzr push lp:charms/nova-cloud-controller | 44 | bzr push lp:charms/nova-cloud-controller |
51 | 22 | bzr push lp:charms/trusty/nova-cloud-controller | 45 | bzr push lp:charms/trusty/nova-cloud-controller |
52 | 23 | 46 | ||
53 | 24 | 47 | ||
54 | === modified file 'README.txt' | |||
55 | --- README.txt 2014-03-25 09:11:04 +0000 | |||
56 | +++ README.txt 2014-09-16 09:08:32 +0000 | |||
57 | @@ -4,6 +4,16 @@ | |||
58 | 4 | 4 | ||
59 | 5 | Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. | 5 | Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. |
60 | 6 | 6 | ||
61 | 7 | The neutron-api interface can be used to join this charm with an external neutron-api server. If this is done | ||
62 | 8 | then this charm will shut down its neutron-api service and the external charm will be registered as the | ||
63 | 9 | neutron-api endpoint in keystone. It will also use the quantum-security-groups setting which is passed to | ||
64 | 10 | it by the api service rather than its own quantum-security-groups setting. | ||
65 | 11 | |||
66 | 12 | If console access is required then console-proxy-ip should be set to a client accessible IP that resolves | ||
67 | 13 | to the nova-cloud-controller. If running in HA mode then the public vip is used if console-proxy-ip is set | ||
68 | 14 | to local. Note: The console access protocol is baked into a guest when it is created; if you change it, then | ||
69 | 15 | console access for existing guests will stop working. | ||
70 | 16 | |||
71 | 7 | ****************************************************** | 17 | ****************************************************** |
72 | 8 | Special considerations to be deployed using Postgresql | 18 | Special considerations to be deployed using Postgresql |
73 | 9 | ****************************************************** | 19 | ****************************************************** |
74 | 10 | 20 | ||
75 | === added file 'charm-helpers-hooks.yaml' | |||
76 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 | |||
77 | +++ charm-helpers-hooks.yaml 2014-09-16 09:08:32 +0000 | |||
78 | @@ -0,0 +1,12 @@ | |||
79 | 1 | branch: lp:charm-helpers | ||
80 | 2 | destination: hooks/charmhelpers | ||
81 | 3 | include: | ||
82 | 4 | - core | ||
83 | 5 | - fetch | ||
84 | 6 | - contrib.openstack|inc=* | ||
85 | 7 | - contrib.storage | ||
86 | 8 | - contrib.peerstorage | ||
87 | 9 | - contrib.hahelpers: | ||
88 | 10 | - apache | ||
89 | 11 | - payload.execd | ||
90 | 12 | - contrib.network.ip | ||
91 | 0 | 13 | ||
92 | === added file 'charm-helpers-tests.yaml' | |||
93 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 | |||
94 | +++ charm-helpers-tests.yaml 2014-09-16 09:08:32 +0000 | |||
95 | @@ -0,0 +1,5 @@ | |||
96 | 1 | branch: lp:charm-helpers | ||
97 | 2 | destination: tests/charmhelpers | ||
98 | 3 | include: | ||
99 | 4 | - contrib.amulet | ||
100 | 5 | - contrib.openstack.amulet | ||
101 | 0 | 6 | ||
102 | === renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS' | |||
103 | === modified file 'config.yaml' | |||
104 | --- config.yaml 2014-09-09 23:43:43 +0000 | |||
105 | +++ config.yaml 2014-09-16 09:08:32 +0000 | |||
106 | @@ -97,6 +97,7 @@ | |||
107 | 97 | # HA configuration settings | 97 | # HA configuration settings |
108 | 98 | vip: | 98 | vip: |
109 | 99 | type: string | 99 | type: string |
110 | 100 | <<<<<<< TREE | ||
111 | 100 | default: | 101 | default: |
112 | 101 | description: "Virtual IP to use to front API services in ha configuration" | 102 | description: "Virtual IP to use to front API services in ha configuration" |
113 | 102 | vip_iface: | 103 | vip_iface: |
114 | @@ -107,6 +108,13 @@ | |||
115 | 107 | type: int | 108 | type: int |
116 | 108 | default: 24 | 109 | default: 24 |
117 | 109 | description: "Netmask that will be used for the Virtual IP" | 110 | description: "Netmask that will be used for the Virtual IP" |
118 | 111 | ======= | ||
119 | 112 | description: | | ||
120 | 113 | Virtual IP(s) to use to front API services in HA configuration. | ||
121 | 114 | . | ||
122 | 115 | If multiple networks are being used, a VIP should be provided for each | ||
123 | 116 | network, separated by spaces. | ||
124 | 117 | >>>>>>> MERGE-SOURCE | ||
125 | 110 | ha-bindiface: | 118 | ha-bindiface: |
126 | 111 | type: string | 119 | type: string |
127 | 112 | default: eth0 | 120 | default: eth0 |
128 | @@ -145,8 +153,12 @@ | |||
129 | 145 | # Neutron NVP and VMware NSX plugin configuration | 153 | # Neutron NVP and VMware NSX plugin configuration |
130 | 146 | nvp-controllers: | 154 | nvp-controllers: |
131 | 147 | type: string | 155 | type: string |
132 | 156 | <<<<<<< TREE | ||
133 | 148 | default: | 157 | default: |
134 | 149 | description: Space delimited addresses of NVP/NSX controllers | 158 | description: Space delimited addresses of NVP/NSX controllers |
135 | 159 | ======= | ||
136 | 160 | description: Space delimited addresses of NVP/NSX controllers | ||
137 | 161 | >>>>>>> MERGE-SOURCE | ||
138 | 150 | nvp-username: | 162 | nvp-username: |
139 | 151 | type: string | 163 | type: string |
140 | 152 | default: admin | 164 | default: admin |
141 | @@ -168,6 +180,7 @@ | |||
142 | 168 | in NVP before starting Quantum with the nvp plugin. | 180 | in NVP before starting Quantum with the nvp plugin. |
143 | 169 | nvp-l3-uuid: | 181 | nvp-l3-uuid: |
144 | 170 | type: string | 182 | type: string |
145 | 183 | <<<<<<< TREE | ||
146 | 171 | default: | 184 | default: |
147 | 172 | description: | | 185 | description: | |
148 | 173 | This is uuid of the default NVP/NSX L3 Gateway Service. | 186 | This is uuid of the default NVP/NSX L3 Gateway Service. |
149 | @@ -191,3 +204,90 @@ | |||
150 | 191 | * shared-db or (pgsql-nova-db, pgsql-neutron-db) | 204 | * shared-db or (pgsql-nova-db, pgsql-neutron-db) |
151 | 192 | * amqp | 205 | * amqp |
152 | 193 | * identity-service | 206 | * identity-service |
153 | 207 | ======= | ||
154 | 208 | description: | | ||
155 | 209 | This is uuid of the default NVP/NSX L3 Gateway Service. | ||
156 | 210 | # end of NVP/NSX configuration | ||
157 | 211 | # Network configuration options | ||
158 | 212 | # by default all access is over 'private-address' | ||
159 | 213 | os-admin-network: | ||
160 | 214 | type: string | ||
161 | 215 | description: | | ||
162 | 216 | The IP address and netmask of the OpenStack Admin network (e.g., | ||
163 | 217 | 192.168.0.0/24) | ||
164 | 218 | . | ||
165 | 219 | This network will be used for admin endpoints. | ||
166 | 220 | os-internal-network: | ||
167 | 221 | type: string | ||
168 | 222 | description: | | ||
169 | 223 | The IP address and netmask of the OpenStack Internal network (e.g., | ||
170 | 224 | 192.168.0.0/24) | ||
171 | 225 | . | ||
172 | 226 | This network will be used for internal endpoints. | ||
173 | 227 | os-public-network: | ||
174 | 228 | type: string | ||
175 | 229 | description: | | ||
176 | 230 | The IP address and netmask of the OpenStack Public network (e.g., | ||
177 | 231 | 192.168.0.0/24) | ||
178 | 232 | . | ||
179 | 233 | This network will be used for public endpoints. | ||
180 | 234 | service-guard: | ||
181 | 235 | type: boolean | ||
182 | 236 | default: false | ||
183 | 237 | description: | | ||
184 | 238 | Ensure required relations are made and complete before allowing services | ||
185 | 239 | to be started | ||
186 | 240 | . | ||
187 | 241 | By default, services may be up and accepting API requests from install | ||
188 | 242 | onwards. | ||
189 | 243 | . | ||
190 | 244 | Enabling this flag ensures that services will not be started until the | ||
191 | 245 | minimum 'core relations' have been made between this charm and other | ||
192 | 246 | charms. | ||
193 | 247 | . | ||
194 | 248 | For this charm the following relations must be made: | ||
195 | 249 | . | ||
196 | 250 | * shared-db or (pgsql-nova-db, pgsql-neutron-db) | ||
197 | 251 | * amqp | ||
198 | 252 | * identity-service | ||
199 | 253 | console-access-protocol: | ||
200 | 254 | type: string | ||
201 | 255 | description: | | ||
202 | 256 | Protocol to use when accessing virtual machine console. Supported types | ||
203 | 257 | are None, spice, xvpvnc, novnc and vnc (for both xvpvnc and novnc) | ||
204 | 258 | console-proxy-ip: | ||
205 | 259 | type: string | ||
206 | 260 | default: local | ||
207 | 261 | description: | | ||
208 | 262 | If console-access-protocol != None then this is the ip published to | ||
209 | 263 | clients for access to console proxy. Set to local for the ip address of | ||
210 | 264 | the nova-cloud-controller serving the request to be used | ||
211 | 265 | console-keymap: | ||
212 | 266 | type: string | ||
213 | 267 | default: 'en-us' | ||
214 | 268 | description: | | ||
215 | 269 | Console keymap | ||
216 | 270 | worker-multiplier: | ||
217 | 271 | type: int | ||
218 | 272 | default: 2 | ||
219 | 273 | description: | | ||
220 | 274 | The CPU core multiplier to use when configuring worker processes for | ||
221 | 275 | Nova and Neutron. By default, the number of workers for each daemon | ||
222 | 276 | is set to twice the number of CPU cores a service unit has. | ||
223 | 277 | cpu-allocation-ratio: | ||
224 | 278 | type: float | ||
225 | 279 | default: 16.0 | ||
226 | 280 | description: | | ||
227 | 281 | The per physical core -> virtual core ratio to use in the Nova scheduler. | ||
228 | 282 | . | ||
229 | 283 | Increasing this value will increase instance density on compute nodes | ||
230 | 284 | at the expense of instance performance. | ||
231 | 285 | ram-allocation-ratio: | ||
232 | 286 | type: float | ||
233 | 287 | default: 1.5 | ||
234 | 288 | description: | | ||
235 | 289 | The physical ram -> virtual ram ratio to use in the Nova scheduler. | ||
236 | 290 | . | ||
237 | 291 | Increasing this value will increase instance density on compute nodes | ||
238 | 292 | at the potential expense of instance performance. | ||
239 | 293 | >>>>>>> MERGE-SOURCE | ||
240 | 194 | 294 | ||
241 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
242 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-17 12:10:27 +0000 | |||
243 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-16 09:08:32 +0000 | |||
244 | @@ -146,12 +146,12 @@ | |||
245 | 146 | Obtains all relevant configuration from charm configuration required | 146 | Obtains all relevant configuration from charm configuration required |
246 | 147 | for initiating a relation to hacluster: | 147 | for initiating a relation to hacluster: |
247 | 148 | 148 | ||
249 | 149 | ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr | 149 | ha-bindiface, ha-mcastport, vip |
250 | 150 | 150 | ||
251 | 151 | returns: dict: A dict containing settings keyed by setting name. | 151 | returns: dict: A dict containing settings keyed by setting name. |
252 | 152 | raises: HAIncompleteConfig if settings are missing. | 152 | raises: HAIncompleteConfig if settings are missing. |
253 | 153 | ''' | 153 | ''' |
255 | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
256 | 155 | conf = {} | 155 | conf = {} |
257 | 156 | for setting in settings: | 156 | for setting in settings: |
258 | 157 | conf[setting] = config_get(setting) | 157 | conf[setting] = config_get(setting) |
259 | @@ -170,6 +170,7 @@ | |||
260 | 170 | 170 | ||
261 | 171 | :configs : OSTemplateRenderer: A config templating object to inspect for | 171 | :configs : OSTemplateRenderer: A config templating object to inspect for |
262 | 172 | a complete https context. | 172 | a complete https context. |
263 | 173 | |||
264 | 173 | :vip_setting: str: Setting in charm config that specifies | 174 | :vip_setting: str: Setting in charm config that specifies |
265 | 174 | VIP address. | 175 | VIP address. |
266 | 175 | ''' | 176 | ''' |
267 | 176 | 177 | ||
268 | === added directory 'hooks/charmhelpers/contrib/network' | |||
269 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' | |||
270 | === added file 'hooks/charmhelpers/contrib/network/ip.py' | |||
271 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 | |||
272 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-09-16 09:08:32 +0000 | |||
273 | @@ -0,0 +1,174 @@ | |||
274 | 1 | import sys | ||
275 | 2 | |||
276 | 3 | from functools import partial | ||
277 | 4 | |||
278 | 5 | from charmhelpers.fetch import apt_install | ||
279 | 6 | from charmhelpers.core.hookenv import ( | ||
280 | 7 | ERROR, log, config, | ||
281 | 8 | ) | ||
282 | 9 | |||
283 | 10 | try: | ||
284 | 11 | import netifaces | ||
285 | 12 | except ImportError: | ||
286 | 13 | apt_install('python-netifaces') | ||
287 | 14 | import netifaces | ||
288 | 15 | |||
289 | 16 | try: | ||
290 | 17 | import netaddr | ||
291 | 18 | except ImportError: | ||
292 | 19 | apt_install('python-netaddr') | ||
293 | 20 | import netaddr | ||
294 | 21 | |||
295 | 22 | |||
296 | 23 | def _validate_cidr(network): | ||
297 | 24 | try: | ||
298 | 25 | netaddr.IPNetwork(network) | ||
299 | 26 | except (netaddr.core.AddrFormatError, ValueError): | ||
300 | 27 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
301 | 28 | network) | ||
302 | 29 | |||
303 | 30 | |||
304 | 31 | def get_address_in_network(network, fallback=None, fatal=False): | ||
305 | 32 | """ | ||
306 | 33 | Get an IPv4 or IPv6 address within the network from the host. | ||
307 | 34 | |||
308 | 35 | :param network (str): CIDR presentation format. For example, | ||
309 | 36 | '192.168.1.0/24'. | ||
310 | 37 | :param fallback (str): If no address is found, return fallback. | ||
311 | 38 | :param fatal (boolean): If no address is found, fallback is not | ||
312 | 39 | set and fatal is True then exit(1). | ||
313 | 40 | |||
314 | 41 | """ | ||
315 | 42 | |||
316 | 43 | def not_found_error_out(): | ||
317 | 44 | log("No IP address found in network: %s" % network, | ||
318 | 45 | level=ERROR) | ||
319 | 46 | sys.exit(1) | ||
320 | 47 | |||
321 | 48 | if network is None: | ||
322 | 49 | if fallback is not None: | ||
323 | 50 | return fallback | ||
324 | 51 | else: | ||
325 | 52 | if fatal: | ||
326 | 53 | not_found_error_out() | ||
327 | 54 | |||
328 | 55 | _validate_cidr(network) | ||
329 | 56 | network = netaddr.IPNetwork(network) | ||
330 | 57 | for iface in netifaces.interfaces(): | ||
331 | 58 | addresses = netifaces.ifaddresses(iface) | ||
332 | 59 | if network.version == 4 and netifaces.AF_INET in addresses: | ||
333 | 60 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
334 | 61 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
335 | 62 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
336 | 63 | if cidr in network: | ||
337 | 64 | return str(cidr.ip) | ||
338 | 65 | if network.version == 6 and netifaces.AF_INET6 in addresses: | ||
339 | 66 | for addr in addresses[netifaces.AF_INET6]: | ||
340 | 67 | if not addr['addr'].startswith('fe80'): | ||
341 | 68 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
342 | 69 | addr['netmask'])) | ||
343 | 70 | if cidr in network: | ||
344 | 71 | return str(cidr.ip) | ||
345 | 72 | |||
346 | 73 | if fallback is not None: | ||
347 | 74 | return fallback | ||
348 | 75 | |||
349 | 76 | if fatal: | ||
350 | 77 | not_found_error_out() | ||
351 | 78 | |||
352 | 79 | return None | ||
353 | 80 | |||
354 | 81 | |||
355 | 82 | def is_ipv6(address): | ||
356 | 83 | '''Determine whether provided address is IPv6 or not''' | ||
357 | 84 | try: | ||
358 | 85 | address = netaddr.IPAddress(address) | ||
359 | 86 | except netaddr.AddrFormatError: | ||
360 | 87 | # probably a hostname - so not an address at all! | ||
361 | 88 | return False | ||
362 | 89 | else: | ||
363 | 90 | return address.version == 6 | ||
364 | 91 | |||
365 | 92 | |||
366 | 93 | def is_address_in_network(network, address): | ||
367 | 94 | """ | ||
368 | 95 | Determine whether the provided address is within a network range. | ||
369 | 96 | |||
370 | 97 | :param network (str): CIDR presentation format. For example, | ||
371 | 98 | '192.168.1.0/24'. | ||
372 | 99 | :param address: An individual IPv4 or IPv6 address without a net | ||
373 | 100 | mask or subnet prefix. For example, '192.168.1.1'. | ||
374 | 101 | :returns boolean: Flag indicating whether address is in network. | ||
375 | 102 | """ | ||
376 | 103 | try: | ||
377 | 104 | network = netaddr.IPNetwork(network) | ||
378 | 105 | except (netaddr.core.AddrFormatError, ValueError): | ||
379 | 106 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
380 | 107 | network) | ||
381 | 108 | try: | ||
382 | 109 | address = netaddr.IPAddress(address) | ||
383 | 110 | except (netaddr.core.AddrFormatError, ValueError): | ||
384 | 111 | raise ValueError("Address (%s) is not in correct presentation format" % | ||
385 | 112 | address) | ||
386 | 113 | if address in network: | ||
387 | 114 | return True | ||
388 | 115 | else: | ||
389 | 116 | return False | ||
390 | 117 | |||
391 | 118 | |||
392 | 119 | def _get_for_address(address, key): | ||
393 | 120 | """Retrieve an attribute of or the physical interface that | ||
394 | 121 | the IP address provided could be bound to. | ||
395 | 122 | |||
396 | 123 | :param address (str): An individual IPv4 or IPv6 address without a net | ||
397 | 124 | mask or subnet prefix. For example, '192.168.1.1'. | ||
398 | 125 | :param key: 'iface' for the physical interface name or an attribute | ||
399 | 126 | of the configured interface, for example 'netmask'. | ||
400 | 127 | :returns str: Requested attribute or None if address is not bindable. | ||
401 | 128 | """ | ||
402 | 129 | address = netaddr.IPAddress(address) | ||
403 | 130 | for iface in netifaces.interfaces(): | ||
404 | 131 | addresses = netifaces.ifaddresses(iface) | ||
405 | 132 | if address.version == 4 and netifaces.AF_INET in addresses: | ||
406 | 133 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
407 | 134 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
408 | 135 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
409 | 136 | if address in cidr: | ||
410 | 137 | if key == 'iface': | ||
411 | 138 | return iface | ||
412 | 139 | else: | ||
413 | 140 | return addresses[netifaces.AF_INET][0][key] | ||
414 | 141 | if address.version == 6 and netifaces.AF_INET6 in addresses: | ||
415 | 142 | for addr in addresses[netifaces.AF_INET6]: | ||
416 | 143 | if not addr['addr'].startswith('fe80'): | ||
417 | 144 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
418 | 145 | addr['netmask'])) | ||
419 | 146 | if address in cidr: | ||
420 | 147 | if key == 'iface': | ||
421 | 148 | return iface | ||
422 | 149 | else: | ||
423 | 150 | return addr[key] | ||
424 | 151 | return None | ||
425 | 152 | |||
426 | 153 | |||
427 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | ||
428 | 155 | |||
429 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | ||
430 | 157 | |||
431 | 158 | |||
432 | 159 | def get_ipv6_addr(iface="eth0"): | ||
433 | 160 | try: | ||
434 | 161 | iface_addrs = netifaces.ifaddresses(iface) | ||
435 | 162 | if netifaces.AF_INET6 not in iface_addrs: | ||
436 | 163 | raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) | ||
437 | 164 | |||
438 | 165 | addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] | ||
439 | 166 | ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') | ||
440 | 167 | and config('vip') != a['addr']] | ||
441 | 168 | if not ipv6_addr: | ||
442 | 169 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | ||
443 | 170 | |||
444 | 171 | return ipv6_addr[0] | ||
445 | 172 | |||
446 | 173 | except ValueError: | ||
447 | 174 | raise ValueError("Invalid interface '%s'" % iface) | ||
448 | 0 | 175 | ||
449 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
450 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
451 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
452 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
453 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000 | |||
454 | @@ -0,0 +1,61 @@ | |||
455 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
456 | 2 | AmuletDeployment | ||
457 | 3 | ) | ||
458 | 4 | |||
459 | 5 | |||
460 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
461 | 7 | """OpenStack amulet deployment. | ||
462 | 8 | |||
463 | 9 | This class inherits from AmuletDeployment and has additional support | ||
464 | 10 | that is specifically for use by OpenStack charms. | ||
465 | 11 | """ | ||
466 | 12 | |||
467 | 13 | def __init__(self, series=None, openstack=None, source=None): | ||
468 | 14 | """Initialize the deployment environment.""" | ||
469 | 15 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
470 | 16 | self.openstack = openstack | ||
471 | 17 | self.source = source | ||
472 | 18 | |||
473 | 19 | def _add_services(self, this_service, other_services): | ||
474 | 20 | """Add services to the deployment and set openstack-origin.""" | ||
475 | 21 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
476 | 22 | other_services) | ||
477 | 23 | name = 0 | ||
478 | 24 | services = other_services | ||
479 | 25 | services.append(this_service) | ||
480 | 26 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
481 | 27 | |||
482 | 28 | if self.openstack: | ||
483 | 29 | for svc in services: | ||
484 | 30 | if svc[name] not in use_source: | ||
485 | 31 | config = {'openstack-origin': self.openstack} | ||
486 | 32 | self.d.configure(svc[name], config) | ||
487 | 33 | |||
488 | 34 | if self.source: | ||
489 | 35 | for svc in services: | ||
490 | 36 | if svc[name] in use_source: | ||
491 | 37 | config = {'source': self.source} | ||
492 | 38 | self.d.configure(svc[name], config) | ||
493 | 39 | |||
494 | 40 | def _configure_services(self, configs): | ||
495 | 41 | """Configure all of the services.""" | ||
496 | 42 | for service, config in configs.iteritems(): | ||
497 | 43 | self.d.configure(service, config) | ||
498 | 44 | |||
499 | 45 | def _get_openstack_release(self): | ||
500 | 46 | """Get openstack release. | ||
501 | 47 | |||
502 | 48 | Return an integer representing the enum value of the openstack | ||
503 | 49 | release. | ||
504 | 50 | """ | ||
505 | 51 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
506 | 52 | self.precise_havana, self.precise_icehouse, | ||
507 | 53 | self.trusty_icehouse) = range(6) | ||
508 | 54 | releases = { | ||
509 | 55 | ('precise', None): self.precise_essex, | ||
510 | 56 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
511 | 57 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
512 | 58 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
513 | 59 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
514 | 60 | ('trusty', None): self.trusty_icehouse} | ||
515 | 61 | return releases[(self.series, self.openstack)] | ||
516 | 0 | 62 | ||
517 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
518 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
519 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000 | |||
520 | @@ -0,0 +1,275 @@ | |||
521 | 1 | import logging | ||
522 | 2 | import os | ||
523 | 3 | import time | ||
524 | 4 | import urllib | ||
525 | 5 | |||
526 | 6 | import glanceclient.v1.client as glance_client | ||
527 | 7 | import keystoneclient.v2_0 as keystone_client | ||
528 | 8 | import novaclient.v1_1.client as nova_client | ||
529 | 9 | |||
530 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
531 | 11 | AmuletUtils | ||
532 | 12 | ) | ||
533 | 13 | |||
534 | 14 | DEBUG = logging.DEBUG | ||
535 | 15 | ERROR = logging.ERROR | ||
536 | 16 | |||
537 | 17 | |||
class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.

        Returns None on success, or an error message string on failure.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            # Ports are matched as substrings of the endpoint URLs so the
            # same expected data works regardless of scheme/hostname.
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.

        Returns None on success, or an error message string on failure.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # NOTE: initialise ret so an empty 'expected' dict cannot raise
        # NameError on the final return.
        ret = None
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.

        Returns None on success, or an error message string on failure.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # NOTE: initialise ret so an empty 'expected' list cannot raise
        # NameError on the final return.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.

        Returns None on success, or an error message string on failure.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # NOTE: initialise ret so an empty 'expected' list cannot raise
        # NameError on the final return.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.

        Returns None on success, or an error message string on failure.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # NOTE: initialise ret so an empty 'expected' list cannot raise
        # NameError on the final return.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavor
        names.

        Returns None on success, or an error message string on failure.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        # Derive the keystone unit's address from its mysql relation data;
        # the admin API listens on port 35357.
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the glance image object, or None if the image does not
        become active within the timeout.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        # NOTE: keep the remote filename separate from the local cache
        # path; embedding the local 'tests/' prefix in the download URL
        # produces a broken URL (404).
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        # Poll up to ~30s for the upload to become active.
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Returns True on success, False if the deletion did not complete
        within the timeout.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        # Poll up to ~30s for the image count to drop by one.
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Returns the nova server object, or None if the instance does not
        become ACTIVE within the timeout.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        # Instances can take a while to boot: poll up to ~3 minutes.
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Returns True on success, False if the deletion did not complete
        within the timeout.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        # Poll up to ~30s for the server count to drop by one.
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
796 | 0 | 276 | ||
797 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
798 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:53:46 +0000 | |||
799 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-16 09:08:32 +0000 | |||
800 | @@ -21,6 +21,7 @@ | |||
801 | 21 | relation_get, | 21 | relation_get, |
802 | 22 | relation_ids, | 22 | relation_ids, |
803 | 23 | related_units, | 23 | related_units, |
804 | 24 | relation_set, | ||
805 | 24 | unit_get, | 25 | unit_get, |
806 | 25 | unit_private_ip, | 26 | unit_private_ip, |
807 | 26 | ERROR, | 27 | ERROR, |
808 | @@ -43,6 +44,11 @@ | |||
809 | 43 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
810 | 44 | ) | 45 | ) |
811 | 45 | 46 | ||
812 | 47 | from charmhelpers.contrib.network.ip import ( | ||
813 | 48 | get_address_in_network, | ||
814 | 49 | get_ipv6_addr, | ||
815 | 50 | ) | ||
816 | 51 | |||
817 | 46 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 52 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
818 | 47 | 53 | ||
819 | 48 | 54 | ||
820 | @@ -135,8 +141,26 @@ | |||
821 | 135 | 'Missing required charm config options. ' | 141 | 'Missing required charm config options. ' |
822 | 136 | '(database name and user)') | 142 | '(database name and user)') |
823 | 137 | raise OSContextError | 143 | raise OSContextError |
824 | 144 | |||
825 | 138 | ctxt = {} | 145 | ctxt = {} |
826 | 139 | 146 | ||
827 | 147 | # NOTE(jamespage) if mysql charm provides a network upon which | ||
828 | 148 | # access to the database should be made, reconfigure relation | ||
829 | 149 | # with the service units local address and defer execution | ||
830 | 150 | access_network = relation_get('access-network') | ||
831 | 151 | if access_network is not None: | ||
832 | 152 | if self.relation_prefix is not None: | ||
833 | 153 | hostname_key = "{}_hostname".format(self.relation_prefix) | ||
834 | 154 | else: | ||
835 | 155 | hostname_key = "hostname" | ||
836 | 156 | access_hostname = get_address_in_network(access_network, | ||
837 | 157 | unit_get('private-address')) | ||
838 | 158 | set_hostname = relation_get(attribute=hostname_key, | ||
839 | 159 | unit=local_unit()) | ||
840 | 160 | if set_hostname != access_hostname: | ||
841 | 161 | relation_set(relation_settings={hostname_key: access_hostname}) | ||
842 | 162 | return ctxt # Defer any further hook execution for now.... | ||
843 | 163 | |||
844 | 140 | password_setting = 'password' | 164 | password_setting = 'password' |
845 | 141 | if self.relation_prefix: | 165 | if self.relation_prefix: |
846 | 142 | password_setting = self.relation_prefix + '_password' | 166 | password_setting = self.relation_prefix + '_password' |
847 | @@ -244,23 +268,31 @@ | |||
848 | 244 | 268 | ||
849 | 245 | 269 | ||
850 | 246 | class AMQPContext(OSContextGenerator): | 270 | class AMQPContext(OSContextGenerator): |
851 | 247 | interfaces = ['amqp'] | ||
852 | 248 | 271 | ||
854 | 249 | def __init__(self, ssl_dir=None): | 272 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): |
855 | 250 | self.ssl_dir = ssl_dir | 273 | self.ssl_dir = ssl_dir |
856 | 274 | self.rel_name = rel_name | ||
857 | 275 | self.relation_prefix = relation_prefix | ||
858 | 276 | self.interfaces = [rel_name] | ||
859 | 251 | 277 | ||
860 | 252 | def __call__(self): | 278 | def __call__(self): |
861 | 253 | log('Generating template context for amqp') | 279 | log('Generating template context for amqp') |
862 | 254 | conf = config() | 280 | conf = config() |
863 | 281 | user_setting = 'rabbit-user' | ||
864 | 282 | vhost_setting = 'rabbit-vhost' | ||
865 | 283 | if self.relation_prefix: | ||
866 | 284 | user_setting = self.relation_prefix + '-rabbit-user' | ||
867 | 285 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | ||
868 | 286 | |||
869 | 255 | try: | 287 | try: |
872 | 256 | username = conf['rabbit-user'] | 288 | username = conf[user_setting] |
873 | 257 | vhost = conf['rabbit-vhost'] | 289 | vhost = conf[vhost_setting] |
874 | 258 | except KeyError as e: | 290 | except KeyError as e: |
875 | 259 | log('Could not generate shared_db context. ' | 291 | log('Could not generate shared_db context. ' |
876 | 260 | 'Missing required charm config options: %s.' % e) | 292 | 'Missing required charm config options: %s.' % e) |
877 | 261 | raise OSContextError | 293 | raise OSContextError |
878 | 262 | ctxt = {} | 294 | ctxt = {} |
880 | 263 | for rid in relation_ids('amqp'): | 295 | for rid in relation_ids(self.rel_name): |
881 | 264 | ha_vip_only = False | 296 | ha_vip_only = False |
882 | 265 | for unit in related_units(rid): | 297 | for unit in related_units(rid): |
883 | 266 | if relation_get('clustered', rid=rid, unit=unit): | 298 | if relation_get('clustered', rid=rid, unit=unit): |
884 | @@ -333,10 +365,12 @@ | |||
885 | 333 | use_syslog = str(config('use-syslog')).lower() | 365 | use_syslog = str(config('use-syslog')).lower() |
886 | 334 | for rid in relation_ids('ceph'): | 366 | for rid in relation_ids('ceph'): |
887 | 335 | for unit in related_units(rid): | 367 | for unit in related_units(rid): |
888 | 336 | mon_hosts.append(relation_get('private-address', rid=rid, | ||
889 | 337 | unit=unit)) | ||
890 | 338 | auth = relation_get('auth', rid=rid, unit=unit) | 368 | auth = relation_get('auth', rid=rid, unit=unit) |
891 | 339 | key = relation_get('key', rid=rid, unit=unit) | 369 | key = relation_get('key', rid=rid, unit=unit) |
892 | 370 | ceph_addr = \ | ||
893 | 371 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | ||
894 | 372 | relation_get('private-address', rid=rid, unit=unit) | ||
895 | 373 | mon_hosts.append(ceph_addr) | ||
896 | 340 | 374 | ||
897 | 341 | ctxt = { | 375 | ctxt = { |
898 | 342 | 'mon_hosts': ' '.join(mon_hosts), | 376 | 'mon_hosts': ' '.join(mon_hosts), |
899 | @@ -370,7 +404,12 @@ | |||
900 | 370 | 404 | ||
901 | 371 | cluster_hosts = {} | 405 | cluster_hosts = {} |
902 | 372 | l_unit = local_unit().replace('/', '-') | 406 | l_unit = local_unit().replace('/', '-') |
904 | 373 | cluster_hosts[l_unit] = unit_get('private-address') | 407 | if config('prefer-ipv6'): |
905 | 408 | addr = get_ipv6_addr() | ||
906 | 409 | else: | ||
907 | 410 | addr = unit_get('private-address') | ||
908 | 411 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | ||
909 | 412 | addr) | ||
910 | 374 | 413 | ||
911 | 375 | for rid in relation_ids('cluster'): | 414 | for rid in relation_ids('cluster'): |
912 | 376 | for unit in related_units(rid): | 415 | for unit in related_units(rid): |
913 | @@ -381,6 +420,16 @@ | |||
914 | 381 | ctxt = { | 420 | ctxt = { |
915 | 382 | 'units': cluster_hosts, | 421 | 'units': cluster_hosts, |
916 | 383 | } | 422 | } |
917 | 423 | |||
918 | 424 | if config('prefer-ipv6'): | ||
919 | 425 | ctxt['local_host'] = 'ip6-localhost' | ||
920 | 426 | ctxt['haproxy_host'] = '::' | ||
921 | 427 | ctxt['stat_port'] = ':::8888' | ||
922 | 428 | else: | ||
923 | 429 | ctxt['local_host'] = '127.0.0.1' | ||
924 | 430 | ctxt['haproxy_host'] = '0.0.0.0' | ||
925 | 431 | ctxt['stat_port'] = ':8888' | ||
926 | 432 | |||
927 | 384 | if len(cluster_hosts.keys()) > 1: | 433 | if len(cluster_hosts.keys()) > 1: |
928 | 385 | # Enable haproxy when we have enough peers. | 434 | # Enable haproxy when we have enough peers. |
929 | 386 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 435 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
930 | @@ -419,12 +468,13 @@ | |||
931 | 419 | """ | 468 | """ |
932 | 420 | Generates a context for an apache vhost configuration that configures | 469 | Generates a context for an apache vhost configuration that configures |
933 | 421 | HTTPS reverse proxying for one or many endpoints. Generated context | 470 | HTTPS reverse proxying for one or many endpoints. Generated context |
940 | 422 | looks something like: | 471 | looks something like:: |
941 | 423 | { | 472 | |
942 | 424 | 'namespace': 'cinder', | 473 | { |
943 | 425 | 'private_address': 'iscsi.mycinderhost.com', | 474 | 'namespace': 'cinder', |
944 | 426 | 'endpoints': [(8776, 8766), (8777, 8767)] | 475 | 'private_address': 'iscsi.mycinderhost.com', |
945 | 427 | } | 476 | 'endpoints': [(8776, 8766), (8777, 8767)] |
946 | 477 | } | ||
947 | 428 | 478 | ||
948 | 429 | The endpoints list consists of a tuples mapping external ports | 479 | The endpoints list consists of a tuples mapping external ports |
949 | 430 | to internal ports. | 480 | to internal ports. |
950 | @@ -542,6 +592,26 @@ | |||
951 | 542 | 592 | ||
952 | 543 | return nvp_ctxt | 593 | return nvp_ctxt |
953 | 544 | 594 | ||
def n1kv_ctxt(self):
    """Assemble the template context for Cisco's N1KV neutron plugin."""
    # Driver class and plugin config file path come from the shared
    # neutron plugin attribute map for the active network manager.
    core_plugin = neutron_plugin_attribute(self.plugin, 'driver',
                                           self.network_manager)
    plugin_conf = neutron_plugin_attribute(self.plugin, 'config',
                                           self.network_manager)
    return {
        'core_plugin': core_plugin,
        'neutron_plugin': 'n1kv',
        'neutron_security_groups': self.neutron_security_groups,
        'local_ip': unit_private_ip(),
        'config': plugin_conf,
        'vsm_ip': config('n1kv-vsm-ip'),
        'vsm_username': config('n1kv-vsm-username'),
        'vsm_password': config('n1kv-vsm-password'),
        # NOTE: this charm config key uses underscores, unlike the
        # hyphenated vsm-* keys above.
        'restrict_policy_profiles': config(
            'n1kv_restrict_policy_profiles'),
    }
973 | 614 | |||
974 | 545 | def neutron_ctxt(self): | 615 | def neutron_ctxt(self): |
975 | 546 | if https(): | 616 | if https(): |
976 | 547 | proto = 'https' | 617 | proto = 'https' |
977 | @@ -573,6 +643,8 @@ | |||
978 | 573 | ctxt.update(self.ovs_ctxt()) | 643 | ctxt.update(self.ovs_ctxt()) |
979 | 574 | elif self.plugin in ['nvp', 'nsx']: | 644 | elif self.plugin in ['nvp', 'nsx']: |
980 | 575 | ctxt.update(self.nvp_ctxt()) | 645 | ctxt.update(self.nvp_ctxt()) |
981 | 646 | elif self.plugin == 'n1kv': | ||
982 | 647 | ctxt.update(self.n1kv_ctxt()) | ||
983 | 576 | 648 | ||
984 | 577 | alchemy_flags = config('neutron-alchemy-flags') | 649 | alchemy_flags = config('neutron-alchemy-flags') |
985 | 578 | if alchemy_flags: | 650 | if alchemy_flags: |
986 | @@ -612,7 +684,7 @@ | |||
987 | 612 | The subordinate interface allows subordinates to export their | 684 | The subordinate interface allows subordinates to export their |
988 | 613 | configuration requirements to the principle for multiple config | 685 | configuration requirements to the principle for multiple config |
989 | 614 | files and multiple serivces. Ie, a subordinate that has interfaces | 686 | files and multiple serivces. Ie, a subordinate that has interfaces |
991 | 615 | to both glance and nova may export to following yaml blob as json: | 687 | to both glance and nova may export to following yaml blob as json:: |
992 | 616 | 688 | ||
993 | 617 | glance: | 689 | glance: |
994 | 618 | /etc/glance/glance-api.conf: | 690 | /etc/glance/glance-api.conf: |
995 | @@ -631,7 +703,8 @@ | |||
996 | 631 | 703 | ||
997 | 632 | It is then up to the principle charms to subscribe this context to | 704 | It is then up to the principle charms to subscribe this context to |
998 | 633 | the service+config file it is interestd in. Configuration data will | 705 | the service+config file it is interestd in. Configuration data will |
1000 | 634 | be available in the template context, in glance's case, as: | 706 | be available in the template context, in glance's case, as:: |
1001 | 707 | |||
1002 | 635 | ctxt = { | 708 | ctxt = { |
1003 | 636 | ... other context ... | 709 | ... other context ... |
1004 | 637 | 'subordinate_config': { | 710 | 'subordinate_config': { |
1005 | @@ -684,15 +757,38 @@ | |||
1006 | 684 | 757 | ||
1007 | 685 | sub_config = sub_config[self.config_file] | 758 | sub_config = sub_config[self.config_file] |
1008 | 686 | for k, v in sub_config.iteritems(): | 759 | for k, v in sub_config.iteritems(): |
1018 | 687 | if k == 'sections': | 760 | <<<<<<< TREE |
1019 | 688 | for section, config_dict in v.iteritems(): | 761 | if k == 'sections': |
1020 | 689 | log("adding section '%s'" % (section)) | 762 | for section, config_dict in v.iteritems(): |
1021 | 690 | ctxt[k][section] = config_dict | 763 | log("adding section '%s'" % (section)) |
1022 | 691 | else: | 764 | ctxt[k][section] = config_dict |
1023 | 692 | ctxt[k] = v | 765 | else: |
1024 | 693 | 766 | ctxt[k] = v | |
1025 | 694 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | 767 | |
1026 | 695 | 768 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | |
1027 | 769 | |||
1028 | 770 | ======= | ||
1029 | 771 | if k == 'sections': | ||
1030 | 772 | for section, config_dict in v.iteritems(): | ||
1031 | 773 | log("adding section '%s'" % (section)) | ||
1032 | 774 | ctxt[k][section] = config_dict | ||
1033 | 775 | else: | ||
1034 | 776 | ctxt[k] = v | ||
1035 | 777 | |||
1036 | 778 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | ||
1037 | 779 | |||
1038 | 780 | return ctxt | ||
1039 | 781 | |||
1040 | 782 | |||
1041 | 783 | class LogLevelContext(OSContextGenerator): | ||
1042 | 784 | |||
1043 | 785 | def __call__(self): | ||
1044 | 786 | ctxt = {} | ||
1045 | 787 | ctxt['debug'] = \ | ||
1046 | 788 | False if config('debug') is None else config('debug') | ||
1047 | 789 | ctxt['verbose'] = \ | ||
1048 | 790 | False if config('verbose') is None else config('verbose') | ||
1049 | 791 | >>>>>>> MERGE-SOURCE | ||
1050 | 696 | return ctxt | 792 | return ctxt |
1051 | 697 | 793 | ||
1052 | 698 | 794 | ||
1053 | 699 | 795 | ||
1054 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
1055 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 | |||
1056 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-16 09:08:32 +0000 | |||
1057 | @@ -0,0 +1,79 @@ | |||
1058 | 1 | from charmhelpers.core.hookenv import ( | ||
1059 | 2 | config, | ||
1060 | 3 | unit_get, | ||
1061 | 4 | ) | ||
1062 | 5 | |||
1063 | 6 | from charmhelpers.contrib.network.ip import ( | ||
1064 | 7 | get_address_in_network, | ||
1065 | 8 | is_address_in_network, | ||
1066 | 9 | is_ipv6, | ||
1067 | 10 | get_ipv6_addr, | ||
1068 | 11 | ) | ||
1069 | 12 | |||
1070 | 13 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | ||
1071 | 14 | |||
1072 | 15 | PUBLIC = 'public' | ||
1073 | 16 | INTERNAL = 'int' | ||
1074 | 17 | ADMIN = 'admin' | ||
1075 | 18 | |||
1076 | 19 | _address_map = { | ||
1077 | 20 | PUBLIC: { | ||
1078 | 21 | 'config': 'os-public-network', | ||
1079 | 22 | 'fallback': 'public-address' | ||
1080 | 23 | }, | ||
1081 | 24 | INTERNAL: { | ||
1082 | 25 | 'config': 'os-internal-network', | ||
1083 | 26 | 'fallback': 'private-address' | ||
1084 | 27 | }, | ||
1085 | 28 | ADMIN: { | ||
1086 | 29 | 'config': 'os-admin-network', | ||
1087 | 30 | 'fallback': 'private-address' | ||
1088 | 31 | } | ||
1089 | 32 | } | ||
1090 | 33 | |||
1091 | 34 | |||
def canonical_url(configs, endpoint_type=PUBLIC):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :configs OSTemplateRenderer: A config tempating object to inspect for
                                 a complete https context.
    :endpoint_type str: The endpoint type to resolve.

    :returns str: Base URL for services on the current service unit.
    '''
    # Serve over https only once the https context is fully populated.
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    else:
        scheme = 'http'
    address = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed when embedded in a URL.
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '{}://{}'.format(scheme, address)
1110 | 53 | |||
1111 | 54 | |||
def resolve_address(endpoint_type=PUBLIC):
    '''
    Resolve the IP address to advertise for the given endpoint type,
    based on cluster state and the os-*-network charm configuration.

    :endpoint_type str: one of PUBLIC, INTERNAL or ADMIN.
    :returns str: resolved address.
    :raises ValueError: if no suitable address can be determined.
    '''
    net_config = config(_address_map[endpoint_type]['config'])
    resolved = None
    if is_clustered():
        if net_config is None:
            # Assume vip is simple and pass back directly
            resolved = config('vip')
        else:
            # Choose the vip that sits on the configured network; if
            # several match, the last one wins.
            for vip in config('vip').split():
                if is_address_in_network(net_config, vip):
                    resolved = vip
    else:
        # Not clustered: fall back to this unit's own address.
        if config('prefer-ipv6'):
            fallback = get_ipv6_addr()
        else:
            fallback = unit_get(_address_map[endpoint_type]['fallback'])
        resolved = get_address_in_network(net_config, fallback)

    if resolved is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    return resolved
1137 | 0 | 80 | ||
1138 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
1139 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:38:09 +0000 | |||
1140 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-16 09:08:32 +0000 | |||
1141 | @@ -128,6 +128,20 @@ | |||
1142 | 128 | 'server_packages': ['neutron-server', | 128 | 'server_packages': ['neutron-server', |
1143 | 129 | 'neutron-plugin-vmware'], | 129 | 'neutron-plugin-vmware'], |
1144 | 130 | 'server_services': ['neutron-server'] | 130 | 'server_services': ['neutron-server'] |
1145 | 131 | }, | ||
1146 | 132 | 'n1kv': { | ||
1147 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
1148 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
1149 | 135 | 'contexts': [ | ||
1150 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1151 | 137 | database=config('neutron-database'), | ||
1152 | 138 | relation_prefix='neutron', | ||
1153 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1154 | 140 | 'services': [], | ||
1155 | 141 | 'packages': [['neutron-plugin-cisco']], | ||
1156 | 142 | 'server_packages': ['neutron-server', | ||
1157 | 143 | 'neutron-plugin-cisco'], | ||
1158 | 144 | 'server_services': ['neutron-server'] | ||
1159 | 131 | } | 145 | } |
1160 | 132 | } | 146 | } |
1161 | 133 | if release >= 'icehouse': | 147 | if release >= 'icehouse': |
1162 | 134 | 148 | ||
1163 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
1164 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-02-27 09:26:38 +0000 | |||
1165 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-09-16 09:08:32 +0000 | |||
1166 | @@ -1,6 +1,6 @@ | |||
1167 | 1 | global | 1 | global |
1170 | 2 | log 127.0.0.1 local0 | 2 | log {{ local_host }} local0 |
1171 | 3 | log 127.0.0.1 local1 notice | 3 | log {{ local_host }} local1 notice |
1172 | 4 | maxconn 20000 | 4 | maxconn 20000 |
1173 | 5 | user haproxy | 5 | user haproxy |
1174 | 6 | group haproxy | 6 | group haproxy |
1175 | @@ -17,7 +17,7 @@ | |||
1176 | 17 | timeout client 30000 | 17 | timeout client 30000 |
1177 | 18 | timeout server 30000 | 18 | timeout server 30000 |
1178 | 19 | 19 | ||
1180 | 20 | listen stats :8888 | 20 | listen stats {{ stat_port }} |
1181 | 21 | mode http | 21 | mode http |
1182 | 22 | stats enable | 22 | stats enable |
1183 | 23 | stats hide-version | 23 | stats hide-version |
1184 | @@ -27,7 +27,12 @@ | |||
1185 | 27 | 27 | ||
1186 | 28 | {% if units -%} | 28 | {% if units -%} |
1187 | 29 | {% for service, ports in service_ports.iteritems() -%} | 29 | {% for service, ports in service_ports.iteritems() -%} |
1189 | 30 | listen {{ service }} 0.0.0.0:{{ ports[0] }} | 30 | listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} |
1190 | 31 | balance roundrobin | ||
1191 | 32 | {% for unit, address in units.iteritems() -%} | ||
1192 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
1193 | 34 | {% endfor %} | ||
1194 | 35 | listen {{ service }}_ipv6 :::{{ ports[0] }} | ||
1195 | 31 | balance roundrobin | 36 | balance roundrobin |
1196 | 32 | {% for unit, address in units.iteritems() -%} | 37 | {% for unit, address in units.iteritems() -%} |
1197 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | 38 | server {{ unit }} {{ address }}:{{ ports[1] }} check |
1198 | 34 | 39 | ||
1199 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
1200 | --- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000 | |||
1201 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-16 09:08:32 +0000 | |||
1202 | @@ -30,17 +30,17 @@ | |||
1203 | 30 | loading dir. | 30 | loading dir. |
1204 | 31 | 31 | ||
1205 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
1217 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
1218 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
1219 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
1220 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
1221 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
1222 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
1223 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
1224 | 40 | 40 | loader. | |
1225 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
1226 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
1227 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
1228 | 44 | """ | 44 | """ |
1229 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
1230 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
1231 | @@ -111,7 +111,8 @@ | |||
1232 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
1233 | 112 | releases. | 112 | releases. |
1234 | 113 | 113 | ||
1236 | 114 | Basic usage: | 114 | Basic usage:: |
1237 | 115 | |||
1238 | 115 | # import some common context generators from charmhelpers | 116 | # import some common context generators from charmhelpers |
1239 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
1240 | 117 | 118 | ||
1241 | @@ -131,21 +132,19 @@ | |||
1242 | 131 | # write out all registered configs | 132 | # write out all registered configs |
1243 | 132 | configs.write_all() | 133 | configs.write_all() |
1244 | 133 | 134 | ||
1246 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
1247 | 135 | 136 | ||
1248 | 136 | OpenStack Releases and template loading | ||
1249 | 137 | --------------------------------------- | ||
1250 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
1251 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
1252 | 140 | 139 | ||
1253 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
1254 | 142 | in the following order: | 141 | in the following order: |
1261 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
1262 | 144 | - the base templates_dir | 143 | - the base templates_dir |
1263 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
1264 | 146 | 145 | ||
1265 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
1266 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
1267 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
1268 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
1269 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
1270 | @@ -169,8 +168,8 @@ | |||
1271 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
1272 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
1273 | 171 | 170 | ||
1276 | 172 | Context generators | 171 | **Context generators** |
1277 | 173 | --------------------------------------- | 172 | |
1278 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
1279 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
1280 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
1281 | 177 | 176 | ||
1282 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
1283 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:14:03 +0000 | |||
1284 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-16 09:08:32 +0000 | |||
1285 | @@ -3,7 +3,6 @@ | |||
1286 | 3 | # Common python helper functions used for OpenStack charms. | 3 | # Common python helper functions used for OpenStack charms. |
1287 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1288 | 5 | 5 | ||
1289 | 6 | import apt_pkg as apt | ||
1290 | 7 | import subprocess | 6 | import subprocess |
1291 | 8 | import os | 7 | import os |
1292 | 9 | import socket | 8 | import socket |
1293 | @@ -41,7 +40,8 @@ | |||
1294 | 41 | ('quantal', 'folsom'), | 40 | ('quantal', 'folsom'), |
1295 | 42 | ('raring', 'grizzly'), | 41 | ('raring', 'grizzly'), |
1296 | 43 | ('saucy', 'havana'), | 42 | ('saucy', 'havana'), |
1298 | 44 | ('trusty', 'icehouse') | 43 | ('trusty', 'icehouse'), |
1299 | 44 | ('utopic', 'juno'), | ||
1300 | 45 | ]) | 45 | ]) |
1301 | 46 | 46 | ||
1302 | 47 | 47 | ||
1303 | @@ -52,6 +52,7 @@ | |||
1304 | 52 | ('2013.1', 'grizzly'), | 52 | ('2013.1', 'grizzly'), |
1305 | 53 | ('2013.2', 'havana'), | 53 | ('2013.2', 'havana'), |
1306 | 54 | ('2014.1', 'icehouse'), | 54 | ('2014.1', 'icehouse'), |
1307 | 55 | ('2014.2', 'juno'), | ||
1308 | 55 | ]) | 56 | ]) |
1309 | 56 | 57 | ||
1310 | 57 | # The ugly duckling | 58 | # The ugly duckling |
1311 | @@ -83,6 +84,8 @@ | |||
1312 | 83 | '''Derive OpenStack release codename from a given installation source.''' | 84 | '''Derive OpenStack release codename from a given installation source.''' |
1313 | 84 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 85 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
1314 | 85 | rel = '' | 86 | rel = '' |
1315 | 87 | if src is None: | ||
1316 | 88 | return rel | ||
1317 | 86 | if src in ['distro', 'distro-proposed']: | 89 | if src in ['distro', 'distro-proposed']: |
1318 | 87 | try: | 90 | try: |
1319 | 88 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 91 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
1320 | @@ -130,8 +133,14 @@ | |||
1321 | 130 | 133 | ||
1322 | 131 | def get_os_codename_package(package, fatal=True): | 134 | def get_os_codename_package(package, fatal=True): |
1323 | 132 | '''Derive OpenStack release codename from an installed package.''' | 135 | '''Derive OpenStack release codename from an installed package.''' |
1326 | 133 | 136 | <<<<<<< TREE | |
1327 | 134 | cache = apt_cache() | 137 | |
1328 | 138 | cache = apt_cache() | ||
1329 | 139 | ======= | ||
1330 | 140 | import apt_pkg as apt | ||
1331 | 141 | |||
1332 | 142 | cache = apt_cache() | ||
1333 | 143 | >>>>>>> MERGE-SOURCE | ||
1334 | 135 | 144 | ||
1335 | 136 | try: | 145 | try: |
1336 | 137 | pkg = cache[package] | 146 | pkg = cache[package] |
1337 | @@ -182,7 +191,7 @@ | |||
1338 | 182 | for version, cname in vers_map.iteritems(): | 191 | for version, cname in vers_map.iteritems(): |
1339 | 183 | if cname == codename: | 192 | if cname == codename: |
1340 | 184 | return version | 193 | return version |
1342 | 185 | #e = "Could not determine OpenStack version for package: %s" % pkg | 194 | # e = "Could not determine OpenStack version for package: %s" % pkg |
1343 | 186 | # error_out(e) | 195 | # error_out(e) |
1344 | 187 | 196 | ||
1345 | 188 | 197 | ||
1346 | @@ -268,6 +277,9 @@ | |||
1347 | 268 | 'icehouse': 'precise-updates/icehouse', | 277 | 'icehouse': 'precise-updates/icehouse', |
1348 | 269 | 'icehouse/updates': 'precise-updates/icehouse', | 278 | 'icehouse/updates': 'precise-updates/icehouse', |
1349 | 270 | 'icehouse/proposed': 'precise-proposed/icehouse', | 279 | 'icehouse/proposed': 'precise-proposed/icehouse', |
1350 | 280 | 'juno': 'trusty-updates/juno', | ||
1351 | 281 | 'juno/updates': 'trusty-updates/juno', | ||
1352 | 282 | 'juno/proposed': 'trusty-proposed/juno', | ||
1353 | 271 | } | 283 | } |
1354 | 272 | 284 | ||
1355 | 273 | try: | 285 | try: |
1356 | @@ -315,6 +327,7 @@ | |||
1357 | 315 | 327 | ||
1358 | 316 | """ | 328 | """ |
1359 | 317 | 329 | ||
1360 | 330 | import apt_pkg as apt | ||
1361 | 318 | src = config('openstack-origin') | 331 | src = config('openstack-origin') |
1362 | 319 | cur_vers = get_os_version_package(package) | 332 | cur_vers = get_os_version_package(package) |
1363 | 320 | available_vers = get_os_version_install_source(src) | 333 | available_vers = get_os_version_install_source(src) |
1364 | 321 | 334 | ||
1365 | === added directory 'hooks/charmhelpers/contrib/peerstorage' | |||
1366 | === added file 'hooks/charmhelpers/contrib/peerstorage/__init__.py' | |||
1367 | --- hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000 | |||
1368 | +++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-16 09:08:32 +0000 | |||
1369 | @@ -0,0 +1,83 @@ | |||
1370 | 1 | from charmhelpers.core.hookenv import ( | ||
1371 | 2 | relation_ids, | ||
1372 | 3 | relation_get, | ||
1373 | 4 | local_unit, | ||
1374 | 5 | relation_set, | ||
1375 | 6 | ) | ||
1376 | 7 | |||
1377 | 8 | """ | ||
1378 | 9 | This helper provides functions to support use of a peer relation | ||
1379 | 10 | for basic key/value storage, with the added benefit that all storage | ||
1380 | 11 | can be replicated across peer units, so this is really useful for | ||
1381 | 12 | services that issue usernames/passwords to remote services. | ||
1382 | 13 | |||
1383 | 14 | def shared_db_changed() | ||
1384 | 15 | # Only the lead unit should create passwords | ||
1385 | 16 | if not is_leader(): | ||
1386 | 17 | return | ||
1387 | 18 | username = relation_get('username') | ||
1388 | 19 | key = '{}.password'.format(username) | ||
1389 | 20 | # Attempt to retrieve any existing password for this user | ||
1390 | 21 | password = peer_retrieve(key) | ||
1391 | 22 | if password is None: | ||
1392 | 23 | # New user, create password and store | ||
1393 | 24 | password = pwgen(length=64) | ||
1394 | 25 | peer_store(key, password) | ||
1395 | 26 | create_access(username, password) | ||
1396 | 27 | relation_set(password=password) | ||
1397 | 28 | |||
1398 | 29 | |||
1399 | 30 | def cluster_changed() | ||
1400 | 31 | # Echo any relation data other than *-address | ||
1401 | 32 | # back onto the peer relation so all units have | ||
1402 | 33 | # all *.password keys stored on their local relation | ||
1403 | 34 | # for later retrieval. | ||
1404 | 35 | peer_echo() | ||
1405 | 36 | |||
1406 | 37 | """ | ||
1407 | 38 | |||
1408 | 39 | |||
1409 | 40 | def peer_retrieve(key, relation_name='cluster'): | ||
1410 | 41 | """ Retrieve a named key from peer relation relation_name """ | ||
1411 | 42 | cluster_rels = relation_ids(relation_name) | ||
1412 | 43 | if len(cluster_rels) > 0: | ||
1413 | 44 | cluster_rid = cluster_rels[0] | ||
1414 | 45 | return relation_get(attribute=key, rid=cluster_rid, | ||
1415 | 46 | unit=local_unit()) | ||
1416 | 47 | else: | ||
1417 | 48 | raise ValueError('Unable to detect' | ||
1418 | 49 | 'peer relation {}'.format(relation_name)) | ||
1419 | 50 | |||
1420 | 51 | |||
1421 | 52 | def peer_store(key, value, relation_name='cluster'): | ||
1422 | 53 | """ Store the key/value pair on the named peer relation relation_name """ | ||
1423 | 54 | cluster_rels = relation_ids(relation_name) | ||
1424 | 55 | if len(cluster_rels) > 0: | ||
1425 | 56 | cluster_rid = cluster_rels[0] | ||
1426 | 57 | relation_set(relation_id=cluster_rid, | ||
1427 | 58 | relation_settings={key: value}) | ||
1428 | 59 | else: | ||
1429 | 60 | raise ValueError('Unable to detect ' | ||
1430 | 61 | 'peer relation {}'.format(relation_name)) | ||
1431 | 62 | |||
1432 | 63 | |||
1433 | 64 | def peer_echo(includes=None): | ||
1434 | 65 | """Echo filtered attributes back onto the same relation for storage | ||
1435 | 66 | |||
1436 | 67 | Note that this helper must only be called within a peer relation | ||
1437 | 68 | changed hook | ||
1438 | 69 | """ | ||
1439 | 70 | rdata = relation_get() | ||
1440 | 71 | echo_data = {} | ||
1441 | 72 | if includes is None: | ||
1442 | 73 | echo_data = rdata.copy() | ||
1443 | 74 | for ex in ['private-address', 'public-address']: | ||
1444 | 75 | if ex in echo_data: | ||
1445 | 76 | echo_data.pop(ex) | ||
1446 | 77 | else: | ||
1447 | 78 | for attribute, value in rdata.iteritems(): | ||
1448 | 79 | for include in includes: | ||
1449 | 80 | if include in attribute: | ||
1450 | 81 | echo_data[attribute] = value | ||
1451 | 82 | if len(echo_data) > 0: | ||
1452 | 83 | relation_set(relation_settings=echo_data) | ||
1453 | 0 | 84 | ||
1454 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
1455 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000 | |||
1456 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-16 09:08:32 +0000 | |||
1457 | @@ -303,7 +303,7 @@ | |||
1458 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
1459 | 304 | """ | 304 | """ |
1460 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
1462 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
1463 | 307 | 307 | ||
1464 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
1465 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
1466 | 310 | 310 | ||
1467 | === added file 'hooks/charmhelpers/core/fstab.py' | |||
1468 | --- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000 | |||
1469 | +++ hooks/charmhelpers/core/fstab.py 2014-09-16 09:08:32 +0000 | |||
1470 | @@ -0,0 +1,116 @@ | |||
1471 | 1 | #!/usr/bin/env python | ||
1472 | 2 | # -*- coding: utf-8 -*- | ||
1473 | 3 | |||
1474 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
1475 | 5 | |||
1476 | 6 | import os | ||
1477 | 7 | |||
1478 | 8 | |||
1479 | 9 | class Fstab(file): | ||
1480 | 10 | """This class extends file in order to implement a file reader/writer | ||
1481 | 11 | for file `/etc/fstab` | ||
1482 | 12 | """ | ||
1483 | 13 | |||
1484 | 14 | class Entry(object): | ||
1485 | 15 | """Entry class represents a non-comment line on the `/etc/fstab` file | ||
1486 | 16 | """ | ||
1487 | 17 | def __init__(self, device, mountpoint, filesystem, | ||
1488 | 18 | options, d=0, p=0): | ||
1489 | 19 | self.device = device | ||
1490 | 20 | self.mountpoint = mountpoint | ||
1491 | 21 | self.filesystem = filesystem | ||
1492 | 22 | |||
1493 | 23 | if not options: | ||
1494 | 24 | options = "defaults" | ||
1495 | 25 | |||
1496 | 26 | self.options = options | ||
1497 | 27 | self.d = d | ||
1498 | 28 | self.p = p | ||
1499 | 29 | |||
1500 | 30 | def __eq__(self, o): | ||
1501 | 31 | return str(self) == str(o) | ||
1502 | 32 | |||
1503 | 33 | def __str__(self): | ||
1504 | 34 | return "{} {} {} {} {} {}".format(self.device, | ||
1505 | 35 | self.mountpoint, | ||
1506 | 36 | self.filesystem, | ||
1507 | 37 | self.options, | ||
1508 | 38 | self.d, | ||
1509 | 39 | self.p) | ||
1510 | 40 | |||
1511 | 41 | DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||
1512 | 42 | |||
1513 | 43 | def __init__(self, path=None): | ||
1514 | 44 | if path: | ||
1515 | 45 | self._path = path | ||
1516 | 46 | else: | ||
1517 | 47 | self._path = self.DEFAULT_PATH | ||
1518 | 48 | file.__init__(self, self._path, 'r+') | ||
1519 | 49 | |||
1520 | 50 | def _hydrate_entry(self, line): | ||
1521 | 51 | # NOTE: use split with no arguments to split on any | ||
1522 | 52 | # whitespace including tabs | ||
1523 | 53 | return Fstab.Entry(*filter( | ||
1524 | 54 | lambda x: x not in ('', None), | ||
1525 | 55 | line.strip("\n").split())) | ||
1526 | 56 | |||
1527 | 57 | @property | ||
1528 | 58 | def entries(self): | ||
1529 | 59 | self.seek(0) | ||
1530 | 60 | for line in self.readlines(): | ||
1531 | 61 | try: | ||
1532 | 62 | if not line.startswith("#"): | ||
1533 | 63 | yield self._hydrate_entry(line) | ||
1534 | 64 | except ValueError: | ||
1535 | 65 | pass | ||
1536 | 66 | |||
1537 | 67 | def get_entry_by_attr(self, attr, value): | ||
1538 | 68 | for entry in self.entries: | ||
1539 | 69 | e_attr = getattr(entry, attr) | ||
1540 | 70 | if e_attr == value: | ||
1541 | 71 | return entry | ||
1542 | 72 | return None | ||
1543 | 73 | |||
1544 | 74 | def add_entry(self, entry): | ||
1545 | 75 | if self.get_entry_by_attr('device', entry.device): | ||
1546 | 76 | return False | ||
1547 | 77 | |||
1548 | 78 | self.write(str(entry) + '\n') | ||
1549 | 79 | self.truncate() | ||
1550 | 80 | return entry | ||
1551 | 81 | |||
1552 | 82 | def remove_entry(self, entry): | ||
1553 | 83 | self.seek(0) | ||
1554 | 84 | |||
1555 | 85 | lines = self.readlines() | ||
1556 | 86 | |||
1557 | 87 | found = False | ||
1558 | 88 | for index, line in enumerate(lines): | ||
1559 | 89 | if not line.startswith("#"): | ||
1560 | 90 | if self._hydrate_entry(line) == entry: | ||
1561 | 91 | found = True | ||
1562 | 92 | break | ||
1563 | 93 | |||
1564 | 94 | if not found: | ||
1565 | 95 | return False | ||
1566 | 96 | |||
1567 | 97 | lines.remove(line) | ||
1568 | 98 | |||
1569 | 99 | self.seek(0) | ||
1570 | 100 | self.write(''.join(lines)) | ||
1571 | 101 | self.truncate() | ||
1572 | 102 | return True | ||
1573 | 103 | |||
1574 | 104 | @classmethod | ||
1575 | 105 | def remove_by_mountpoint(cls, mountpoint, path=None): | ||
1576 | 106 | fstab = cls(path=path) | ||
1577 | 107 | entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||
1578 | 108 | if entry: | ||
1579 | 109 | return fstab.remove_entry(entry) | ||
1580 | 110 | return False | ||
1581 | 111 | |||
1582 | 112 | @classmethod | ||
1583 | 113 | def add(cls, device, mountpoint, filesystem, options=None, path=None): | ||
1584 | 114 | return cls(path=path).add_entry(Fstab.Entry(device, | ||
1585 | 115 | mountpoint, filesystem, | ||
1586 | 116 | options=options)) | ||
1587 | 0 | 117 | ||
1588 | === renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved' | |||
1589 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1590 | --- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000 | |||
1591 | +++ hooks/charmhelpers/core/hookenv.py 2014-09-16 09:08:32 +0000 | |||
1592 | @@ -25,7 +25,7 @@ | |||
1593 | 25 | def cached(func): | 25 | def cached(func): |
1594 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
1595 | 27 | 27 | ||
1597 | 28 | For example: | 28 | For example:: |
1598 | 29 | 29 | ||
1599 | 30 | @cached | 30 | @cached |
1600 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
1601 | @@ -285,8 +285,9 @@ | |||
1602 | 285 | raise | 285 | raise |
1603 | 286 | 286 | ||
1604 | 287 | 287 | ||
1606 | 288 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 288 | def relation_set(relation_id=None, relation_settings=None, **kwargs): |
1607 | 289 | """Set relation information for the current unit""" | 289 | """Set relation information for the current unit""" |
1608 | 290 | relation_settings = relation_settings if relation_settings else {} | ||
1609 | 290 | relation_cmd_line = ['relation-set'] | 291 | relation_cmd_line = ['relation-set'] |
1610 | 291 | if relation_id is not None: | 292 | if relation_id is not None: |
1611 | 292 | relation_cmd_line.extend(('-r', relation_id)) | 293 | relation_cmd_line.extend(('-r', relation_id)) |
1612 | @@ -445,18 +446,19 @@ | |||
1613 | 445 | class Hooks(object): | 446 | class Hooks(object): |
1614 | 446 | """A convenient handler for hook functions. | 447 | """A convenient handler for hook functions. |
1615 | 447 | 448 | ||
1617 | 448 | Example: | 449 | Example:: |
1618 | 450 | |||
1619 | 449 | hooks = Hooks() | 451 | hooks = Hooks() |
1620 | 450 | 452 | ||
1621 | 451 | # register a hook, taking its name from the function name | 453 | # register a hook, taking its name from the function name |
1622 | 452 | @hooks.hook() | 454 | @hooks.hook() |
1623 | 453 | def install(): | 455 | def install(): |
1625 | 454 | ... | 456 | pass # your code here |
1626 | 455 | 457 | ||
1627 | 456 | # register a hook, providing a custom hook name | 458 | # register a hook, providing a custom hook name |
1628 | 457 | @hooks.hook("config-changed") | 459 | @hooks.hook("config-changed") |
1629 | 458 | def config_changed(): | 460 | def config_changed(): |
1631 | 459 | ... | 461 | pass # your code here |
1632 | 460 | 462 | ||
1633 | 461 | if __name__ == "__main__": | 463 | if __name__ == "__main__": |
1634 | 462 | # execute a hook based on the name the program is called by | 464 | # execute a hook based on the name the program is called by |
1635 | 463 | 465 | ||
1636 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1637 | --- hooks/charmhelpers/core/host.py 2014-08-27 07:14:03 +0000 | |||
1638 | +++ hooks/charmhelpers/core/host.py 2014-09-16 09:08:32 +0000 | |||
1639 | @@ -12,7 +12,8 @@ | |||
1640 | 12 | import string | 12 | import string |
1641 | 13 | import subprocess | 13 | import subprocess |
1642 | 14 | import hashlib | 14 | import hashlib |
1644 | 15 | import apt_pkg | 15 | import shutil |
1645 | 16 | from contextlib import contextmanager | ||
1646 | 16 | 17 | ||
1647 | 17 | from collections import OrderedDict | 18 | from collections import OrderedDict |
1648 | 18 | 19 | ||
1649 | @@ -53,7 +54,7 @@ | |||
1650 | 53 | def service_running(service): | 54 | def service_running(service): |
1651 | 54 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
1652 | 55 | try: | 56 | try: |
1654 | 56 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
1655 | 57 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
1656 | 58 | return False | 59 | return False |
1657 | 59 | else: | 60 | else: |
1658 | @@ -63,6 +64,16 @@ | |||
1659 | 63 | return False | 64 | return False |
1660 | 64 | 65 | ||
1661 | 65 | 66 | ||
1662 | 67 | def service_available(service_name): | ||
1663 | 68 | """Determine whether a system service is available""" | ||
1664 | 69 | try: | ||
1665 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
1666 | 71 | except subprocess.CalledProcessError: | ||
1667 | 72 | return False | ||
1668 | 73 | else: | ||
1669 | 74 | return True | ||
1670 | 75 | |||
1671 | 76 | |||
1672 | 66 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
1673 | 67 | """Add a user to the system""" | 78 | """Add a user to the system""" |
1674 | 68 | try: | 79 | try: |
1675 | @@ -212,13 +223,13 @@ | |||
1676 | 212 | def restart_on_change(restart_map, stopstart=False): | 223 | def restart_on_change(restart_map, stopstart=False): |
1677 | 213 | """Restart services based on configuration files changing | 224 | """Restart services based on configuration files changing |
1678 | 214 | 225 | ||
1680 | 215 | This function is used as a decorator, for example | 226 | This function is used as a decorator, for example:: |
1681 | 216 | 227 | ||
1682 | 217 | @restart_on_change({ | 228 | @restart_on_change({ |
1683 | 218 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 229 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
1684 | 219 | }) | 230 | }) |
1685 | 220 | def ceph_client_changed(): | 231 | def ceph_client_changed(): |
1687 | 221 | ... | 232 | pass # your code here |
1688 | 222 | 233 | ||
1689 | 223 | In this example, the cinder-api and cinder-volume services | 234 | In this example, the cinder-api and cinder-volume services |
1690 | 224 | would be restarted if /etc/ceph/ceph.conf is changed by the | 235 | would be restarted if /etc/ceph/ceph.conf is changed by the |
1691 | @@ -314,12 +325,40 @@ | |||
1692 | 314 | 325 | ||
1693 | 315 | def cmp_pkgrevno(package, revno, pkgcache=None): | 326 | def cmp_pkgrevno(package, revno, pkgcache=None): |
1694 | 316 | '''Compare supplied revno with the revno of the installed package | 327 | '''Compare supplied revno with the revno of the installed package |
1698 | 317 | 1 => Installed revno is greater than supplied arg | 328 | |
1699 | 318 | 0 => Installed revno is the same as supplied arg | 329 | * 1 => Installed revno is greater than supplied arg |
1700 | 319 | -1 => Installed revno is less than supplied arg | 330 | * 0 => Installed revno is the same as supplied arg |
1701 | 331 | * -1 => Installed revno is less than supplied arg | ||
1702 | 332 | |||
1703 | 320 | ''' | 333 | ''' |
1705 | 321 | from charmhelpers.fetch import apt_cache | 334 | <<<<<<< TREE |
1706 | 335 | from charmhelpers.fetch import apt_cache | ||
1707 | 336 | ======= | ||
1708 | 337 | import apt_pkg | ||
1709 | 338 | from charmhelpers.fetch import apt_cache | ||
1710 | 339 | >>>>>>> MERGE-SOURCE | ||
1711 | 322 | if not pkgcache: | 340 | if not pkgcache: |
1712 | 323 | pkgcache = apt_cache() | 341 | pkgcache = apt_cache() |
1713 | 324 | pkg = pkgcache[package] | 342 | pkg = pkgcache[package] |
1714 | 325 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 343 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
1715 | 344 | |||
1716 | 345 | |||
1717 | 346 | @contextmanager | ||
1718 | 347 | def chdir(d): | ||
1719 | 348 | cur = os.getcwd() | ||
1720 | 349 | try: | ||
1721 | 350 | yield os.chdir(d) | ||
1722 | 351 | finally: | ||
1723 | 352 | os.chdir(cur) | ||
1724 | 353 | |||
1725 | 354 | |||
1726 | 355 | def chownr(path, owner, group): | ||
1727 | 356 | uid = pwd.getpwnam(owner).pw_uid | ||
1728 | 357 | gid = grp.getgrnam(group).gr_gid | ||
1729 | 358 | |||
1730 | 359 | for root, dirs, files in os.walk(path): | ||
1731 | 360 | for name in dirs + files: | ||
1732 | 361 | full = os.path.join(root, name) | ||
1733 | 362 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | ||
1734 | 363 | if not broken_symlink: | ||
1735 | 364 | os.chown(full, uid, gid) | ||
1736 | 326 | 365 | ||
1737 | === added directory 'hooks/charmhelpers/core/services' | |||
1738 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
1739 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
1740 | +++ hooks/charmhelpers/core/services/__init__.py 2014-09-16 09:08:32 +0000 | |||
1741 | @@ -0,0 +1,2 @@ | |||
1742 | 1 | from .base import * | ||
1743 | 2 | from .helpers import * | ||
1744 | 0 | 3 | ||
1745 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
1746 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
1747 | +++ hooks/charmhelpers/core/services/base.py 2014-09-16 09:08:32 +0000 | |||
1748 | @@ -0,0 +1,310 @@ | |||
1749 | 1 | import os | ||
1750 | 2 | import re | ||
1751 | 3 | import json | ||
1752 | 4 | from collections import Iterable | ||
1753 | 5 | |||
1754 | 6 | from charmhelpers.core import host | ||
1755 | 7 | from charmhelpers.core import hookenv | ||
1756 | 8 | |||
1757 | 9 | |||
1758 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
1759 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
1760 | 12 | 'service_restart', 'service_stop'] | ||
1761 | 13 | |||
1762 | 14 | |||
1763 | 15 | class ServiceManager(object): | ||
1764 | 16 | def __init__(self, services=None): | ||
1765 | 17 | """ | ||
1766 | 18 | Register a list of services, given their definitions. | ||
1767 | 19 | |||
1768 | 20 | Service definitions are dicts in the following formats (all keys except | ||
1769 | 21 | 'service' are optional):: | ||
1770 | 22 | |||
1771 | 23 | { | ||
1772 | 24 | "service": <service name>, | ||
1773 | 25 | "required_data": <list of required data contexts>, | ||
1774 | 26 | "provided_data": <list of provided data contexts>, | ||
1775 | 27 | "data_ready": <one or more callbacks>, | ||
1776 | 28 | "data_lost": <one or more callbacks>, | ||
1777 | 29 | "start": <one or more callbacks>, | ||
1778 | 30 | "stop": <one or more callbacks>, | ||
1779 | 31 | "ports": <list of ports to manage>, | ||
1780 | 32 | } | ||
1781 | 33 | |||
1782 | 34 | The 'required_data' list should contain dicts of required data (or | ||
1783 | 35 | dependency managers that act like dicts and know how to collect the data). | ||
1784 | 36 | Only when all items in the 'required_data' list are populated are the list | ||
1785 | 37 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
1786 | 38 | information. | ||
1787 | 39 | |||
1788 | 40 | The 'provided_data' list should contain relation data providers, most likely | ||
1789 | 41 | a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, | ||
1790 | 42 | that will indicate a set of data to set on a given relation. | ||
1791 | 43 | |||
1792 | 44 | The 'data_ready' value should be either a single callback, or a list of | ||
1793 | 45 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
1794 | 46 | Each callback will be called with the service name as the only parameter. | ||
1795 | 47 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
1796 | 48 | are fired. | ||
1797 | 49 | |||
1798 | 50 | The 'data_lost' value should be either a single callback, or a list of | ||
1799 | 51 | callbacks, to be called when a 'required_data' item no longer passes | ||
1800 | 52 | `is_ready()`. Each callback will be called with the service name as the | ||
1801 | 53 | only parameter. After all of the 'data_lost' callbacks are called, | ||
1802 | 54 | the 'stop' callbacks are fired. | ||
1803 | 55 | |||
1804 | 56 | The 'start' value should be either a single callback, or a list of | ||
1805 | 57 | callbacks, to be called when starting the service, after the 'data_ready' | ||
1806 | 58 | callbacks are complete. Each callback will be called with the service | ||
1807 | 59 | name as the only parameter. This defaults to | ||
1808 | 60 | `[host.service_start, services.open_ports]`. | ||
1809 | 61 | |||
1810 | 62 | The 'stop' value should be either a single callback, or a list of | ||
1811 | 63 | callbacks, to be called when stopping the service. If the service is | ||
1812 | 64 | being stopped because it no longer has all of its 'required_data', this | ||
1813 | 65 | will be called after all of the 'data_lost' callbacks are complete. | ||
1814 | 66 | Each callback will be called with the service name as the only parameter. | ||
1815 | 67 | This defaults to `[services.close_ports, host.service_stop]`. | ||
1816 | 68 | |||
1817 | 69 | The 'ports' value should be a list of ports to manage. The default | ||
1818 | 70 | 'start' handler will open the ports after the service is started, | ||
1819 | 71 | and the default 'stop' handler will close the ports prior to stopping | ||
1820 | 72 | the service. | ||
1821 | 73 | |||
1822 | 74 | |||
1823 | 75 | Examples: | ||
1824 | 76 | |||
1825 | 77 | The following registers an Upstart service called bingod that depends on | ||
1826 | 78 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
1827 | 79 | restarting the service, and a Runit service called spadesd:: | ||
1828 | 80 | |||
1829 | 81 | manager = services.ServiceManager([ | ||
1830 | 82 | { | ||
1831 | 83 | 'service': 'bingod', | ||
1832 | 84 | 'ports': [80, 443], | ||
1833 | 85 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
1834 | 86 | 'data_ready': [ | ||
1835 | 87 | services.template(source='bingod.conf'), | ||
1836 | 88 | services.template(source='bingod.ini', | ||
1837 | 89 | target='/etc/bingod.ini', | ||
1838 | 90 | owner='bingo', perms=0400), | ||
1839 | 91 | ], | ||
1840 | 92 | }, | ||
1841 | 93 | { | ||
1842 | 94 | 'service': 'spadesd', | ||
1843 | 95 | 'data_ready': services.template(source='spadesd_run.j2', | ||
1844 | 96 | target='/etc/sv/spadesd/run', | ||
1845 | 97 | perms=0555), | ||
1846 | 98 | 'start': runit_start, | ||
1847 | 99 | 'stop': runit_stop, | ||
1848 | 100 | }, | ||
1849 | 101 | ]) | ||
1850 | 102 | manager.manage() | ||
1851 | 103 | """ | ||
1852 | 104 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
1853 | 105 | self._ready = None | ||
1854 | 106 | self.services = {} | ||
1855 | 107 | for service in services or []: | ||
1856 | 108 | service_name = service['service'] | ||
1857 | 109 | self.services[service_name] = service | ||
1858 | 110 | |||
1859 | 111 | def manage(self): | ||
1860 | 112 | """ | ||
1861 | 113 | Handle the current hook by doing The Right Thing with the registered services. | ||
1862 | 114 | """ | ||
1863 | 115 | hook_name = hookenv.hook_name() | ||
1864 | 116 | if hook_name == 'stop': | ||
1865 | 117 | self.stop_services() | ||
1866 | 118 | else: | ||
1867 | 119 | self.provide_data() | ||
1868 | 120 | self.reconfigure_services() | ||
1869 | 121 | |||
1870 | 122 | def provide_data(self): | ||
1871 | 123 | """ | ||
1872 | 124 | Set the relation data for each provider in the ``provided_data`` list. | ||
1873 | 125 | |||
1874 | 126 | A provider must have a `name` attribute, which indicates which relation | ||
1875 | 127 | to set data on, and a `provide_data()` method, which returns a dict of | ||
1876 | 128 | data to set. | ||
1877 | 129 | """ | ||
1878 | 130 | hook_name = hookenv.hook_name() | ||
1879 | 131 | for service in self.services.values(): | ||
1880 | 132 | for provider in service.get('provided_data', []): | ||
1881 | 133 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | ||
1882 | 134 | data = provider.provide_data() | ||
1883 | 135 | _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data | ||
1884 | 136 | if _ready: | ||
1885 | 137 | hookenv.relation_set(None, data) | ||
1886 | 138 | |||
1887 | 139 | def reconfigure_services(self, *service_names): | ||
1888 | 140 | """ | ||
1889 | 141 | Update all files for one or more registered services, and, | ||
1890 | 142 | if ready, optionally restart them. | ||
1891 | 143 | |||
1892 | 144 | If no service names are given, reconfigures all registered services. | ||
1893 | 145 | """ | ||
1894 | 146 | for service_name in service_names or self.services.keys(): | ||
1895 | 147 | if self.is_ready(service_name): | ||
1896 | 148 | self.fire_event('data_ready', service_name) | ||
1897 | 149 | self.fire_event('start', service_name, default=[ | ||
1898 | 150 | service_restart, | ||
1899 | 151 | manage_ports]) | ||
1900 | 152 | self.save_ready(service_name) | ||
1901 | 153 | else: | ||
1902 | 154 | if self.was_ready(service_name): | ||
1903 | 155 | self.fire_event('data_lost', service_name) | ||
1904 | 156 | self.fire_event('stop', service_name, default=[ | ||
1905 | 157 | manage_ports, | ||
1906 | 158 | service_stop]) | ||
1907 | 159 | self.save_lost(service_name) | ||
1908 | 160 | |||
1909 | 161 | def stop_services(self, *service_names): | ||
1910 | 162 | """ | ||
1911 | 163 | Stop one or more registered services, by name. | ||
1912 | 164 | |||
1913 | 165 | If no service names are given, stops all registered services. | ||
1914 | 166 | """ | ||
1915 | 167 | for service_name in service_names or self.services.keys(): | ||
1916 | 168 | self.fire_event('stop', service_name, default=[ | ||
1917 | 169 | manage_ports, | ||
1918 | 170 | service_stop]) | ||
1919 | 171 | |||
1920 | 172 | def get_service(self, service_name): | ||
1921 | 173 | """ | ||
1922 | 174 | Given the name of a registered service, return its service definition. | ||
1923 | 175 | """ | ||
1924 | 176 | service = self.services.get(service_name) | ||
1925 | 177 | if not service: | ||
1926 | 178 | raise KeyError('Service not registered: %s' % service_name) | ||
1927 | 179 | return service | ||
1928 | 180 | |||
1929 | 181 | def fire_event(self, event_name, service_name, default=None): | ||
1930 | 182 | """ | ||
1931 | 183 | Fire a data_ready, data_lost, start, or stop event on a given service. | ||
1932 | 184 | """ | ||
1933 | 185 | service = self.get_service(service_name) | ||
1934 | 186 | callbacks = service.get(event_name, default) | ||
1935 | 187 | if not callbacks: | ||
1936 | 188 | return | ||
1937 | 189 | if not isinstance(callbacks, Iterable): | ||
1938 | 190 | callbacks = [callbacks] | ||
1939 | 191 | for callback in callbacks: | ||
1940 | 192 | if isinstance(callback, ManagerCallback): | ||
1941 | 193 | callback(self, service_name, event_name) | ||
1942 | 194 | else: | ||
1943 | 195 | callback(service_name) | ||
1944 | 196 | |||
1945 | 197 | def is_ready(self, service_name): | ||
1946 | 198 | """ | ||
1947 | 199 | Determine if a registered service is ready, by checking its 'required_data'. | ||
1948 | 200 | |||
1949 | 201 | A 'required_data' item can be any mapping type, and is considered ready | ||
1950 | 202 | if `bool(item)` evaluates as True. | ||
1951 | 203 | """ | ||
1952 | 204 | service = self.get_service(service_name) | ||
1953 | 205 | reqs = service.get('required_data', []) | ||
1954 | 206 | return all(bool(req) for req in reqs) | ||
1955 | 207 | |||
1956 | 208 | def _load_ready_file(self): | ||
1957 | 209 | if self._ready is not None: | ||
1958 | 210 | return | ||
1959 | 211 | if os.path.exists(self._ready_file): | ||
1960 | 212 | with open(self._ready_file) as fp: | ||
1961 | 213 | self._ready = set(json.load(fp)) | ||
1962 | 214 | else: | ||
1963 | 215 | self._ready = set() | ||
1964 | 216 | |||
1965 | 217 | def _save_ready_file(self): | ||
1966 | 218 | if self._ready is None: | ||
1967 | 219 | return | ||
1968 | 220 | with open(self._ready_file, 'w') as fp: | ||
1969 | 221 | json.dump(list(self._ready), fp) | ||
1970 | 222 | |||
1971 | 223 | def save_ready(self, service_name): | ||
1972 | 224 | """ | ||
1973 | 225 | Save an indicator that the given service is now data_ready. | ||
1974 | 226 | """ | ||
1975 | 227 | self._load_ready_file() | ||
1976 | 228 | self._ready.add(service_name) | ||
1977 | 229 | self._save_ready_file() | ||
1978 | 230 | |||
1979 | 231 | def save_lost(self, service_name): | ||
1980 | 232 | """ | ||
1981 | 233 | Save an indicator that the given service is no longer data_ready. | ||
1982 | 234 | """ | ||
1983 | 235 | self._load_ready_file() | ||
1984 | 236 | self._ready.discard(service_name) | ||
1985 | 237 | self._save_ready_file() | ||
1986 | 238 | |||
1987 | 239 | def was_ready(self, service_name): | ||
1988 | 240 | """ | ||
1989 | 241 | Determine if the given service was previously data_ready. | ||
1990 | 242 | """ | ||
1991 | 243 | self._load_ready_file() | ||
1992 | 244 | return service_name in self._ready | ||
1993 | 245 | |||
1994 | 246 | |||
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__`, accepting three parameters:

    * `manager`       The `ServiceManager` instance
    * `service_name`  The name of the service it's being triggered for
    * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: concrete callbacks must override this.
        raise NotImplementedError()
2008 | 260 | |||
2009 | 261 | |||
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.

    Previously-opened ports are remembered in a per-service dot-file in
    the charm directory, so ports dropped from a service definition get
    closed on the next event.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(),
                                 '.{}.ports'.format(service_name))
        # Close any port we opened before that is no longer requested.
        if os.path.exists(port_file):
            with open(port_file) as fp:
                for entry in fp.read().split(','):
                    if not entry:
                        continue
                    old_port = int(entry)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        # Persist the current port list for the next invocation.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
2034 | 286 | |||
2035 | 287 | |||
def service_stop(service_name):
    """
    Stop ``service_name`` via host.service_stop, but only when it is
    actually running — prevents spurious "unknown service" messages in
    the logs.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
2043 | 295 | |||
2044 | 296 | |||
def service_restart(service_name):
    """
    Restart (or start) ``service_name``.

    Wrapper around host.service_restart that prevents spurious "unknown
    service" log messages: unavailable services are skipped entirely,
    and services that are not running are started instead of restarted.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
2055 | 307 | |||
2056 | 308 | |||
# Convenience aliases: one shared PortManagerCallback instance acts as the
# default 'start' (open ports) and 'stop' (close ports) handler.
open_ports = close_ports = manage_ports = PortManagerCallback()
2059 | 0 | 311 | ||
2060 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
2061 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
2062 | +++ hooks/charmhelpers/core/services/helpers.py 2014-09-16 09:08:32 +0000 | |||
2063 | @@ -0,0 +1,125 @@ | |||
2064 | 1 | from charmhelpers.core import hookenv | ||
2065 | 2 | from charmhelpers.core import templating | ||
2066 | 3 | |||
2067 | 4 | from charmhelpers.core.services.base import ManagerCallback | ||
2068 | 5 | |||
2069 | 6 | |||
2070 | 7 | __all__ = ['RelationContext', 'TemplateCallback', | ||
2071 | 8 | 'render_template', 'template'] | ||
2072 | 9 | |||
2073 | 10 | |||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name` (the name of the
    interface of interest), `interface` (the type of the interface of
    interest), and `required_keys` (the set of keys required for the
    relation to be considered complete).  The data for all interfaces
    matching the `name` attribute that are complete will be used to
    populate the dictionary values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to
    prevent potential naming conflicts.
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
        self.get_data()

    def __bool__(self):
        """
        True when all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__  # Python 2 truthiness hook

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Return True if all of the `required_keys` are available from any units.
        """
        complete = bool(self.get(self.name, []))
        if not complete:
            hookenv.log('Incomplete relation: {}'.format(
                self.__class__.__name__), hookenv.DEBUG)
        return complete

    def _is_ready(self, unit_data):
        """
        Helper that returns True when `unit_data` contains every one of
        the `required_keys`.
        """
        return set(self.required_keys).issubset(set(unit_data.keys()))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This is
        called automatically when the RelationContext is instantiated.

        The units are sorted lexicographically, first by the service ID and
        then by the unit ID.  Thus, if an interface has two other services,
        'db:1' and 'db:2', with 'db:1' having two units, 'wordpress/0' and
        'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of
        which have a complete set of data, the relation data for the units
        will be stored in the order: 'wordpress/0', 'wordpress/1',
        'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at
        all support multiple units on a relation, iterate over the list,
        like::

            {% for unit in interface -%}
            {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services
        and units are in a single list, if you need to know which service
        or unit a set of data came from, you'll need to extend this class
        to preserve that information.
        """
        relation_ids = hookenv.relation_ids(self.name)
        if not relation_ids:
            return

        units_data = self.setdefault(self.name, [])
        for rid in sorted(relation_ids):
            for unit in sorted(hookenv.related_units(rid)):
                data = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(data):
                    units_data.append(data)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
2165 | 102 | |||
2166 | 103 | |||
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.

    :param str source: template file name, relative to the charm's
        ``templates`` directory
    :param str target: absolute path the rendered file is written to
    :param str owner: owner of the rendered file
    :param str group: group of the rendered file
    :param int perms: permission bits for the rendered file
    """
    # NOTE: perms uses 0o444 (not the legacy 0444 literal) so the module
    # stays valid syntax on Python 3; 0oNNN is accepted from Python 2.6.
    def __init__(self, source, target, owner='root', group='root',
                 perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        # Merge all of the service's required_data contexts into a single
        # template context, then render to the target path.
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
2185 | 122 | |||
2186 | 123 | |||
# Convenience aliases for templates: both names construct a TemplateCallback.
render_template = template = TemplateCallback
2189 | 0 | 126 | ||
2190 | === added file 'hooks/charmhelpers/core/templating.py' | |||
2191 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
2192 | +++ hooks/charmhelpers/core/templating.py 2014-09-16 09:08:32 +0000 | |||
2193 | @@ -0,0 +1,51 @@ | |||
2194 | 1 | import os | ||
2195 | 2 | |||
2196 | 3 | from charmhelpers.core import host | ||
2197 | 4 | from charmhelpers.core import hookenv | ||
2198 | 5 | |||
2199 | 6 | |||
def render(source, target, context, owner='root', group='root', perms=0o444, templates_dir=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in
    the template.

    The `owner`, `group`, and `perms` options will be passed to
    `host.write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the
    charm.

    Note: Using this requires python-jinja2; if it is not installed,
    calling this will attempt to use charmhelpers.fetch.apt_install to
    install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the target directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
2245 | 0 | 52 | ||
2246 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
2247 | --- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:14:03 +0000 | |||
2248 | +++ hooks/charmhelpers/fetch/__init__.py 2014-09-16 09:08:32 +0000 | |||
2249 | @@ -1,4 +1,5 @@ | |||
2250 | 1 | import importlib | 1 | import importlib |
2251 | 2 | from tempfile import NamedTemporaryFile | ||
2252 | 2 | import time | 3 | import time |
2253 | 3 | from yaml import safe_load | 4 | from yaml import safe_load |
2254 | 4 | from charmhelpers.core.host import ( | 5 | from charmhelpers.core.host import ( |
2255 | @@ -13,7 +14,6 @@ | |||
2256 | 13 | config, | 14 | config, |
2257 | 14 | log, | 15 | log, |
2258 | 15 | ) | 16 | ) |
2259 | 16 | import apt_pkg | ||
2260 | 17 | import os | 17 | import os |
2261 | 18 | 18 | ||
2262 | 19 | 19 | ||
2263 | @@ -56,6 +56,15 @@ | |||
2264 | 56 | 'icehouse/proposed': 'precise-proposed/icehouse', | 56 | 'icehouse/proposed': 'precise-proposed/icehouse', |
2265 | 57 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', | 57 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', |
2266 | 58 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', | 58 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
2267 | 59 | # Juno | ||
2268 | 60 | 'juno': 'trusty-updates/juno', | ||
2269 | 61 | 'trusty-juno': 'trusty-updates/juno', | ||
2270 | 62 | 'trusty-juno/updates': 'trusty-updates/juno', | ||
2271 | 63 | 'trusty-updates/juno': 'trusty-updates/juno', | ||
2272 | 64 | 'juno/proposed': 'trusty-proposed/juno', | ||
2274 | 66 | 'trusty-juno/proposed': 'trusty-proposed/juno', | ||
2275 | 67 | 'trusty-proposed/juno': 'trusty-proposed/juno', | ||
2276 | 59 | } | 68 | } |
2277 | 60 | 69 | ||
2278 | 61 | # The order of this list is very important. Handlers should be listed in from | 70 | # The order of this list is very important. Handlers should be listed in from |
2279 | @@ -108,8 +117,12 @@ | |||
2280 | 108 | 117 | ||
2281 | 109 | def filter_installed_packages(packages): | 118 | def filter_installed_packages(packages): |
2282 | 110 | """Returns a list of packages that require installation""" | 119 | """Returns a list of packages that require installation""" |
2283 | 120 | <<<<<<< TREE | ||
2284 | 111 | 121 | ||
2285 | 112 | cache = apt_cache() | 122 | cache = apt_cache() |
2286 | 123 | ======= | ||
2287 | 124 | cache = apt_cache() | ||
2288 | 125 | >>>>>>> MERGE-SOURCE | ||
2289 | 113 | _pkgs = [] | 126 | _pkgs = [] |
2290 | 114 | for package in packages: | 127 | for package in packages: |
2291 | 115 | try: | 128 | try: |
2292 | @@ -122,15 +135,28 @@ | |||
2293 | 122 | return _pkgs | 135 | return _pkgs |
2294 | 123 | 136 | ||
2295 | 124 | 137 | ||
2305 | 125 | def apt_cache(in_memory=True): | 138 | <<<<<<< TREE |
2306 | 126 | """Build and return an apt cache""" | 139 | def apt_cache(in_memory=True): |
2307 | 127 | apt_pkg.init() | 140 | """Build and return an apt cache""" |
2308 | 128 | if in_memory: | 141 | apt_pkg.init() |
2309 | 129 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | 142 | if in_memory: |
2310 | 130 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | 143 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
2311 | 131 | return apt_pkg.Cache() | 144 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") |
2312 | 132 | 145 | return apt_pkg.Cache() | |
2313 | 133 | 146 | ||
2314 | 147 | |||
2315 | 148 | ======= | ||
2316 | 149 | def apt_cache(in_memory=True): | ||
2317 | 150 | """Build and return an apt cache""" | ||
2318 | 151 | import apt_pkg | ||
2319 | 152 | apt_pkg.init() | ||
2320 | 153 | if in_memory: | ||
2321 | 154 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
2322 | 155 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
2323 | 156 | return apt_pkg.Cache() | ||
2324 | 157 | |||
2325 | 158 | |||
2326 | 159 | >>>>>>> MERGE-SOURCE | ||
2327 | 134 | def apt_install(packages, options=None, fatal=False): | 160 | def apt_install(packages, options=None, fatal=False): |
2328 | 135 | """Install one or more packages""" | 161 | """Install one or more packages""" |
2329 | 136 | if options is None: | 162 | if options is None: |
2330 | @@ -196,6 +222,27 @@ | |||
2331 | 196 | 222 | ||
2332 | 197 | 223 | ||
2333 | 198 | def add_source(source, key=None): | 224 | def add_source(source, key=None): |
2334 | 225 | """Add a package source to this system. | ||
2335 | 226 | |||
2336 | 227 | @param source: a URL or sources.list entry, as supported by | ||
2337 | 228 | add-apt-repository(1). Examples: | ||
2338 | 229 | ppa:charmers/example | ||
2339 | 230 | deb https://stub:key@private.example.com/ubuntu trusty main | ||
2340 | 231 | |||
2341 | 232 | In addition: | ||
2342 | 233 | 'proposed:' may be used to enable the standard 'proposed' | ||
2343 | 234 | pocket for the release. | ||
2344 | 235 | 'cloud:' may be used to activate official cloud archive pockets, | ||
2345 | 236 | such as 'cloud:icehouse' | ||
2346 | 237 | |||
2347 | 238 | @param key: A key to be added to the system's APT keyring and used | ||
2348 | 239 | to verify the signatures on packages. Ideally, this should be an | ||
2349 | 240 | ASCII format GPG public key including the block headers. A GPG key | ||
2350 | 241 | id may also be used, but be aware that only insecure protocols are | ||
2351 | 242 | available to retrieve the actual public key from a public keyserver | ||
2352 | 243 | placing your Juju environment at risk. ppa and cloud archive keys | ||
2353 | 244 | are securely added automatically, so should not be provided. | ||
2354 | 245 | """ | ||
2355 | 199 | if source is None: | 246 | if source is None: |
2356 | 200 | log('Source is not present. Skipping') | 247 | log('Source is not present. Skipping') |
2357 | 201 | return | 248 | return |
2358 | @@ -220,41 +267,63 @@ | |||
2359 | 220 | release = lsb_release()['DISTRIB_CODENAME'] | 267 | release = lsb_release()['DISTRIB_CODENAME'] |
2360 | 221 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 268 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
2361 | 222 | apt.write(PROPOSED_POCKET.format(release)) | 269 | apt.write(PROPOSED_POCKET.format(release)) |
2362 | 270 | else: | ||
2363 | 271 | raise SourceConfigError("Unknown source: {!r}".format(source)) | ||
2364 | 272 | |||
2365 | 223 | if key: | 273 | if key: |
2369 | 224 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | 274 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
2370 | 225 | 'hkp://keyserver.ubuntu.com:80', '--recv', | 275 | with NamedTemporaryFile() as key_file: |
2371 | 226 | key]) | 276 | key_file.write(key) |
2372 | 277 | key_file.flush() | ||
2373 | 278 | key_file.seek(0) | ||
2374 | 279 | subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) | ||
2375 | 280 | else: | ||
2376 | 281 | # Note that hkp: is in no way a secure protocol. Using a | ||
2377 | 282 | # GPG key id is pointless from a security POV unless you | ||
2378 | 283 | # absolutely trust your network and DNS. | ||
2379 | 284 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | ||
2380 | 285 | 'hkp://keyserver.ubuntu.com:80', '--recv', | ||
2381 | 286 | key]) | ||
2382 | 227 | 287 | ||
2383 | 228 | 288 | ||
2384 | 229 | def configure_sources(update=False, | 289 | def configure_sources(update=False, |
2385 | 230 | sources_var='install_sources', | 290 | sources_var='install_sources', |
2386 | 231 | keys_var='install_keys'): | 291 | keys_var='install_keys'): |
2387 | 232 | """ | 292 | """ |
2389 | 233 | Configure multiple sources from charm configuration | 293 | Configure multiple sources from charm configuration. |
2390 | 294 | |||
2391 | 295 | The lists are encoded as yaml fragments in the configuration. | ||
2392 | 296 | The fragment needs to be included as a string. Sources and their | ||
2393 | 297 | corresponding keys are of the types supported by add_source(). | ||
2394 | 234 | 298 | ||
2395 | 235 | Example config: | 299 | Example config: |
2397 | 236 | install_sources: | 300 | install_sources: | |
2398 | 237 | - "ppa:foo" | 301 | - "ppa:foo" |
2399 | 238 | - "http://example.com/repo precise main" | 302 | - "http://example.com/repo precise main" |
2401 | 239 | install_keys: | 303 | install_keys: | |
2402 | 240 | - null | 304 | - null |
2403 | 241 | - "a1b2c3d4" | 305 | - "a1b2c3d4" |
2404 | 242 | 306 | ||
2405 | 243 | Note that 'null' (a.k.a. None) should not be quoted. | 307 | Note that 'null' (a.k.a. None) should not be quoted. |
2406 | 244 | """ | 308 | """ |
2414 | 245 | sources = safe_load(config(sources_var)) | 309 | sources = safe_load((config(sources_var) or '').strip()) or [] |
2415 | 246 | keys = config(keys_var) | 310 | keys = safe_load((config(keys_var) or '').strip()) or None |
2416 | 247 | if keys is not None: | 311 | |
2417 | 248 | keys = safe_load(keys) | 312 | if isinstance(sources, basestring): |
2418 | 249 | if isinstance(sources, basestring) and ( | 313 | sources = [sources] |
2419 | 250 | keys is None or isinstance(keys, basestring)): | 314 | |
2420 | 251 | add_source(sources, keys) | 315 | if keys is None: |
2421 | 316 | for source in sources: | ||
2422 | 317 | add_source(source, None) | ||
2423 | 252 | else: | 318 | else: |
2429 | 253 | if not len(sources) == len(keys): | 319 | if isinstance(keys, basestring): |
2430 | 254 | msg = 'Install sources and keys lists are different lengths' | 320 | keys = [keys] |
2431 | 255 | raise SourceConfigError(msg) | 321 | |
2432 | 256 | for src_num in range(len(sources)): | 322 | if len(sources) != len(keys): |
2433 | 257 | add_source(sources[src_num], keys[src_num]) | 323 | raise SourceConfigError( |
2434 | 324 | 'Install sources and keys lists are different lengths') | ||
2435 | 325 | for source, key in zip(sources, keys): | ||
2436 | 326 | add_source(source, key) | ||
2437 | 258 | if update: | 327 | if update: |
2438 | 259 | apt_update(fatal=True) | 328 | apt_update(fatal=True) |
2439 | 260 | 329 | ||
2440 | 261 | 330 | ||
2441 | === added symlink 'hooks/neutron-api-relation-broken' | |||
2442 | === target is u'nova_cc_hooks.py' | |||
2443 | === added symlink 'hooks/neutron-api-relation-changed' | |||
2444 | === target is u'nova_cc_hooks.py' | |||
2445 | === added symlink 'hooks/neutron-api-relation-departed' | |||
2446 | === target is u'nova_cc_hooks.py' | |||
2447 | === added symlink 'hooks/neutron-api-relation-joined' | |||
2448 | === target is u'nova_cc_hooks.py' | |||
2449 | === modified file 'hooks/nova_cc_context.py' | |||
2450 | --- hooks/nova_cc_context.py 2014-06-17 10:01:21 +0000 | |||
2451 | +++ hooks/nova_cc_context.py 2014-09-16 09:08:32 +0000 | |||
2452 | @@ -1,7 +1,6 @@ | |||
2453 | 1 | |||
2454 | 2 | from charmhelpers.core.hookenv import ( | 1 | from charmhelpers.core.hookenv import ( |
2455 | 3 | config, relation_ids, relation_set, log, ERROR, | 2 | config, relation_ids, relation_set, log, ERROR, |
2457 | 4 | unit_get) | 3 | unit_get, related_units, relation_get) |
2458 | 5 | 4 | ||
2459 | 6 | from charmhelpers.fetch import apt_install, filter_installed_packages | 5 | from charmhelpers.fetch import apt_install, filter_installed_packages |
2460 | 7 | from charmhelpers.contrib.openstack import context, neutron, utils | 6 | from charmhelpers.contrib.openstack import context, neutron, utils |
2461 | @@ -14,6 +13,17 @@ | |||
2462 | 14 | ) | 13 | ) |
2463 | 15 | 14 | ||
2464 | 16 | 15 | ||
2465 | 16 | def context_complete(ctxt): | ||
2466 | 17 | _missing = [] | ||
2467 | 18 | for k, v in ctxt.iteritems(): | ||
2468 | 19 | if v is None or v == '': | ||
2469 | 20 | _missing.append(k) | ||
2470 | 21 | if _missing: | ||
2471 | 22 | log('Missing required data: %s' % ' '.join(_missing), level='INFO') | ||
2472 | 23 | return False | ||
2473 | 24 | return True | ||
2474 | 25 | |||
2475 | 26 | |||
2476 | 17 | class ApacheSSLContext(context.ApacheSSLContext): | 27 | class ApacheSSLContext(context.ApacheSSLContext): |
2477 | 18 | 28 | ||
2478 | 19 | interfaces = ['https'] | 29 | interfaces = ['https'] |
2479 | @@ -27,6 +37,26 @@ | |||
2480 | 27 | return super(ApacheSSLContext, self).__call__() | 37 | return super(ApacheSSLContext, self).__call__() |
2481 | 28 | 38 | ||
2482 | 29 | 39 | ||
2483 | 40 | class NeutronAPIContext(context.OSContextGenerator): | ||
2484 | 41 | |||
2485 | 42 | def __call__(self): | ||
2486 | 43 | log('Generating template context from neutron api relation') | ||
2487 | 44 | ctxt = {} | ||
2488 | 45 | for rid in relation_ids('neutron-api'): | ||
2489 | 46 | for unit in related_units(rid): | ||
2490 | 47 | rdata = relation_get(rid=rid, unit=unit) | ||
2491 | 48 | ctxt = { | ||
2492 | 49 | 'neutron_url': rdata.get('neutron-url'), | ||
2493 | 50 | 'neutron_plugin': rdata.get('neutron-plugin'), | ||
2494 | 51 | 'neutron_security_groups': | ||
2495 | 52 | rdata.get('neutron-security-groups'), | ||
2496 | 53 | 'network_manager': 'neutron', | ||
2497 | 54 | } | ||
2498 | 55 | if context_complete(ctxt): | ||
2499 | 56 | return ctxt | ||
2500 | 57 | return {} | ||
2501 | 58 | |||
2502 | 59 | |||
2503 | 30 | class VolumeServiceContext(context.OSContextGenerator): | 60 | class VolumeServiceContext(context.OSContextGenerator): |
2504 | 31 | interfaces = [] | 61 | interfaces = [] |
2505 | 32 | 62 | ||
2506 | @@ -204,3 +234,22 @@ | |||
2507 | 204 | def __init__(self): | 234 | def __init__(self): |
2508 | 205 | super(NeutronPostgresqlDBContext, | 235 | super(NeutronPostgresqlDBContext, |
2509 | 206 | self).__init__(config('neutron-database')) | 236 | self).__init__(config('neutron-database')) |
2510 | 237 | |||
2511 | 238 | |||
2512 | 239 | class WorkerConfigContext(context.OSContextGenerator): | ||
2513 | 240 | |||
2514 | 241 | def __call__(self): | ||
2515 | 242 | import psutil | ||
2516 | 243 | multiplier = config('worker-multiplier') or 1 | ||
2517 | 244 | ctxt = { | ||
2518 | 245 | "workers": psutil.NUM_CPUS * multiplier | ||
2519 | 246 | } | ||
2520 | 247 | return ctxt | ||
2521 | 248 | |||
2522 | 249 | |||
2523 | 250 | class NovaConfigContext(WorkerConfigContext): | ||
2524 | 251 | def __call__(self): | ||
2525 | 252 | ctxt = super(NovaConfigContext, self).__call__() | ||
2526 | 253 | ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio') | ||
2527 | 254 | ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio') | ||
2528 | 255 | return ctxt | ||
2529 | 207 | 256 | ||
2530 | === modified file 'hooks/nova_cc_hooks.py' | |||
2531 | --- hooks/nova_cc_hooks.py 2014-08-01 11:13:57 +0000 | |||
2532 | +++ hooks/nova_cc_hooks.py 2014-09-16 09:08:32 +0000 | |||
2533 | @@ -15,20 +15,25 @@ | |||
2534 | 15 | charm_dir, | 15 | charm_dir, |
2535 | 16 | is_relation_made, | 16 | is_relation_made, |
2536 | 17 | log, | 17 | log, |
2537 | 18 | local_unit, | ||
2538 | 18 | ERROR, | 19 | ERROR, |
2539 | 19 | relation_get, | 20 | relation_get, |
2540 | 20 | relation_ids, | 21 | relation_ids, |
2541 | 21 | relation_set, | 22 | relation_set, |
2542 | 23 | related_units, | ||
2543 | 22 | open_port, | 24 | open_port, |
2544 | 23 | unit_get, | 25 | unit_get, |
2545 | 24 | ) | 26 | ) |
2546 | 25 | 27 | ||
2547 | 26 | from charmhelpers.core.host import ( | 28 | from charmhelpers.core.host import ( |
2549 | 27 | restart_on_change | 29 | restart_on_change, |
2550 | 30 | service_running, | ||
2551 | 31 | service_stop, | ||
2552 | 28 | ) | 32 | ) |
2553 | 29 | 33 | ||
2554 | 30 | from charmhelpers.fetch import ( | 34 | from charmhelpers.fetch import ( |
2556 | 31 | apt_install, apt_update | 35 | apt_install, apt_update, |
2557 | 36 | filter_installed_packages | ||
2558 | 32 | ) | 37 | ) |
2559 | 33 | 38 | ||
2560 | 34 | from charmhelpers.contrib.openstack.utils import ( | 39 | from charmhelpers.contrib.openstack.utils import ( |
2561 | @@ -41,21 +46,33 @@ | |||
2562 | 41 | neutron_plugin_attribute, | 46 | neutron_plugin_attribute, |
2563 | 42 | ) | 47 | ) |
2564 | 43 | 48 | ||
2565 | 49 | from nova_cc_context import ( | ||
2566 | 50 | NeutronAPIContext | ||
2567 | 51 | ) | ||
2568 | 52 | |||
2569 | 53 | from charmhelpers.contrib.peerstorage import ( | ||
2570 | 54 | peer_retrieve, | ||
2571 | 55 | peer_echo, | ||
2572 | 56 | ) | ||
2573 | 57 | |||
2574 | 44 | from nova_cc_utils import ( | 58 | from nova_cc_utils import ( |
2575 | 45 | api_port, | 59 | api_port, |
2576 | 46 | auth_token_config, | 60 | auth_token_config, |
2577 | 61 | cmd_all_services, | ||
2578 | 47 | determine_endpoints, | 62 | determine_endpoints, |
2579 | 48 | determine_packages, | 63 | determine_packages, |
2580 | 49 | determine_ports, | 64 | determine_ports, |
2581 | 65 | disable_services, | ||
2582 | 50 | do_openstack_upgrade, | 66 | do_openstack_upgrade, |
2583 | 67 | enable_services, | ||
2584 | 51 | keystone_ca_cert_b64, | 68 | keystone_ca_cert_b64, |
2585 | 52 | migrate_database, | 69 | migrate_database, |
2586 | 53 | neutron_plugin, | 70 | neutron_plugin, |
2587 | 54 | save_script_rc, | 71 | save_script_rc, |
2588 | 55 | ssh_compute_add, | 72 | ssh_compute_add, |
2589 | 56 | ssh_compute_remove, | 73 | ssh_compute_remove, |
2592 | 57 | ssh_known_hosts_b64, | 74 | ssh_known_hosts_lines, |
2593 | 58 | ssh_authorized_keys_b64, | 75 | ssh_authorized_keys_lines, |
2594 | 59 | register_configs, | 76 | register_configs, |
2595 | 60 | restart_map, | 77 | restart_map, |
2596 | 61 | volume_service, | 78 | volume_service, |
2597 | @@ -63,13 +80,19 @@ | |||
2598 | 63 | NOVA_CONF, | 80 | NOVA_CONF, |
2599 | 64 | QUANTUM_CONF, | 81 | QUANTUM_CONF, |
2600 | 65 | NEUTRON_CONF, | 82 | NEUTRON_CONF, |
2604 | 66 | QUANTUM_API_PASTE, | 83 | <<<<<<< TREE |
2605 | 67 | service_guard, | 84 | QUANTUM_API_PASTE, |
2606 | 68 | guard_map, | 85 | service_guard, |
2607 | 86 | guard_map, | ||
2608 | 87 | ======= | ||
2609 | 88 | QUANTUM_API_PASTE, | ||
2610 | 89 | console_attributes, | ||
2611 | 90 | service_guard, | ||
2612 | 91 | guard_map, | ||
2613 | 92 | >>>>>>> MERGE-SOURCE | ||
2614 | 69 | ) | 93 | ) |
2615 | 70 | 94 | ||
2616 | 71 | from charmhelpers.contrib.hahelpers.cluster import ( | 95 | from charmhelpers.contrib.hahelpers.cluster import ( |
2617 | 72 | canonical_url, | ||
2618 | 73 | eligible_leader, | 96 | eligible_leader, |
2619 | 74 | get_hacluster_config, | 97 | get_hacluster_config, |
2620 | 75 | is_leader, | 98 | is_leader, |
2621 | @@ -77,6 +100,16 @@ | |||
2622 | 77 | 100 | ||
2623 | 78 | from charmhelpers.payload.execd import execd_preinstall | 101 | from charmhelpers.payload.execd import execd_preinstall |
2624 | 79 | 102 | ||
2625 | 103 | from charmhelpers.contrib.openstack.ip import ( | ||
2626 | 104 | canonical_url, | ||
2627 | 105 | PUBLIC, INTERNAL, ADMIN | ||
2628 | 106 | ) | ||
2629 | 107 | |||
2630 | 108 | from charmhelpers.contrib.network.ip import ( | ||
2631 | 109 | get_iface_for_address, | ||
2632 | 110 | get_netmask_for_address | ||
2633 | 111 | ) | ||
2634 | 112 | |||
2635 | 80 | hooks = Hooks() | 113 | hooks = Hooks() |
2636 | 81 | CONFIGS = register_configs() | 114 | CONFIGS = register_configs() |
2637 | 82 | 115 | ||
2638 | @@ -95,6 +128,9 @@ | |||
2639 | 95 | log('Installing %s to /usr/bin' % f) | 128 | log('Installing %s to /usr/bin' % f) |
2640 | 96 | shutil.copy2(f, '/usr/bin') | 129 | shutil.copy2(f, '/usr/bin') |
2641 | 97 | [open_port(port) for port in determine_ports()] | 130 | [open_port(port) for port in determine_ports()] |
2642 | 131 | log('Disabling services into db relation joined') | ||
2643 | 132 | disable_services() | ||
2644 | 133 | cmd_all_services('stop') | ||
2645 | 98 | 134 | ||
2646 | 99 | 135 | ||
2647 | 100 | @hooks.hook('config-changed') | 136 | @hooks.hook('config-changed') |
2648 | @@ -108,6 +144,13 @@ | |||
2649 | 108 | save_script_rc() | 144 | save_script_rc() |
2650 | 109 | configure_https() | 145 | configure_https() |
2651 | 110 | CONFIGS.write_all() | 146 | CONFIGS.write_all() |
2652 | 147 | if console_attributes('protocol'): | ||
2653 | 148 | apt_update() | ||
2654 | 149 | apt_install(console_attributes('packages'), fatal=True) | ||
2655 | 150 | [compute_joined(rid=rid) | ||
2656 | 151 | for rid in relation_ids('cloud-compute')] | ||
2657 | 152 | for r_id in relation_ids('identity-service'): | ||
2658 | 153 | identity_joined(rid=r_id) | ||
2659 | 111 | 154 | ||
2660 | 112 | 155 | ||
2661 | 113 | @hooks.hook('amqp-relation-joined') | 156 | @hooks.hook('amqp-relation-joined') |
2662 | @@ -126,10 +169,11 @@ | |||
2663 | 126 | log('amqp relation incomplete. Peer not ready?') | 169 | log('amqp relation incomplete. Peer not ready?') |
2664 | 127 | return | 170 | return |
2665 | 128 | CONFIGS.write(NOVA_CONF) | 171 | CONFIGS.write(NOVA_CONF) |
2670 | 129 | if network_manager() == 'quantum': | 172 | if not is_relation_made('neutron-api'): |
2671 | 130 | CONFIGS.write(QUANTUM_CONF) | 173 | if network_manager() == 'quantum': |
2672 | 131 | if network_manager() == 'neutron': | 174 | CONFIGS.write(QUANTUM_CONF) |
2673 | 132 | CONFIGS.write(NEUTRON_CONF) | 175 | if network_manager() == 'neutron': |
2674 | 176 | CONFIGS.write(NEUTRON_CONF) | ||
2675 | 133 | 177 | ||
2676 | 134 | 178 | ||
2677 | 135 | @hooks.hook('shared-db-relation-joined') | 179 | @hooks.hook('shared-db-relation-joined') |
2678 | @@ -187,6 +231,13 @@ | |||
2679 | 187 | CONFIGS.write_all() | 231 | CONFIGS.write_all() |
2680 | 188 | 232 | ||
2681 | 189 | if eligible_leader(CLUSTER_RES): | 233 | if eligible_leader(CLUSTER_RES): |
2682 | 234 | # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units | ||
2683 | 235 | # acl entry has been added. So, if the db supports passing a list of | ||
2684 | 236 | # permitted units then check if we're in the list. | ||
2685 | 237 | allowed_units = relation_get('nova_allowed_units') | ||
2686 | 238 | if allowed_units and local_unit() not in allowed_units.split(): | ||
2687 | 239 | log('Allowed_units list provided and this unit not present') | ||
2688 | 240 | return | ||
2689 | 190 | migrate_database() | 241 | migrate_database() |
2690 | 191 | log('Triggering remote cloud-compute restarts.') | 242 | log('Triggering remote cloud-compute restarts.') |
2691 | 192 | [compute_joined(rid=rid, remote_restart=True) | 243 | [compute_joined(rid=rid, remote_restart=True) |
2692 | @@ -237,8 +288,12 @@ | |||
2693 | 237 | def identity_joined(rid=None): | 288 | def identity_joined(rid=None): |
2694 | 238 | if not eligible_leader(CLUSTER_RES): | 289 | if not eligible_leader(CLUSTER_RES): |
2695 | 239 | return | 290 | return |
2698 | 240 | base_url = canonical_url(CONFIGS) | 291 | public_url = canonical_url(CONFIGS, PUBLIC) |
2699 | 241 | relation_set(relation_id=rid, **determine_endpoints(base_url)) | 292 | internal_url = canonical_url(CONFIGS, INTERNAL) |
2700 | 293 | admin_url = canonical_url(CONFIGS, ADMIN) | ||
2701 | 294 | relation_set(relation_id=rid, **determine_endpoints(public_url, | ||
2702 | 295 | internal_url, | ||
2703 | 296 | admin_url)) | ||
2704 | 242 | 297 | ||
2705 | 243 | 298 | ||
2706 | 244 | @hooks.hook('identity-service-relation-changed') | 299 | @hooks.hook('identity-service-relation-changed') |
2707 | @@ -251,15 +306,17 @@ | |||
2708 | 251 | return | 306 | return |
2709 | 252 | CONFIGS.write('/etc/nova/api-paste.ini') | 307 | CONFIGS.write('/etc/nova/api-paste.ini') |
2710 | 253 | CONFIGS.write(NOVA_CONF) | 308 | CONFIGS.write(NOVA_CONF) |
2717 | 254 | if network_manager() == 'quantum': | 309 | if not is_relation_made('neutron-api'): |
2718 | 255 | CONFIGS.write(QUANTUM_API_PASTE) | 310 | if network_manager() == 'quantum': |
2719 | 256 | CONFIGS.write(QUANTUM_CONF) | 311 | CONFIGS.write(QUANTUM_API_PASTE) |
2720 | 257 | save_novarc() | 312 | CONFIGS.write(QUANTUM_CONF) |
2721 | 258 | if network_manager() == 'neutron': | 313 | save_novarc() |
2722 | 259 | CONFIGS.write(NEUTRON_CONF) | 314 | if network_manager() == 'neutron': |
2723 | 315 | CONFIGS.write(NEUTRON_CONF) | ||
2724 | 260 | [compute_joined(rid) for rid in relation_ids('cloud-compute')] | 316 | [compute_joined(rid) for rid in relation_ids('cloud-compute')] |
2725 | 261 | [quantum_joined(rid) for rid in relation_ids('quantum-network-service')] | 317 | [quantum_joined(rid) for rid in relation_ids('quantum-network-service')] |
2726 | 262 | [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')] | 318 | [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')] |
2727 | 319 | [neutron_api_relation_joined(rid) for rid in relation_ids('neutron-api')] | ||
2728 | 263 | configure_https() | 320 | configure_https() |
2729 | 264 | 321 | ||
2730 | 265 | 322 | ||
2731 | @@ -311,6 +368,33 @@ | |||
2732 | 311 | out.write('export OS_REGION_NAME=%s\n' % config('region')) | 368 | out.write('export OS_REGION_NAME=%s\n' % config('region')) |
2733 | 312 | 369 | ||
2734 | 313 | 370 | ||
2735 | 371 | def neutron_settings(): | ||
2736 | 372 | neutron_settings = {} | ||
2737 | 373 | if is_relation_made('neutron-api', 'neutron-plugin'): | ||
2738 | 374 | neutron_api_info = NeutronAPIContext()() | ||
2739 | 375 | neutron_settings.update({ | ||
2740 | 376 | # XXX: Rename these relations settings? | ||
2741 | 377 | 'quantum_plugin': neutron_api_info['neutron_plugin'], | ||
2742 | 378 | 'region': config('region'), | ||
2743 | 379 | 'quantum_security_groups': | ||
2744 | 380 | neutron_api_info['neutron_security_groups'], | ||
2745 | 381 | 'quantum_url': neutron_api_info['neutron_url'], | ||
2746 | 382 | }) | ||
2747 | 383 | else: | ||
2748 | 384 | neutron_settings.update({ | ||
2749 | 385 | # XXX: Rename these relations settings? | ||
2750 | 386 | 'quantum_plugin': neutron_plugin(), | ||
2751 | 387 | 'region': config('region'), | ||
2752 | 388 | 'quantum_security_groups': config('quantum-security-groups'), | ||
2753 | 389 | 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL), | ||
2754 | 390 | str(api_port('neutron-server'))), | ||
2755 | 391 | }) | ||
2756 | 392 | neutron_url = urlparse(neutron_settings['quantum_url']) | ||
2757 | 393 | neutron_settings['quantum_host'] = neutron_url.hostname | ||
2758 | 394 | neutron_settings['quantum_port'] = neutron_url.port | ||
2759 | 395 | return neutron_settings | ||
2760 | 396 | |||
2761 | 397 | |||
2762 | 314 | def keystone_compute_settings(): | 398 | def keystone_compute_settings(): |
2763 | 315 | ks_auth_config = _auth_config() | 399 | ks_auth_config = _auth_config() |
2764 | 316 | rel_settings = {} | 400 | rel_settings = {} |
2765 | @@ -318,25 +402,45 @@ | |||
2766 | 318 | if network_manager() in ['quantum', 'neutron']: | 402 | if network_manager() in ['quantum', 'neutron']: |
2767 | 319 | if ks_auth_config: | 403 | if ks_auth_config: |
2768 | 320 | rel_settings.update(ks_auth_config) | 404 | rel_settings.update(ks_auth_config) |
2779 | 321 | 405 | rel_settings.update(neutron_settings()) | |
2770 | 322 | rel_settings.update({ | ||
2771 | 323 | # XXX: Rename these relations settings? | ||
2772 | 324 | 'quantum_plugin': neutron_plugin(), | ||
2773 | 325 | 'region': config('region'), | ||
2774 | 326 | 'quantum_security_groups': config('quantum-security-groups'), | ||
2775 | 327 | 'quantum_url': (canonical_url(CONFIGS) + ':' + | ||
2776 | 328 | str(api_port('neutron-server'))), | ||
2777 | 329 | }) | ||
2778 | 330 | |||
2780 | 331 | ks_ca = keystone_ca_cert_b64() | 406 | ks_ca = keystone_ca_cert_b64() |
2781 | 332 | if ks_auth_config and ks_ca: | 407 | if ks_auth_config and ks_ca: |
2782 | 333 | rel_settings['ca_cert'] = ks_ca | 408 | rel_settings['ca_cert'] = ks_ca |
2783 | 409 | return rel_settings | ||
2784 | 410 | |||
2785 | 411 | |||
2786 | 412 | def console_settings(): | ||
2787 | 413 | rel_settings = {} | ||
2788 | 414 | proto = console_attributes('protocol') | ||
2789 | 415 | if not proto: | ||
2790 | 416 | return {} | ||
2791 | 417 | rel_settings['console_keymap'] = config('console-keymap') | ||
2792 | 418 | rel_settings['console_access_protocol'] = proto | ||
2793 | 419 | if config('console-proxy-ip') == 'local': | ||
2794 | 420 | proxy_base_addr = canonical_url(CONFIGS, PUBLIC) | ||
2795 | 421 | else: | ||
2796 | 422 | proxy_base_addr = "http://" + config('console-proxy-ip') | ||
2797 | 423 | if proto == 'vnc': | ||
2798 | 424 | protocols = ['novnc', 'xvpvnc'] | ||
2799 | 425 | else: | ||
2800 | 426 | protocols = [proto] | ||
2801 | 427 | for _proto in protocols: | ||
2802 | 428 | rel_settings['console_proxy_%s_address' % (_proto)] = \ | ||
2803 | 429 | "%s:%s%s" % (proxy_base_addr, | ||
2804 | 430 | console_attributes('proxy-port', proto=_proto), | ||
2805 | 431 | console_attributes('proxy-page', proto=_proto)) | ||
2806 | 432 | rel_settings['console_proxy_%s_host' % (_proto)] = \ | ||
2807 | 433 | urlparse(proxy_base_addr).hostname | ||
2808 | 434 | rel_settings['console_proxy_%s_port' % (_proto)] = \ | ||
2809 | 435 | console_attributes('proxy-port', proto=_proto) | ||
2810 | 334 | 436 | ||
2811 | 335 | return rel_settings | 437 | return rel_settings |
2812 | 336 | 438 | ||
2813 | 337 | 439 | ||
2814 | 338 | @hooks.hook('cloud-compute-relation-joined') | 440 | @hooks.hook('cloud-compute-relation-joined') |
2815 | 339 | def compute_joined(rid=None, remote_restart=False): | 441 | def compute_joined(rid=None, remote_restart=False): |
2816 | 442 | cons_settings = console_settings() | ||
2817 | 443 | relation_set(relation_id=rid, **cons_settings) | ||
2818 | 340 | if not eligible_leader(CLUSTER_RES): | 444 | if not eligible_leader(CLUSTER_RES): |
2819 | 341 | return | 445 | return |
2820 | 342 | rel_settings = { | 446 | rel_settings = { |
2821 | @@ -346,7 +450,6 @@ | |||
2822 | 346 | # this may not even be needed. | 450 | # this may not even be needed. |
2823 | 347 | 'ec2_host': unit_get('private-address'), | 451 | 'ec2_host': unit_get('private-address'), |
2824 | 348 | } | 452 | } |
2825 | 349 | |||
2826 | 350 | # update relation setting if we're attempting to restart remote | 453 | # update relation setting if we're attempting to restart remote |
2827 | 351 | # services | 454 | # services |
2828 | 352 | if remote_restart: | 455 | if remote_restart: |
2829 | @@ -357,21 +460,63 @@ | |||
2830 | 357 | 460 | ||
2831 | 358 | 461 | ||
2832 | 359 | @hooks.hook('cloud-compute-relation-changed') | 462 | @hooks.hook('cloud-compute-relation-changed') |
2837 | 360 | def compute_changed(): | 463 | def compute_changed(rid=None, unit=None): |
2838 | 361 | migration_auth = relation_get('migration_auth_type') | 464 | rel_settings = relation_get(rid=rid, unit=unit) |
2839 | 362 | if migration_auth == 'ssh': | 465 | if 'migration_auth_type' not in rel_settings: |
2840 | 363 | key = relation_get('ssh_public_key') | 466 | return |
2841 | 467 | if rel_settings['migration_auth_type'] == 'ssh': | ||
2842 | 468 | key = rel_settings.get('ssh_public_key') | ||
2843 | 364 | if not key: | 469 | if not key: |
2844 | 365 | log('SSH migration set but peer did not publish key.') | 470 | log('SSH migration set but peer did not publish key.') |
2845 | 366 | return | 471 | return |
2854 | 367 | ssh_compute_add(key) | 472 | ssh_compute_add(key, rid=rid, unit=unit) |
2855 | 368 | relation_set(known_hosts=ssh_known_hosts_b64(), | 473 | index = 0 |
2856 | 369 | authorized_keys=ssh_authorized_keys_b64()) | 474 | for line in ssh_known_hosts_lines(unit=unit): |
2857 | 370 | if relation_get('nova_ssh_public_key'): | 475 | relation_set( |
2858 | 371 | key = relation_get('nova_ssh_public_key') | 476 | relation_id=rid, |
2859 | 372 | ssh_compute_add(key, user='nova') | 477 | relation_settings={ |
2860 | 373 | relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'), | 478 | 'known_hosts_{}'.format(index): line}) |
2861 | 374 | nova_authorized_keys=ssh_authorized_keys_b64(user='nova')) | 479 | index += 1 |
2862 | 480 | relation_set(relation_id=rid, known_hosts_max_index=index) | ||
2863 | 481 | index = 0 | ||
2864 | 482 | for line in ssh_authorized_keys_lines(unit=unit): | ||
2865 | 483 | relation_set( | ||
2866 | 484 | relation_id=rid, | ||
2867 | 485 | relation_settings={ | ||
2868 | 486 | 'authorized_keys_{}'.format(index): line}) | ||
2869 | 487 | index += 1 | ||
2870 | 488 | relation_set(relation_id=rid, authorized_keys_max_index=index) | ||
2871 | 489 | if 'nova_ssh_public_key' not in rel_settings: | ||
2872 | 490 | return | ||
2873 | 491 | if rel_settings['nova_ssh_public_key']: | ||
2874 | 492 | ssh_compute_add(rel_settings['nova_ssh_public_key'], | ||
2875 | 493 | rid=rid, unit=unit, user='nova') | ||
2876 | 494 | index = 0 | ||
2877 | 495 | for line in ssh_known_hosts_lines(unit=unit, user='nova'): | ||
2878 | 496 | relation_set( | ||
2879 | 497 | relation_id=rid, | ||
2880 | 498 | relation_settings={ | ||
2881 | 499 | '{}_known_hosts_{}'.format( | ||
2882 | 500 | 'nova', | ||
2883 | 501 | index): line}) | ||
2884 | 502 | index += 1 | ||
2885 | 503 | relation_set( | ||
2886 | 504 | relation_id=rid, | ||
2887 | 505 | relation_settings={ | ||
2888 | 506 | '{}_known_hosts_max_index'.format('nova'): index}) | ||
2889 | 507 | index = 0 | ||
2890 | 508 | for line in ssh_authorized_keys_lines(unit=unit, user='nova'): | ||
2891 | 509 | relation_set( | ||
2892 | 510 | relation_id=rid, | ||
2893 | 511 | relation_settings={ | ||
2894 | 512 | '{}_authorized_keys_{}'.format( | ||
2895 | 513 | 'nova', | ||
2896 | 514 | index): line}) | ||
2897 | 515 | index += 1 | ||
2898 | 516 | relation_set( | ||
2899 | 517 | relation_id=rid, | ||
2900 | 518 | relation_settings={ | ||
2901 | 519 | '{}_authorized_keys_max_index'.format('nova'): index}) | ||
2902 | 375 | 520 | ||
2903 | 376 | 521 | ||
2904 | 377 | @hooks.hook('cloud-compute-relation-departed') | 522 | @hooks.hook('cloud-compute-relation-departed') |
2905 | @@ -385,15 +530,7 @@ | |||
2906 | 385 | if not eligible_leader(CLUSTER_RES): | 530 | if not eligible_leader(CLUSTER_RES): |
2907 | 386 | return | 531 | return |
2908 | 387 | 532 | ||
2918 | 388 | url = canonical_url(CONFIGS) + ':9696' | 533 | rel_settings = neutron_settings() |
2910 | 389 | # XXX: Can we rename to neutron_*? | ||
2911 | 390 | rel_settings = { | ||
2912 | 391 | 'quantum_host': urlparse(url).hostname, | ||
2913 | 392 | 'quantum_url': url, | ||
2914 | 393 | 'quantum_port': 9696, | ||
2915 | 394 | 'quantum_plugin': neutron_plugin(), | ||
2916 | 395 | 'region': config('region') | ||
2917 | 396 | } | ||
2919 | 397 | 534 | ||
2920 | 398 | # inform quantum about local keystone auth config | 535 | # inform quantum about local keystone auth config |
2921 | 399 | ks_auth_config = _auth_config() | 536 | ks_auth_config = _auth_config() |
2922 | @@ -403,7 +540,6 @@ | |||
2923 | 403 | ks_ca = keystone_ca_cert_b64() | 540 | ks_ca = keystone_ca_cert_b64() |
2924 | 404 | if ks_auth_config and ks_ca: | 541 | if ks_auth_config and ks_ca: |
2925 | 405 | rel_settings['ca_cert'] = ks_ca | 542 | rel_settings['ca_cert'] = ks_ca |
2926 | 406 | |||
2927 | 407 | relation_set(relation_id=rid, **rel_settings) | 543 | relation_set(relation_id=rid, **rel_settings) |
2928 | 408 | 544 | ||
2929 | 409 | 545 | ||
2930 | @@ -414,21 +550,44 @@ | |||
2931 | 414 | @restart_on_change(restart_map(), stopstart=True) | 550 | @restart_on_change(restart_map(), stopstart=True) |
2932 | 415 | def cluster_changed(): | 551 | def cluster_changed(): |
2933 | 416 | CONFIGS.write_all() | 552 | CONFIGS.write_all() |
2934 | 553 | if is_relation_made('cluster'): | ||
2935 | 554 | peer_echo(includes='dbsync_state') | ||
2936 | 555 | dbsync_state = peer_retrieve('dbsync_state') | ||
2937 | 556 | if dbsync_state == 'complete': | ||
2938 | 557 | enable_services() | ||
2939 | 558 | cmd_all_services('start') | ||
2940 | 559 | else: | ||
2941 | 560 | log('Database sync not ready. Shutting down services') | ||
2942 | 561 | disable_services() | ||
2943 | 562 | cmd_all_services('stop') | ||
2944 | 417 | 563 | ||
2945 | 418 | 564 | ||
2946 | 419 | @hooks.hook('ha-relation-joined') | 565 | @hooks.hook('ha-relation-joined') |
2947 | 420 | def ha_joined(): | 566 | def ha_joined(): |
2948 | 421 | config = get_hacluster_config() | 567 | config = get_hacluster_config() |
2949 | 422 | resources = { | 568 | resources = { |
2950 | 423 | 'res_nova_vip': 'ocf:heartbeat:IPaddr2', | ||
2951 | 424 | 'res_nova_haproxy': 'lsb:haproxy', | 569 | 'res_nova_haproxy': 'lsb:haproxy', |
2952 | 425 | } | 570 | } |
2953 | 426 | vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ | ||
2954 | 427 | (config['vip'], config['vip_cidr'], config['vip_iface']) | ||
2955 | 428 | resource_params = { | 571 | resource_params = { |
2956 | 429 | 'res_nova_vip': vip_params, | ||
2957 | 430 | 'res_nova_haproxy': 'op monitor interval="5s"' | 572 | 'res_nova_haproxy': 'op monitor interval="5s"' |
2958 | 431 | } | 573 | } |
2959 | 574 | vip_group = [] | ||
2960 | 575 | for vip in config['vip'].split(): | ||
2961 | 576 | iface = get_iface_for_address(vip) | ||
2962 | 577 | if iface is not None: | ||
2963 | 578 | vip_key = 'res_nova_{}_vip'.format(iface) | ||
2964 | 579 | resources[vip_key] = 'ocf:heartbeat:IPaddr2' | ||
2965 | 580 | resource_params[vip_key] = ( | ||
2966 | 581 | 'params ip="{vip}" cidr_netmask="{netmask}"' | ||
2967 | 582 | ' nic="{iface}"'.format(vip=vip, | ||
2968 | 583 | iface=iface, | ||
2969 | 584 | netmask=get_netmask_for_address(vip)) | ||
2970 | 585 | ) | ||
2971 | 586 | vip_group.append(vip_key) | ||
2972 | 587 | |||
2973 | 588 | if len(vip_group) >= 1: | ||
2974 | 589 | relation_set(groups={'grp_nova_vips': ' '.join(vip_group)}) | ||
2975 | 590 | |||
2976 | 432 | init_services = { | 591 | init_services = { |
2977 | 433 | 'res_nova_haproxy': 'haproxy' | 592 | 'res_nova_haproxy': 'haproxy' |
2978 | 434 | } | 593 | } |
2979 | @@ -449,6 +608,7 @@ | |||
2980 | 449 | if not clustered or clustered in [None, 'None', '']: | 608 | if not clustered or clustered in [None, 'None', '']: |
2981 | 450 | log('ha_changed: hacluster subordinate not fully clustered.') | 609 | log('ha_changed: hacluster subordinate not fully clustered.') |
2982 | 451 | return | 610 | return |
2983 | 611 | <<<<<<< TREE | ||
2984 | 452 | 612 | ||
2985 | 453 | CONFIGS.write(NOVA_CONF) | 613 | CONFIGS.write(NOVA_CONF) |
2986 | 454 | if network_manager() == 'quantum': | 614 | if network_manager() == 'quantum': |
2987 | @@ -456,6 +616,16 @@ | |||
2988 | 456 | if network_manager() == 'neutron': | 616 | if network_manager() == 'neutron': |
2989 | 457 | CONFIGS.write(NEUTRON_CONF) | 617 | CONFIGS.write(NEUTRON_CONF) |
2990 | 458 | 618 | ||
2991 | 619 | ======= | ||
2992 | 620 | |||
2993 | 621 | CONFIGS.write(NOVA_CONF) | ||
2994 | 622 | if not is_relation_made('neutron-api'): | ||
2995 | 623 | if network_manager() == 'quantum': | ||
2996 | 624 | CONFIGS.write(QUANTUM_CONF) | ||
2997 | 625 | if network_manager() == 'neutron': | ||
2998 | 626 | CONFIGS.write(NEUTRON_CONF) | ||
2999 | 627 | |||
3000 | 628 | >>>>>>> MERGE-SOURCE | ||
3001 | 459 | if not is_leader(CLUSTER_RES): | 629 | if not is_leader(CLUSTER_RES): |
3002 | 460 | log('ha_changed: hacluster complete but we are not leader.') | 630 | log('ha_changed: hacluster complete but we are not leader.') |
3003 | 461 | return | 631 | return |
3004 | @@ -465,13 +635,23 @@ | |||
3005 | 465 | identity_joined(rid=rid) | 635 | identity_joined(rid=rid) |
3006 | 466 | 636 | ||
3007 | 467 | 637 | ||
3008 | 638 | @hooks.hook('shared-db-relation-broken', | ||
3009 | 639 | 'pgsql-nova-db-relation-broken') | ||
3010 | 640 | @service_guard(guard_map(), CONFIGS, | ||
3011 | 641 | active=config('service-guard')) | ||
3012 | 642 | def db_departed(): | ||
3013 | 643 | CONFIGS.write_all() | ||
3014 | 644 | for r_id in relation_ids('cluster'): | ||
3015 | 645 | relation_set(relation_id=r_id, dbsync_state='incomplete') | ||
3016 | 646 | disable_services() | ||
3017 | 647 | cmd_all_services('stop') | ||
3018 | 648 | |||
3019 | 649 | |||
3020 | 468 | @hooks.hook('amqp-relation-broken', | 650 | @hooks.hook('amqp-relation-broken', |
3021 | 469 | 'cinder-volume-service-relation-broken', | 651 | 'cinder-volume-service-relation-broken', |
3022 | 470 | 'identity-service-relation-broken', | 652 | 'identity-service-relation-broken', |
3023 | 471 | 'image-service-relation-broken', | 653 | 'image-service-relation-broken', |
3024 | 472 | 'nova-volume-service-relation-broken', | 654 | 'nova-volume-service-relation-broken', |
3025 | 473 | 'shared-db-relation-broken', | ||
3026 | 474 | 'pgsql-nova-db-relation-broken', | ||
3027 | 475 | 'pgsql-neutron-db-relation-broken', | 655 | 'pgsql-neutron-db-relation-broken', |
3028 | 476 | 'quantum-network-service-relation-broken') | 656 | 'quantum-network-service-relation-broken') |
3029 | 477 | @service_guard(guard_map(), CONFIGS, | 657 | @service_guard(guard_map(), CONFIGS, |
3030 | @@ -509,8 +689,8 @@ | |||
3031 | 509 | rel_settings.update({ | 689 | rel_settings.update({ |
3032 | 510 | 'quantum_plugin': neutron_plugin(), | 690 | 'quantum_plugin': neutron_plugin(), |
3033 | 511 | 'quantum_security_groups': config('quantum-security-groups'), | 691 | 'quantum_security_groups': config('quantum-security-groups'), |
3036 | 512 | 'quantum_url': (canonical_url(CONFIGS) + ':' + | 692 | 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL), |
3037 | 513 | str(api_port('neutron-server')))}) | 693 | str(api_port('neutron-server')))}) |
3038 | 514 | 694 | ||
3039 | 515 | relation_set(relation_id=rid, **rel_settings) | 695 | relation_set(relation_id=rid, **rel_settings) |
3040 | 516 | 696 | ||
3041 | @@ -525,10 +705,55 @@ | |||
3042 | 525 | 705 | ||
3043 | 526 | @hooks.hook('upgrade-charm') | 706 | @hooks.hook('upgrade-charm') |
3044 | 527 | def upgrade_charm(): | 707 | def upgrade_charm(): |
3045 | 708 | apt_install(filter_installed_packages(determine_packages()), | ||
3046 | 709 | fatal=True) | ||
3047 | 528 | for r_id in relation_ids('amqp'): | 710 | for r_id in relation_ids('amqp'): |
3048 | 529 | amqp_joined(relation_id=r_id) | 711 | amqp_joined(relation_id=r_id) |
3049 | 530 | for r_id in relation_ids('identity-service'): | 712 | for r_id in relation_ids('identity-service'): |
3050 | 531 | identity_joined(rid=r_id) | 713 | identity_joined(rid=r_id) |
3051 | 714 | for r_id in relation_ids('cloud-compute'): | ||
3052 | 715 | for unit in related_units(r_id): | ||
3053 | 716 | compute_changed(r_id, unit) | ||
3054 | 717 | |||
3055 | 718 | |||
3056 | 719 | @hooks.hook('neutron-api-relation-joined') | ||
3057 | 720 | def neutron_api_relation_joined(rid=None): | ||
3058 | 721 | with open('/etc/init/neutron-server.override', 'wb') as out: | ||
3059 | 722 | out.write('manual\n') | ||
3060 | 723 | if os.path.isfile(NEUTRON_CONF): | ||
3061 | 724 | os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused') | ||
3062 | 725 | if service_running('neutron-server'): | ||
3063 | 726 | service_stop('neutron-server') | ||
3064 | 727 | for id_rid in relation_ids('identity-service'): | ||
3065 | 728 | identity_joined(rid=id_rid) | ||
3066 | 729 | nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2" | ||
3067 | 730 | relation_set(relation_id=rid, nova_url=nova_url) | ||
3068 | 731 | |||
3069 | 732 | |||
3070 | 733 | @hooks.hook('neutron-api-relation-changed') | ||
3071 | 734 | @service_guard(guard_map(), CONFIGS, | ||
3072 | 735 | active=config('service-guard')) | ||
3073 | 736 | @restart_on_change(restart_map()) | ||
3074 | 737 | def neutron_api_relation_changed(): | ||
3075 | 738 | CONFIGS.write(NOVA_CONF) | ||
3076 | 739 | for rid in relation_ids('cloud-compute'): | ||
3077 | 740 | compute_joined(rid=rid) | ||
3078 | 741 | for rid in relation_ids('quantum-network-service'): | ||
3079 | 742 | quantum_joined(rid=rid) | ||
3080 | 743 | |||
3081 | 744 | |||
3082 | 745 | @hooks.hook('neutron-api-relation-broken') | ||
3083 | 746 | @service_guard(guard_map(), CONFIGS, | ||
3084 | 747 | active=config('service-guard')) | ||
3085 | 748 | @restart_on_change(restart_map()) | ||
3086 | 749 | def neutron_api_relation_broken(): | ||
3087 | 750 | if os.path.isfile('/etc/init/neutron-server.override'): | ||
3088 | 751 | os.remove('/etc/init/neutron-server.override') | ||
3089 | 752 | CONFIGS.write_all() | ||
3090 | 753 | for rid in relation_ids('cloud-compute'): | ||
3091 | 754 | compute_joined(rid=rid) | ||
3092 | 755 | for rid in relation_ids('quantum-network-service'): | ||
3093 | 756 | quantum_joined(rid=rid) | ||
3094 | 532 | 757 | ||
3095 | 533 | 758 | ||
3096 | 534 | def main(): | 759 | def main(): |
3097 | 535 | 760 | ||
3098 | === modified file 'hooks/nova_cc_utils.py' | |||
3099 | --- hooks/nova_cc_utils.py 2014-07-29 15:05:01 +0000 | |||
3100 | +++ hooks/nova_cc_utils.py 2014-09-16 09:08:32 +0000 | |||
3101 | @@ -12,6 +12,8 @@ | |||
3102 | 12 | 12 | ||
3103 | 13 | from charmhelpers.contrib.hahelpers.cluster import eligible_leader | 13 | from charmhelpers.contrib.hahelpers.cluster import eligible_leader |
3104 | 14 | 14 | ||
3105 | 15 | from charmhelpers.contrib.peerstorage import peer_store | ||
3106 | 16 | |||
3107 | 15 | from charmhelpers.contrib.openstack.utils import ( | 17 | from charmhelpers.contrib.openstack.utils import ( |
3108 | 16 | configure_installation_source, | 18 | configure_installation_source, |
3109 | 17 | get_host_ip, | 19 | get_host_ip, |
3110 | @@ -39,17 +41,23 @@ | |||
3111 | 39 | ) | 41 | ) |
3112 | 40 | 42 | ||
3113 | 41 | from charmhelpers.core.host import ( | 43 | from charmhelpers.core.host import ( |
3117 | 42 | service_start, | 44 | <<<<<<< TREE |
3118 | 43 | service_stop, | 45 | service_start, |
3119 | 44 | service_running | 46 | service_stop, |
3120 | 47 | service_running | ||
3121 | 48 | ======= | ||
3122 | 49 | service, | ||
3123 | 50 | service_start, | ||
3124 | 51 | service_stop, | ||
3125 | 52 | service_running | ||
3126 | 53 | >>>>>>> MERGE-SOURCE | ||
3127 | 45 | ) | 54 | ) |
3128 | 46 | 55 | ||
3129 | 47 | |||
3130 | 48 | import nova_cc_context | 56 | import nova_cc_context |
3131 | 49 | 57 | ||
3132 | 50 | TEMPLATES = 'templates/' | 58 | TEMPLATES = 'templates/' |
3133 | 51 | 59 | ||
3135 | 52 | CLUSTER_RES = 'res_nova_vip' | 60 | CLUSTER_RES = 'grp_nova_vips' |
3136 | 53 | 61 | ||
3137 | 54 | # removed from original: charm-helper-sh | 62 | # removed from original: charm-helper-sh |
3138 | 55 | BASE_PACKAGES = [ | 63 | BASE_PACKAGES = [ |
3139 | @@ -58,6 +66,7 @@ | |||
3140 | 58 | 'python-keystoneclient', | 66 | 'python-keystoneclient', |
3141 | 59 | 'python-mysqldb', | 67 | 'python-mysqldb', |
3142 | 60 | 'python-psycopg2', | 68 | 'python-psycopg2', |
3143 | 69 | 'python-psutil', | ||
3144 | 61 | 'uuid', | 70 | 'uuid', |
3145 | 62 | ] | 71 | ] |
3146 | 63 | 72 | ||
3147 | @@ -110,7 +119,8 @@ | |||
3148 | 110 | nova_cc_context.HAProxyContext(), | 119 | nova_cc_context.HAProxyContext(), |
3149 | 111 | nova_cc_context.IdentityServiceContext(), | 120 | nova_cc_context.IdentityServiceContext(), |
3150 | 112 | nova_cc_context.VolumeServiceContext(), | 121 | nova_cc_context.VolumeServiceContext(), |
3152 | 113 | nova_cc_context.NeutronCCContext()], | 122 | nova_cc_context.NeutronCCContext(), |
3153 | 123 | nova_cc_context.NovaConfigContext()], | ||
3154 | 114 | }), | 124 | }), |
3155 | 115 | (NOVA_API_PASTE, { | 125 | (NOVA_API_PASTE, { |
3156 | 116 | 'services': [s for s in BASE_SERVICES if 'api' in s], | 126 | 'services': [s for s in BASE_SERVICES if 'api' in s], |
3157 | @@ -150,7 +160,8 @@ | |||
3158 | 150 | nova_cc_context.IdentityServiceContext(), | 160 | nova_cc_context.IdentityServiceContext(), |
3159 | 151 | nova_cc_context.NeutronCCContext(), | 161 | nova_cc_context.NeutronCCContext(), |
3160 | 152 | nova_cc_context.HAProxyContext(), | 162 | nova_cc_context.HAProxyContext(), |
3162 | 153 | context.SyslogContext()], | 163 | context.SyslogContext(), |
3163 | 164 | nova_cc_context.NovaConfigContext()], | ||
3164 | 154 | }), | 165 | }), |
3165 | 155 | (NEUTRON_DEFAULT, { | 166 | (NEUTRON_DEFAULT, { |
3166 | 156 | 'services': ['neutron-server'], | 167 | 'services': ['neutron-server'], |
3167 | @@ -175,6 +186,27 @@ | |||
3168 | 175 | 186 | ||
3169 | 176 | NOVA_SSH_DIR = '/etc/nova/compute_ssh/' | 187 | NOVA_SSH_DIR = '/etc/nova/compute_ssh/' |
3170 | 177 | 188 | ||
3171 | 189 | CONSOLE_CONFIG = { | ||
3172 | 190 | 'spice': { | ||
3173 | 191 | 'packages': ['nova-spiceproxy', 'nova-consoleauth'], | ||
3174 | 192 | 'services': ['nova-spiceproxy', 'nova-consoleauth'], | ||
3175 | 193 | 'proxy-page': '/spice_auto.html', | ||
3176 | 194 | 'proxy-port': 6082, | ||
3177 | 195 | }, | ||
3178 | 196 | 'novnc': { | ||
3179 | 197 | 'packages': ['nova-novncproxy', 'nova-consoleauth'], | ||
3180 | 198 | 'services': ['nova-novncproxy', 'nova-consoleauth'], | ||
3181 | 199 | 'proxy-page': '/vnc_auto.html', | ||
3182 | 200 | 'proxy-port': 6080, | ||
3183 | 201 | }, | ||
3184 | 202 | 'xvpvnc': { | ||
3185 | 203 | 'packages': ['nova-xvpvncproxy', 'nova-consoleauth'], | ||
3186 | 204 | 'services': ['nova-xvpvncproxy', 'nova-consoleauth'], | ||
3187 | 205 | 'proxy-page': '/console', | ||
3188 | 206 | 'proxy-port': 6081, | ||
3189 | 207 | }, | ||
3190 | 208 | } | ||
3191 | 209 | |||
3192 | 178 | 210 | ||
3193 | 179 | def resource_map(): | 211 | def resource_map(): |
3194 | 180 | ''' | 212 | ''' |
3195 | @@ -191,44 +223,56 @@ | |||
3196 | 191 | 223 | ||
3197 | 192 | net_manager = network_manager() | 224 | net_manager = network_manager() |
3198 | 193 | 225 | ||
3199 | 194 | # pop out irrelevant resources from the OrderedDict (easier than adding | ||
3200 | 195 | # them late) | ||
3201 | 196 | if net_manager != 'quantum': | ||
3202 | 197 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) | ||
3203 | 198 | if 'quantum' in k] | ||
3204 | 199 | if net_manager != 'neutron': | ||
3205 | 200 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) | ||
3206 | 201 | if 'neutron' in k] | ||
3207 | 202 | |||
3208 | 203 | if os.path.exists('/etc/apache2/conf-available'): | 226 | if os.path.exists('/etc/apache2/conf-available'): |
3209 | 204 | resource_map.pop(APACHE_CONF) | 227 | resource_map.pop(APACHE_CONF) |
3210 | 205 | else: | 228 | else: |
3211 | 206 | resource_map.pop(APACHE_24_CONF) | 229 | resource_map.pop(APACHE_24_CONF) |
3212 | 207 | 230 | ||
3228 | 208 | # add neutron plugin requirements. nova-c-c only needs the neutron-server | 231 | if is_relation_made('neutron-api'): |
3229 | 209 | # associated with configs, not the plugin agent. | 232 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
3230 | 210 | if net_manager in ['quantum', 'neutron']: | 233 | if 'quantum' in k or 'neutron' in k] |
3231 | 211 | plugin = neutron_plugin() | 234 | resource_map[NOVA_CONF]['contexts'].append( |
3232 | 212 | if plugin: | 235 | nova_cc_context.NeutronAPIContext()) |
3233 | 213 | conf = neutron_plugin_attribute(plugin, 'config', net_manager) | 236 | else: |
3234 | 214 | ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager) | 237 | resource_map[NOVA_CONF]['contexts'].append( |
3235 | 215 | or []) | 238 | nova_cc_context.NeutronCCContext()) |
3236 | 216 | services = neutron_plugin_attribute(plugin, 'server_services', | 239 | # pop out irrelevant resources from the OrderedDict (easier than adding |
3237 | 217 | net_manager) | 240 | # them late) |
3238 | 218 | resource_map[conf] = {} | 241 | if net_manager != 'quantum': |
3239 | 219 | resource_map[conf]['services'] = services | 242 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
3240 | 220 | resource_map[conf]['contexts'] = ctxts | 243 | if 'quantum' in k] |
3241 | 221 | resource_map[conf]['contexts'].append( | 244 | if net_manager != 'neutron': |
3242 | 222 | nova_cc_context.NeutronCCContext()) | 245 | [resource_map.pop(k) for k in list(resource_map.iterkeys()) |
3243 | 246 | if 'neutron' in k] | ||
3244 | 247 | # add neutron plugin requirements. nova-c-c only needs the | ||
3245 | 248 | # neutron-server associated with configs, not the plugin agent. | ||
3246 | 249 | if net_manager in ['quantum', 'neutron']: | ||
3247 | 250 | plugin = neutron_plugin() | ||
3248 | 251 | if plugin: | ||
3249 | 252 | conf = neutron_plugin_attribute(plugin, 'config', net_manager) | ||
3250 | 253 | ctxts = (neutron_plugin_attribute(plugin, 'contexts', | ||
3251 | 254 | net_manager) | ||
3252 | 255 | or []) | ||
3253 | 256 | services = neutron_plugin_attribute(plugin, 'server_services', | ||
3254 | 257 | net_manager) | ||
3255 | 258 | resource_map[conf] = {} | ||
3256 | 259 | resource_map[conf]['services'] = services | ||
3257 | 260 | resource_map[conf]['contexts'] = ctxts | ||
3258 | 261 | resource_map[conf]['contexts'].append( | ||
3259 | 262 | nova_cc_context.NeutronCCContext()) | ||
3260 | 223 | 263 | ||
3264 | 224 | # update for postgres | 264 | # update for postgres |
3265 | 225 | resource_map[conf]['contexts'].append( | 265 | resource_map[conf]['contexts'].append( |
3266 | 226 | nova_cc_context.NeutronPostgresqlDBContext()) | 266 | nova_cc_context.NeutronPostgresqlDBContext()) |
3267 | 227 | 267 | ||
3268 | 228 | # nova-conductor for releases >= G. | 268 | # nova-conductor for releases >= G. |
3269 | 229 | if os_release('nova-common') not in ['essex', 'folsom']: | 269 | if os_release('nova-common') not in ['essex', 'folsom']: |
3270 | 230 | resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor'] | 270 | resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor'] |
3271 | 231 | 271 | ||
3272 | 272 | if console_attributes('services'): | ||
3273 | 273 | resource_map['/etc/nova/nova.conf']['services'] += \ | ||
3274 | 274 | console_attributes('services') | ||
3275 | 275 | |||
3276 | 232 | # also manage any configs that are being updated by subordinates. | 276 | # also manage any configs that are being updated by subordinates. |
3277 | 233 | vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware', | 277 | vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware', |
3278 | 234 | service='nova', | 278 | service='nova', |
3279 | @@ -238,6 +282,7 @@ | |||
3280 | 238 | for s in vmware_ctxt['services']: | 282 | for s in vmware_ctxt['services']: |
3281 | 239 | if s not in resource_map[NOVA_CONF]['services']: | 283 | if s not in resource_map[NOVA_CONF]['services']: |
3282 | 240 | resource_map[NOVA_CONF]['services'].append(s) | 284 | resource_map[NOVA_CONF]['services'].append(s) |
3283 | 285 | |||
3284 | 241 | return resource_map | 286 | return resource_map |
3285 | 242 | 287 | ||
3286 | 243 | 288 | ||
3287 | @@ -268,9 +313,9 @@ | |||
3288 | 268 | '''Assemble a list of API ports for services we are managing''' | 313 | '''Assemble a list of API ports for services we are managing''' |
3289 | 269 | ports = [] | 314 | ports = [] |
3290 | 270 | for services in restart_map().values(): | 315 | for services in restart_map().values(): |
3292 | 271 | for service in services: | 316 | for svc in services: |
3293 | 272 | try: | 317 | try: |
3295 | 273 | ports.append(API_PORTS[service]) | 318 | ports.append(API_PORTS[svc]) |
3296 | 274 | except KeyError: | 319 | except KeyError: |
3297 | 275 | pass | 320 | pass |
3298 | 276 | return list(set(ports)) | 321 | return list(set(ports)) |
3299 | @@ -280,6 +325,27 @@ | |||
3300 | 280 | return API_PORTS[service] | 325 | return API_PORTS[service] |
3301 | 281 | 326 | ||
3302 | 282 | 327 | ||
3303 | 328 | def console_attributes(attr, proto=None): | ||
3304 | 329 | '''Leave proto unset to query attributes of the protocal specified at | ||
3305 | 330 | runtime''' | ||
3306 | 331 | if proto: | ||
3307 | 332 | console_proto = proto | ||
3308 | 333 | else: | ||
3309 | 334 | console_proto = config('console-access-protocol') | ||
3310 | 335 | if attr == 'protocol': | ||
3311 | 336 | return console_proto | ||
3312 | 337 | # 'vnc' is a virtual type made up of novnc and xvpvnc | ||
3313 | 338 | if console_proto == 'vnc': | ||
3314 | 339 | if attr in ['packages', 'services']: | ||
3315 | 340 | return list(set(CONSOLE_CONFIG['novnc'][attr] + | ||
3316 | 341 | CONSOLE_CONFIG['xvpvnc'][attr])) | ||
3317 | 342 | else: | ||
3318 | 343 | return None | ||
3319 | 344 | if console_proto in CONSOLE_CONFIG: | ||
3320 | 345 | return CONSOLE_CONFIG[console_proto][attr] | ||
3321 | 346 | return None | ||
3322 | 347 | |||
3323 | 348 | |||
3324 | 283 | def determine_packages(): | 349 | def determine_packages(): |
3325 | 284 | # currently all packages match service names | 350 | # currently all packages match service names |
3326 | 285 | packages = [] + BASE_PACKAGES | 351 | packages = [] + BASE_PACKAGES |
3327 | @@ -289,6 +355,8 @@ | |||
3328 | 289 | pkgs = neutron_plugin_attribute(neutron_plugin(), 'server_packages', | 355 | pkgs = neutron_plugin_attribute(neutron_plugin(), 'server_packages', |
3329 | 290 | network_manager()) | 356 | network_manager()) |
3330 | 291 | packages.extend(pkgs) | 357 | packages.extend(pkgs) |
3331 | 358 | if console_attributes('packages'): | ||
3332 | 359 | packages.extend(console_attributes('packages')) | ||
3333 | 292 | return list(set(packages)) | 360 | return list(set(packages)) |
3334 | 293 | 361 | ||
3335 | 294 | 362 | ||
3336 | @@ -486,6 +554,12 @@ | |||
3337 | 486 | log('Migrating the nova database.', level=INFO) | 554 | log('Migrating the nova database.', level=INFO) |
3338 | 487 | cmd = ['nova-manage', 'db', 'sync'] | 555 | cmd = ['nova-manage', 'db', 'sync'] |
3339 | 488 | subprocess.check_output(cmd) | 556 | subprocess.check_output(cmd) |
3340 | 557 | if is_relation_made('cluster'): | ||
3341 | 558 | log('Informing peers that dbsync is complete', level=INFO) | ||
3342 | 559 | peer_store('dbsync_state', 'complete') | ||
3343 | 560 | log('Enabling services', level=INFO) | ||
3344 | 561 | enable_services() | ||
3345 | 562 | cmd_all_services('start') | ||
3346 | 489 | 563 | ||
3347 | 490 | 564 | ||
3348 | 491 | def auth_token_config(setting): | 565 | def auth_token_config(setting): |
3349 | @@ -512,8 +586,11 @@ | |||
3350 | 512 | return b64encode(_in.read()) | 586 | return b64encode(_in.read()) |
3351 | 513 | 587 | ||
3352 | 514 | 588 | ||
3355 | 515 | def ssh_directory_for_unit(user=None): | 589 | def ssh_directory_for_unit(unit=None, user=None): |
3356 | 516 | remote_service = remote_unit().split('/')[0] | 590 | if unit: |
3357 | 591 | remote_service = unit.split('/')[0] | ||
3358 | 592 | else: | ||
3359 | 593 | remote_service = remote_unit().split('/')[0] | ||
3360 | 517 | if user: | 594 | if user: |
3361 | 518 | remote_service = "{}_{}".format(remote_service, user) | 595 | remote_service = "{}_{}".format(remote_service, user) |
3362 | 519 | _dir = os.path.join(NOVA_SSH_DIR, remote_service) | 596 | _dir = os.path.join(NOVA_SSH_DIR, remote_service) |
3363 | @@ -527,29 +604,29 @@ | |||
3364 | 527 | return _dir | 604 | return _dir |
3365 | 528 | 605 | ||
3366 | 529 | 606 | ||
3377 | 530 | def known_hosts(user=None): | 607 | def known_hosts(unit=None, user=None): |
3378 | 531 | return os.path.join(ssh_directory_for_unit(user), 'known_hosts') | 608 | return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts') |
3379 | 532 | 609 | ||
3380 | 533 | 610 | ||
3381 | 534 | def authorized_keys(user=None): | 611 | def authorized_keys(unit=None, user=None): |
3382 | 535 | return os.path.join(ssh_directory_for_unit(user), 'authorized_keys') | 612 | return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys') |
3383 | 536 | 613 | ||
3384 | 537 | 614 | ||
3385 | 538 | def ssh_known_host_key(host, user=None): | 615 | def ssh_known_host_key(host, unit=None, user=None): |
3386 | 539 | cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host] | 616 | cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host] |
3387 | 540 | try: | 617 | try: |
3388 | 541 | return subprocess.check_output(cmd).strip() | 618 | return subprocess.check_output(cmd).strip() |
3389 | 542 | except subprocess.CalledProcessError: | 619 | except subprocess.CalledProcessError: |
3390 | 543 | return None | 620 | return None |
3391 | 544 | 621 | ||
3392 | 545 | 622 | ||
3394 | 546 | def remove_known_host(host, user=None): | 623 | def remove_known_host(host, unit=None, user=None): |
3395 | 547 | log('Removing SSH known host entry for compute host at %s' % host) | 624 | log('Removing SSH known host entry for compute host at %s' % host) |
3397 | 548 | cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host] | 625 | cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host] |
3398 | 549 | subprocess.check_call(cmd) | 626 | subprocess.check_call(cmd) |
3399 | 550 | 627 | ||
3400 | 551 | 628 | ||
3402 | 552 | def add_known_host(host, user=None): | 629 | def add_known_host(host, unit=None, user=None): |
3403 | 553 | '''Add variations of host to a known hosts file.''' | 630 | '''Add variations of host to a known hosts file.''' |
3404 | 554 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] | 631 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] |
3405 | 555 | try: | 632 | try: |
3406 | @@ -558,34 +635,37 @@ | |||
3407 | 558 | log('Could not obtain SSH host key from %s' % host, level=ERROR) | 635 | log('Could not obtain SSH host key from %s' % host, level=ERROR) |
3408 | 559 | raise e | 636 | raise e |
3409 | 560 | 637 | ||
3411 | 561 | current_key = ssh_known_host_key(host, user) | 638 | current_key = ssh_known_host_key(host, unit, user) |
3412 | 562 | if current_key: | 639 | if current_key: |
3413 | 563 | if remote_key == current_key: | 640 | if remote_key == current_key: |
3414 | 564 | log('Known host key for compute host %s up to date.' % host) | 641 | log('Known host key for compute host %s up to date.' % host) |
3415 | 565 | return | 642 | return |
3416 | 566 | else: | 643 | else: |
3418 | 567 | remove_known_host(host, user) | 644 | remove_known_host(host, unit, user) |
3419 | 568 | 645 | ||
3420 | 569 | log('Adding SSH host key to known hosts for compute node at %s.' % host) | 646 | log('Adding SSH host key to known hosts for compute node at %s.' % host) |
3422 | 570 | with open(known_hosts(user), 'a') as out: | 647 | with open(known_hosts(unit, user), 'a') as out: |
3423 | 571 | out.write(remote_key + '\n') | 648 | out.write(remote_key + '\n') |
3424 | 572 | 649 | ||
3425 | 573 | 650 | ||
3428 | 574 | def ssh_authorized_key_exists(public_key, user=None): | 651 | def ssh_authorized_key_exists(public_key, unit=None, user=None): |
3429 | 575 | with open(authorized_keys(user)) as keys: | 652 | with open(authorized_keys(unit, user)) as keys: |
3430 | 576 | return (' %s ' % public_key) in keys.read() | 653 | return (' %s ' % public_key) in keys.read() |
3431 | 577 | 654 | ||
3432 | 578 | 655 | ||
3435 | 579 | def add_authorized_key(public_key, user=None): | 656 | def add_authorized_key(public_key, unit=None, user=None): |
3436 | 580 | with open(authorized_keys(user), 'a') as keys: | 657 | with open(authorized_keys(unit, user), 'a') as keys: |
3437 | 581 | keys.write(public_key + '\n') | 658 | keys.write(public_key + '\n') |
3438 | 582 | 659 | ||
3439 | 583 | 660 | ||
3441 | 584 | def ssh_compute_add(public_key, user=None): | 661 | def ssh_compute_add(public_key, rid=None, unit=None, user=None): |
3442 | 585 | # If remote compute node hands us a hostname, ensure we have a | 662 | # If remote compute node hands us a hostname, ensure we have a |
3443 | 586 | # known hosts entry for its IP, hostname and FQDN. | 663 | # known hosts entry for its IP, hostname and FQDN. |
3445 | 587 | private_address = relation_get('private-address') | 664 | private_address = relation_get(rid=rid, unit=unit, |
3446 | 665 | attribute='private-address') | ||
3447 | 588 | hosts = [private_address] | 666 | hosts = [private_address] |
3448 | 667 | if relation_get('hostname'): | ||
3449 | 668 | hosts.append(relation_get('hostname')) | ||
3450 | 589 | 669 | ||
3451 | 590 | if not is_ip(private_address): | 670 | if not is_ip(private_address): |
3452 | 591 | hosts.append(get_host_ip(private_address)) | 671 | hosts.append(get_host_ip(private_address)) |
3453 | @@ -596,31 +676,41 @@ | |||
3454 | 596 | hosts.append(hn.split('.')[0]) | 676 | hosts.append(hn.split('.')[0]) |
3455 | 597 | 677 | ||
3456 | 598 | for host in list(set(hosts)): | 678 | for host in list(set(hosts)): |
3459 | 599 | if not ssh_known_host_key(host, user): | 679 | if not ssh_known_host_key(host, unit, user): |
3460 | 600 | add_known_host(host, user) | 680 | add_known_host(host, unit, user) |
3461 | 601 | 681 | ||
3463 | 602 | if not ssh_authorized_key_exists(public_key, user): | 682 | if not ssh_authorized_key_exists(public_key, unit, user): |
3464 | 603 | log('Saving SSH authorized key for compute host at %s.' % | 683 | log('Saving SSH authorized key for compute host at %s.' % |
3465 | 604 | private_address) | 684 | private_address) |
3482 | 605 | add_authorized_key(public_key, user) | 685 | add_authorized_key(public_key, unit, user) |
3483 | 606 | 686 | ||
3484 | 607 | 687 | ||
3485 | 608 | def ssh_known_hosts_b64(user=None): | 688 | def ssh_known_hosts_lines(unit=None, user=None): |
3486 | 609 | with open(known_hosts(user)) as hosts: | 689 | known_hosts_list = [] |
3487 | 610 | return b64encode(hosts.read()) | 690 | |
3488 | 611 | 691 | with open(known_hosts(unit, user)) as hosts: | |
3489 | 612 | 692 | for hosts_line in hosts: | |
3490 | 613 | def ssh_authorized_keys_b64(user=None): | 693 | if hosts_line.rstrip(): |
3491 | 614 | with open(authorized_keys(user)) as keys: | 694 | known_hosts_list.append(hosts_line.rstrip()) |
3492 | 615 | return b64encode(keys.read()) | 695 | return(known_hosts_list) |
3493 | 616 | 696 | ||
3494 | 617 | 697 | ||
3495 | 618 | def ssh_compute_remove(public_key, user=None): | 698 | def ssh_authorized_keys_lines(unit=None, user=None): |
3496 | 619 | if not (os.path.isfile(authorized_keys(user)) or | 699 | authorized_keys_list = [] |
3497 | 620 | os.path.isfile(known_hosts(user))): | 700 | |
3498 | 701 | with open(authorized_keys(unit, user)) as keys: | ||
3499 | 702 | for authkey_line in keys: | ||
3500 | 703 | if authkey_line.rstrip(): | ||
3501 | 704 | authorized_keys_list.append(authkey_line.rstrip()) | ||
3502 | 705 | return(authorized_keys_list) | ||
3503 | 706 | |||
3504 | 707 | |||
3505 | 708 | def ssh_compute_remove(public_key, unit=None, user=None): | ||
3506 | 709 | if not (os.path.isfile(authorized_keys(unit, user)) or | ||
3507 | 710 | os.path.isfile(known_hosts(unit, user))): | ||
3508 | 621 | return | 711 | return |
3509 | 622 | 712 | ||
3511 | 623 | with open(authorized_keys(user)) as _keys: | 713 | with open(authorized_keys(unit, user)) as _keys: |
3512 | 624 | keys = [k.strip() for k in _keys.readlines()] | 714 | keys = [k.strip() for k in _keys.readlines()] |
3513 | 625 | 715 | ||
3514 | 626 | if public_key not in keys: | 716 | if public_key not in keys: |
3515 | @@ -628,67 +718,101 @@ | |||
3516 | 628 | 718 | ||
3517 | 629 | [keys.remove(key) for key in keys if key == public_key] | 719 | [keys.remove(key) for key in keys if key == public_key] |
3518 | 630 | 720 | ||
3520 | 631 | with open(authorized_keys(user), 'w') as _keys: | 721 | with open(authorized_keys(unit, user), 'w') as _keys: |
3521 | 632 | keys = '\n'.join(keys) | 722 | keys = '\n'.join(keys) |
3522 | 633 | if not keys.endswith('\n'): | 723 | if not keys.endswith('\n'): |
3523 | 634 | keys += '\n' | 724 | keys += '\n' |
3524 | 635 | _keys.write(keys) | 725 | _keys.write(keys) |
3525 | 636 | 726 | ||
3526 | 637 | 727 | ||
3528 | 638 | def determine_endpoints(url): | 728 | def determine_endpoints(public_url, internal_url, admin_url): |
3529 | 639 | '''Generates a dictionary containing all relevant endpoints to be | 729 | '''Generates a dictionary containing all relevant endpoints to be |
3530 | 640 | passed to keystone as relation settings.''' | 730 | passed to keystone as relation settings.''' |
3531 | 641 | region = config('region') | 731 | region = config('region') |
3532 | 642 | os_rel = os_release('nova-common') | 732 | os_rel = os_release('nova-common') |
3533 | 643 | 733 | ||
3534 | 644 | if os_rel >= 'grizzly': | 734 | if os_rel >= 'grizzly': |
3537 | 645 | nova_url = ('%s:%s/v2/$(tenant_id)s' % | 735 | nova_public_url = ('%s:%s/v2/$(tenant_id)s' % |
3538 | 646 | (url, api_port('nova-api-os-compute'))) | 736 | (public_url, api_port('nova-api-os-compute'))) |
3539 | 737 | nova_internal_url = ('%s:%s/v2/$(tenant_id)s' % | ||
3540 | 738 | (internal_url, api_port('nova-api-os-compute'))) | ||
3541 | 739 | nova_admin_url = ('%s:%s/v2/$(tenant_id)s' % | ||
3542 | 740 | (admin_url, api_port('nova-api-os-compute'))) | ||
3543 | 647 | else: | 741 | else: |
3551 | 648 | nova_url = ('%s:%s/v1.1/$(tenant_id)s' % | 742 | nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' % |
3552 | 649 | (url, api_port('nova-api-os-compute'))) | 743 | (public_url, api_port('nova-api-os-compute'))) |
3553 | 650 | ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2')) | 744 | nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' % |
3554 | 651 | nova_volume_url = ('%s:%s/v1/$(tenant_id)s' % | 745 | (internal_url, api_port('nova-api-os-compute'))) |
3555 | 652 | (url, api_port('nova-api-os-compute'))) | 746 | nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' % |
3556 | 653 | neutron_url = '%s:%s' % (url, api_port('neutron-server')) | 747 | (admin_url, api_port('nova-api-os-compute'))) |
3557 | 654 | s3_url = '%s:%s' % (url, api_port('nova-objectstore')) | 748 | |
3558 | 749 | ec2_public_url = '%s:%s/services/Cloud' % ( | ||
3559 | 750 | public_url, api_port('nova-api-ec2')) | ||
3560 | 751 | ec2_internal_url = '%s:%s/services/Cloud' % ( | ||
3561 | 752 | internal_url, api_port('nova-api-ec2')) | ||
3562 | 753 | ec2_admin_url = '%s:%s/services/Cloud' % (admin_url, | ||
3563 | 754 | api_port('nova-api-ec2')) | ||
3564 | 755 | |||
3565 | 756 | nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' % | ||
3566 | 757 | (public_url, api_port('nova-api-os-compute'))) | ||
3567 | 758 | nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' % | ||
3568 | 759 | (internal_url, | ||
3569 | 760 | api_port('nova-api-os-compute'))) | ||
3570 | 761 | nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' % | ||
3571 | 762 | (admin_url, api_port('nova-api-os-compute'))) | ||
3572 | 763 | |||
3573 | 764 | neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server')) | ||
3574 | 765 | neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server')) | ||
3575 | 766 | neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server')) | ||
3576 | 767 | |||
3577 | 768 | s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore')) | ||
3578 | 769 | s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore')) | ||
3579 | 770 | s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore')) | ||
3580 | 655 | 771 | ||
3581 | 656 | # the base endpoints | 772 | # the base endpoints |
3582 | 657 | endpoints = { | 773 | endpoints = { |
3583 | 658 | 'nova_service': 'nova', | 774 | 'nova_service': 'nova', |
3584 | 659 | 'nova_region': region, | 775 | 'nova_region': region, |
3588 | 660 | 'nova_public_url': nova_url, | 776 | 'nova_public_url': nova_public_url, |
3589 | 661 | 'nova_admin_url': nova_url, | 777 | 'nova_admin_url': nova_admin_url, |
3590 | 662 | 'nova_internal_url': nova_url, | 778 | 'nova_internal_url': nova_internal_url, |
3591 | 663 | 'ec2_service': 'ec2', | 779 | 'ec2_service': 'ec2', |
3592 | 664 | 'ec2_region': region, | 780 | 'ec2_region': region, |
3596 | 665 | 'ec2_public_url': ec2_url, | 781 | 'ec2_public_url': ec2_public_url, |
3597 | 666 | 'ec2_admin_url': ec2_url, | 782 | 'ec2_admin_url': ec2_admin_url, |
3598 | 667 | 'ec2_internal_url': ec2_url, | 783 | 'ec2_internal_url': ec2_internal_url, |
3599 | 668 | 's3_service': 's3', | 784 | 's3_service': 's3', |
3600 | 669 | 's3_region': region, | 785 | 's3_region': region, |
3604 | 670 | 's3_public_url': s3_url, | 786 | 's3_public_url': s3_public_url, |
3605 | 671 | 's3_admin_url': s3_url, | 787 | 's3_admin_url': s3_admin_url, |
3606 | 672 | 's3_internal_url': s3_url, | 788 | 's3_internal_url': s3_internal_url, |
3607 | 673 | } | 789 | } |
3608 | 674 | 790 | ||
3609 | 675 | if relation_ids('nova-volume-service'): | 791 | if relation_ids('nova-volume-service'): |
3610 | 676 | endpoints.update({ | 792 | endpoints.update({ |
3611 | 677 | 'nova-volume_service': 'nova-volume', | 793 | 'nova-volume_service': 'nova-volume', |
3612 | 678 | 'nova-volume_region': region, | 794 | 'nova-volume_region': region, |
3616 | 679 | 'nova-volume_public_url': nova_volume_url, | 795 | 'nova-volume_public_url': nova_volume_public_url, |
3617 | 680 | 'nova-volume_admin_url': nova_volume_url, | 796 | 'nova-volume_admin_url': nova_volume_admin_url, |
3618 | 681 | 'nova-volume_internal_url': nova_volume_url, | 797 | 'nova-volume_internal_url': nova_volume_internal_url, |
3619 | 682 | }) | 798 | }) |
3620 | 683 | 799 | ||
3621 | 684 | # XXX: Keep these relations named quantum_*?? | 800 | # XXX: Keep these relations named quantum_*?? |
3623 | 685 | if network_manager() in ['quantum', 'neutron']: | 801 | if is_relation_made('neutron-api'): |
3624 | 802 | endpoints.update({ | ||
3625 | 803 | 'quantum_service': None, | ||
3626 | 804 | 'quantum_region': None, | ||
3627 | 805 | 'quantum_public_url': None, | ||
3628 | 806 | 'quantum_admin_url': None, | ||
3629 | 807 | 'quantum_internal_url': None, | ||
3630 | 808 | }) | ||
3631 | 809 | elif network_manager() in ['quantum', 'neutron']: | ||
3632 | 686 | endpoints.update({ | 810 | endpoints.update({ |
3633 | 687 | 'quantum_service': 'quantum', | 811 | 'quantum_service': 'quantum', |
3634 | 688 | 'quantum_region': region, | 812 | 'quantum_region': region, |
3638 | 689 | 'quantum_public_url': neutron_url, | 813 | 'quantum_public_url': neutron_public_url, |
3639 | 690 | 'quantum_admin_url': neutron_url, | 814 | 'quantum_admin_url': neutron_admin_url, |
3640 | 691 | 'quantum_internal_url': neutron_url, | 815 | 'quantum_internal_url': neutron_internal_url, |
3641 | 692 | }) | 816 | }) |
3642 | 693 | 817 | ||
3643 | 694 | return endpoints | 818 | return endpoints |
3644 | @@ -698,59 +822,141 @@ | |||
3645 | 698 | # quantum-plugin config setting can be safely overriden | 822 | # quantum-plugin config setting can be safely overriden |
3646 | 699 | # as we only supported OVS in G/neutron | 823 | # as we only supported OVS in G/neutron |
3647 | 700 | return config('neutron-plugin') or config('quantum-plugin') | 824 | return config('neutron-plugin') or config('quantum-plugin') |
3704 | 701 | 825 | <<<<<<< TREE | |
3705 | 702 | 826 | ||
3706 | 703 | def guard_map(): | 827 | |
3707 | 704 | '''Map of services and required interfaces that must be present before | 828 | def guard_map(): |
3708 | 705 | the service should be allowed to start''' | 829 | '''Map of services and required interfaces that must be present before |
3709 | 706 | gmap = {} | 830 | the service should be allowed to start''' |
3710 | 707 | nova_services = deepcopy(BASE_SERVICES) | 831 | gmap = {} |
3711 | 708 | if os_release('nova-common') not in ['essex', 'folsom']: | 832 | nova_services = deepcopy(BASE_SERVICES) |
3712 | 709 | nova_services.append('nova-conductor') | 833 | if os_release('nova-common') not in ['essex', 'folsom']: |
3713 | 710 | 834 | nova_services.append('nova-conductor') | |
3714 | 711 | nova_interfaces = ['identity-service', 'amqp'] | 835 | |
3715 | 712 | if relation_ids('pgsql-nova-db'): | 836 | nova_interfaces = ['identity-service', 'amqp'] |
3716 | 713 | nova_interfaces.append('pgsql-nova-db') | 837 | if relation_ids('pgsql-nova-db'): |
3717 | 714 | else: | 838 | nova_interfaces.append('pgsql-nova-db') |
3718 | 715 | nova_interfaces.append('shared-db') | 839 | else: |
3719 | 716 | 840 | nova_interfaces.append('shared-db') | |
3720 | 717 | for svc in nova_services: | 841 | |
3721 | 718 | gmap[svc] = nova_interfaces | 842 | for svc in nova_services: |
3722 | 719 | 843 | gmap[svc] = nova_interfaces | |
3723 | 720 | net_manager = network_manager() | 844 | |
3724 | 721 | if net_manager in ['neutron', 'quantum'] and \ | 845 | net_manager = network_manager() |
3725 | 722 | not is_relation_made('neutron-api'): | 846 | if net_manager in ['neutron', 'quantum'] and \ |
3726 | 723 | neutron_interfaces = ['identity-service', 'amqp'] | 847 | not is_relation_made('neutron-api'): |
3727 | 724 | if relation_ids('pgsql-neutron-db'): | 848 | neutron_interfaces = ['identity-service', 'amqp'] |
3728 | 725 | neutron_interfaces.append('pgsql-neutron-db') | 849 | if relation_ids('pgsql-neutron-db'): |
3729 | 726 | else: | 850 | neutron_interfaces.append('pgsql-neutron-db') |
3730 | 727 | neutron_interfaces.append('shared-db') | 851 | else: |
3731 | 728 | if network_manager() == 'quantum': | 852 | neutron_interfaces.append('shared-db') |
3732 | 729 | gmap['quantum-server'] = neutron_interfaces | 853 | if network_manager() == 'quantum': |
3733 | 730 | else: | 854 | gmap['quantum-server'] = neutron_interfaces |
3734 | 731 | gmap['neutron-server'] = neutron_interfaces | 855 | else: |
3735 | 732 | 856 | gmap['neutron-server'] = neutron_interfaces | |
3736 | 733 | return gmap | 857 | |
3737 | 734 | 858 | return gmap | |
3738 | 735 | 859 | ||
3739 | 736 | def service_guard(guard_map, contexts, active=False): | 860 | |
3740 | 737 | '''Inhibit services in guard_map from running unless | 861 | def service_guard(guard_map, contexts, active=False): |
3741 | 738 | required interfaces are found complete in contexts.''' | 862 | '''Inhibit services in guard_map from running unless |
3742 | 739 | def wrap(f): | 863 | required interfaces are found complete in contexts.''' |
3743 | 740 | def wrapped_f(*args): | 864 | def wrap(f): |
3744 | 741 | if active is True: | 865 | def wrapped_f(*args): |
3745 | 742 | incomplete_services = [] | 866 | if active is True: |
3746 | 743 | for svc in guard_map: | 867 | incomplete_services = [] |
3747 | 744 | for interface in guard_map[svc]: | 868 | for svc in guard_map: |
3748 | 745 | if interface not in contexts.complete_contexts(): | 869 | for interface in guard_map[svc]: |
3749 | 746 | incomplete_services.append(svc) | 870 | if interface not in contexts.complete_contexts(): |
3750 | 747 | f(*args) | 871 | incomplete_services.append(svc) |
3751 | 748 | for svc in incomplete_services: | 872 | f(*args) |
3752 | 749 | if service_running(svc): | 873 | for svc in incomplete_services: |
3753 | 750 | log('Service {} has unfulfilled ' | 874 | if service_running(svc): |
3754 | 751 | 'interface requirements, stopping.'.format(svc)) | 875 | log('Service {} has unfulfilled ' |
3755 | 752 | service_stop(svc) | 876 | 'interface requirements, stopping.'.format(svc)) |
3756 | 753 | else: | 877 | service_stop(svc) |
3757 | 754 | f(*args) | 878 | else: |
3758 | 755 | return wrapped_f | 879 | f(*args) |
3759 | 756 | return wrap | 880 | return wrapped_f |
3760 | 881 | return wrap | ||
3761 | 882 | ======= | ||
3762 | 883 | |||
3763 | 884 | |||
3764 | 885 | def guard_map(): | ||
3765 | 886 | '''Map of services and required interfaces that must be present before | ||
3766 | 887 | the service should be allowed to start''' | ||
3767 | 888 | gmap = {} | ||
3768 | 889 | nova_services = deepcopy(BASE_SERVICES) | ||
3769 | 890 | if os_release('nova-common') not in ['essex', 'folsom']: | ||
3770 | 891 | nova_services.append('nova-conductor') | ||
3771 | 892 | |||
3772 | 893 | nova_interfaces = ['identity-service', 'amqp'] | ||
3773 | 894 | if relation_ids('pgsql-nova-db'): | ||
3774 | 895 | nova_interfaces.append('pgsql-nova-db') | ||
3775 | 896 | else: | ||
3776 | 897 | nova_interfaces.append('shared-db') | ||
3777 | 898 | |||
3778 | 899 | for svc in nova_services: | ||
3779 | 900 | gmap[svc] = nova_interfaces | ||
3780 | 901 | |||
3781 | 902 | net_manager = network_manager() | ||
3782 | 903 | if net_manager in ['neutron', 'quantum'] and \ | ||
3783 | 904 | not is_relation_made('neutron-api'): | ||
3784 | 905 | neutron_interfaces = ['identity-service', 'amqp'] | ||
3785 | 906 | if relation_ids('pgsql-neutron-db'): | ||
3786 | 907 | neutron_interfaces.append('pgsql-neutron-db') | ||
3787 | 908 | else: | ||
3788 | 909 | neutron_interfaces.append('shared-db') | ||
3789 | 910 | if network_manager() == 'quantum': | ||
3790 | 911 | gmap['quantum-server'] = neutron_interfaces | ||
3791 | 912 | else: | ||
3792 | 913 | gmap['neutron-server'] = neutron_interfaces | ||
3793 | 914 | |||
3794 | 915 | return gmap | ||
3795 | 916 | |||
3796 | 917 | |||
3797 | 918 | def service_guard(guard_map, contexts, active=False): | ||
3798 | 919 | '''Inhibit services in guard_map from running unless | ||
3799 | 920 | required interfaces are found complete in contexts.''' | ||
3800 | 921 | def wrap(f): | ||
3801 | 922 | def wrapped_f(*args): | ||
3802 | 923 | if active is True: | ||
3803 | 924 | incomplete_services = [] | ||
3804 | 925 | for svc in guard_map: | ||
3805 | 926 | for interface in guard_map[svc]: | ||
3806 | 927 | if interface not in contexts.complete_contexts(): | ||
3807 | 928 | incomplete_services.append(svc) | ||
3808 | 929 | f(*args) | ||
3809 | 930 | for svc in incomplete_services: | ||
3810 | 931 | if service_running(svc): | ||
3811 | 932 | log('Service {} has unfulfilled ' | ||
3812 | 933 | 'interface requirements, stopping.'.format(svc)) | ||
3813 | 934 | service_stop(svc) | ||
3814 | 935 | else: | ||
3815 | 936 | f(*args) | ||
3816 | 937 | return wrapped_f | ||
3817 | 938 | return wrap | ||
3818 | 939 | |||
3819 | 940 | |||
3820 | 941 | def cmd_all_services(cmd): | ||
3821 | 942 | if cmd == 'start': | ||
3822 | 943 | for svc in services(): | ||
3823 | 944 | if not service_running(svc): | ||
3824 | 945 | service_start(svc) | ||
3825 | 946 | else: | ||
3826 | 947 | for svc in services(): | ||
3827 | 948 | service(cmd, svc) | ||
3828 | 949 | |||
3829 | 950 | |||
3830 | 951 | def disable_services(): | ||
3831 | 952 | for svc in services(): | ||
3832 | 953 | with open('/etc/init/{}.override'.format(svc), 'wb') as out: | ||
3833 | 954 | out.write('exec true\n') | ||
3834 | 955 | |||
3835 | 956 | |||
3836 | 957 | def enable_services(): | ||
3837 | 958 | for svc in services(): | ||
3838 | 959 | override_file = '/etc/init/{}.override'.format(svc) | ||
3839 | 960 | if os.path.isfile(override_file): | ||
3840 | 961 | os.remove(override_file) | ||
3841 | 962 | >>>>>>> MERGE-SOURCE | ||
3842 | 757 | 963 | ||
3843 | === modified file 'metadata.yaml' | |||
3844 | --- metadata.yaml 2014-03-31 11:56:09 +0000 | |||
3845 | +++ metadata.yaml 2014-09-16 09:08:32 +0000 | |||
3846 | @@ -30,6 +30,8 @@ | |||
3847 | 30 | interface: nova-volume | 30 | interface: nova-volume |
3848 | 31 | quantum-network-service: | 31 | quantum-network-service: |
3849 | 32 | interface: quantum | 32 | interface: quantum |
3850 | 33 | neutron-api: | ||
3851 | 34 | interface: neutron-api | ||
3852 | 33 | ha: | 35 | ha: |
3853 | 34 | interface: hacluster | 36 | interface: hacluster |
3854 | 35 | scope: container | 37 | scope: container |
3855 | 36 | 38 | ||
3856 | === modified file 'revision' | |||
3857 | --- revision 2014-04-16 08:25:14 +0000 | |||
3858 | +++ revision 2014-09-16 09:08:32 +0000 | |||
3859 | @@ -1,1 +1,1 @@ | |||
3861 | 1 | 315 | 1 | 500 |
3862 | 2 | 2 | ||
3863 | === modified file 'templates/havana/nova.conf' | |||
3864 | --- templates/havana/nova.conf 2014-08-01 11:04:31 +0000 | |||
3865 | +++ templates/havana/nova.conf 2014-09-16 09:08:32 +0000 | |||
3866 | @@ -20,8 +20,17 @@ | |||
3867 | 20 | enabled_apis=ec2,osapi_compute,metadata | 20 | enabled_apis=ec2,osapi_compute,metadata |
3868 | 21 | auth_strategy=keystone | 21 | auth_strategy=keystone |
3869 | 22 | compute_driver=libvirt.LibvirtDriver | 22 | compute_driver=libvirt.LibvirtDriver |
3872 | 23 | use_syslog={{ use_syslog }} | 23 | <<<<<<< TREE |
3873 | 24 | 24 | use_syslog={{ use_syslog }} | |
3874 | 25 | |||
3875 | 26 | ======= | ||
3876 | 27 | osapi_compute_workers = {{ workers }} | ||
3877 | 28 | ec2_workers = {{ workers }} | ||
3878 | 29 | scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter | ||
3879 | 30 | cpu_allocation_ratio = {{ cpu_allocation_ratio }} | ||
3880 | 31 | use_syslog={{ use_syslog }} | ||
3881 | 32 | |||
3882 | 33 | >>>>>>> MERGE-SOURCE | ||
3883 | 25 | {% if keystone_ec2_url -%} | 34 | {% if keystone_ec2_url -%} |
3884 | 26 | keystone_ec2_url = {{ keystone_ec2_url }} | 35 | keystone_ec2_url = {{ keystone_ec2_url }} |
3885 | 27 | {% endif -%} | 36 | {% endif -%} |
3886 | 28 | 37 | ||
3887 | === modified file 'templates/icehouse/neutron.conf' | |||
3888 | --- templates/icehouse/neutron.conf 2014-08-01 11:04:31 +0000 | |||
3889 | +++ templates/icehouse/neutron.conf 2014-09-16 09:08:32 +0000 | |||
3890 | @@ -8,7 +8,12 @@ | |||
3891 | 8 | bind_host = 0.0.0.0 | 8 | bind_host = 0.0.0.0 |
3892 | 9 | auth_strategy = keystone | 9 | auth_strategy = keystone |
3893 | 10 | notification_driver = neutron.openstack.common.notifier.rpc_notifier | 10 | notification_driver = neutron.openstack.common.notifier.rpc_notifier |
3894 | 11 | <<<<<<< TREE | ||
3895 | 11 | use_syslog={{ use_syslog }} | 12 | use_syslog={{ use_syslog }} |
3896 | 13 | ======= | ||
3897 | 14 | api_workers = {{ workers }} | ||
3898 | 15 | use_syslog = {{ use_syslog }} | ||
3899 | 16 | >>>>>>> MERGE-SOURCE | ||
3900 | 12 | 17 | ||
3901 | 13 | {% if neutron_bind_port -%} | 18 | {% if neutron_bind_port -%} |
3902 | 14 | bind_port = {{ neutron_bind_port }} | 19 | bind_port = {{ neutron_bind_port }} |
3903 | 15 | 20 | ||
3904 | === modified file 'templates/icehouse/nova.conf' | |||
3905 | --- templates/icehouse/nova.conf 2014-08-01 11:04:31 +0000 | |||
3906 | +++ templates/icehouse/nova.conf 2014-09-16 09:08:32 +0000 | |||
3907 | @@ -1,3 +1,4 @@ | |||
3908 | 1 | # icehouse | ||
3909 | 1 | ############################################################################### | 2 | ############################################################################### |
3910 | 2 | # [ WARNING ] | 3 | # [ WARNING ] |
3911 | 3 | # Configuration file maintained by Juju. Local changes may be overwritten. | 4 | # Configuration file maintained by Juju. Local changes may be overwritten. |
3912 | @@ -20,8 +21,21 @@ | |||
3913 | 20 | enabled_apis=ec2,osapi_compute,metadata | 21 | enabled_apis=ec2,osapi_compute,metadata |
3914 | 21 | auth_strategy=keystone | 22 | auth_strategy=keystone |
3915 | 22 | compute_driver=libvirt.LibvirtDriver | 23 | compute_driver=libvirt.LibvirtDriver |
3918 | 23 | use_syslog={{ use_syslog }} | 24 | <<<<<<< TREE |
3919 | 24 | 25 | use_syslog={{ use_syslog }} | |
3920 | 26 | |||
3921 | 27 | ======= | ||
3922 | 28 | |||
3923 | 29 | osapi_compute_workers = {{ workers }} | ||
3924 | 30 | ec2_workers = {{ workers }} | ||
3925 | 31 | |||
3926 | 32 | scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter | ||
3927 | 33 | cpu_allocation_ratio = {{ cpu_allocation_ratio }} | ||
3928 | 34 | ram_allocation_ratio = {{ ram_allocation_ratio }} | ||
3929 | 35 | |||
3930 | 36 | use_syslog={{ use_syslog }} | ||
3931 | 37 | |||
3932 | 38 | >>>>>>> MERGE-SOURCE | ||
3933 | 25 | {% if keystone_ec2_url -%} | 39 | {% if keystone_ec2_url -%} |
3934 | 26 | keystone_ec2_url = {{ keystone_ec2_url }} | 40 | keystone_ec2_url = {{ keystone_ec2_url }} |
3935 | 27 | {% endif -%} | 41 | {% endif -%} |
3936 | @@ -130,3 +144,5 @@ | |||
3937 | 130 | [osapi_v3] | 144 | [osapi_v3] |
3938 | 131 | enabled=True | 145 | enabled=True |
3939 | 132 | 146 | ||
3940 | 147 | [conductor] | ||
3941 | 148 | workers = {{ workers }} | ||
3942 | 133 | 149 | ||
3943 | === added directory 'tests' | |||
3944 | === added file 'tests/00-setup' | |||
3945 | --- tests/00-setup 1970-01-01 00:00:00 +0000 | |||
3946 | +++ tests/00-setup 2014-09-16 09:08:32 +0000 | |||
3947 | @@ -0,0 +1,10 @@ | |||
3948 | 1 | #!/bin/bash | ||
3949 | 2 | |||
3950 | 3 | set -ex | ||
3951 | 4 | |||
3952 | 5 | sudo add-apt-repository --yes ppa:juju/stable | ||
3953 | 6 | sudo apt-get update --yes | ||
3954 | 7 | sudo apt-get install --yes python-amulet | ||
3955 | 8 | sudo apt-get install --yes python-glanceclient | ||
3956 | 9 | sudo apt-get install --yes python-keystoneclient | ||
3957 | 10 | sudo apt-get install --yes python-novaclient | ||
3958 | 0 | 11 | ||
3959 | === added file 'tests/10-basic-precise-essex' | |||
3960 | --- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000 | |||
3961 | +++ tests/10-basic-precise-essex 2014-09-16 09:08:32 +0000 | |||
3962 | @@ -0,0 +1,10 @@ | |||
3963 | 1 | #!/usr/bin/python | ||
3964 | 2 | |||
3965 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
3966 | 4 | precise-essex.""" | ||
3967 | 5 | |||
3968 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
3969 | 7 | |||
3970 | 8 | if __name__ == '__main__': | ||
3971 | 9 | deployment = NovaCCBasicDeployment(series='precise') | ||
3972 | 10 | deployment.run_tests() | ||
3973 | 0 | 11 | ||
3974 | === added file 'tests/11-basic-precise-folsom' | |||
3975 | --- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000 | |||
3976 | +++ tests/11-basic-precise-folsom 2014-09-16 09:08:32 +0000 | |||
3977 | @@ -0,0 +1,18 @@ | |||
3978 | 1 | #!/usr/bin/python | ||
3979 | 2 | |||
3980 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
3981 | 4 | precise-folsom.""" | ||
3982 | 5 | |||
3983 | 6 | import amulet | ||
3984 | 7 | from basic_deployment import NovaCCBasicDeployment | ||
3985 | 8 | |||
3986 | 9 | if __name__ == '__main__': | ||
3987 | 10 | # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync' | ||
3988 | 11 | # fails in shared-db-relation-changed (only fails on folsom) | ||
3989 | 12 | message = "Skipping failing test until resolved" | ||
3990 | 13 | amulet.raise_status(amulet.SKIP, msg=message) | ||
3991 | 14 | |||
3992 | 15 | deployment = NovaCCBasicDeployment(series='precise', | ||
3993 | 16 | openstack='cloud:precise-folsom', | ||
3994 | 17 | source='cloud:precise-updates/folsom') | ||
3995 | 18 | deployment.run_tests() | ||
3996 | 0 | 19 | ||
3997 | === added file 'tests/12-basic-precise-grizzly' | |||
3998 | --- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000 | |||
3999 | +++ tests/12-basic-precise-grizzly 2014-09-16 09:08:32 +0000 | |||
4000 | @@ -0,0 +1,12 @@ | |||
4001 | 1 | #!/usr/bin/python | ||
4002 | 2 | |||
4003 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
4004 | 4 | precise-grizzly.""" | ||
4005 | 5 | |||
4006 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
4007 | 7 | |||
4008 | 8 | if __name__ == '__main__': | ||
4009 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
4010 | 10 | openstack='cloud:precise-grizzly', | ||
4011 | 11 | source='cloud:precise-updates/grizzly') | ||
4012 | 12 | deployment.run_tests() | ||
4013 | 0 | 13 | ||
4014 | === added file 'tests/13-basic-precise-havana' | |||
4015 | --- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000 | |||
4016 | +++ tests/13-basic-precise-havana 2014-09-16 09:08:32 +0000 | |||
4017 | @@ -0,0 +1,12 @@ | |||
4018 | 1 | #!/usr/bin/python | ||
4019 | 2 | |||
4020 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
4021 | 4 | precise-havana.""" | ||
4022 | 5 | |||
4023 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
4024 | 7 | |||
4025 | 8 | if __name__ == '__main__': | ||
4026 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
4027 | 10 | openstack='cloud:precise-havana', | ||
4028 | 11 | source='cloud:precise-updates/havana') | ||
4029 | 12 | deployment.run_tests() | ||
4030 | 0 | 13 | ||
4031 | === added file 'tests/14-basic-precise-icehouse' | |||
4032 | --- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000 | |||
4033 | +++ tests/14-basic-precise-icehouse 2014-09-16 09:08:32 +0000 | |||
4034 | @@ -0,0 +1,12 @@ | |||
4035 | 1 | #!/usr/bin/python | ||
4036 | 2 | |||
4037 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
4038 | 4 | precise-icehouse.""" | ||
4039 | 5 | |||
4040 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
4041 | 7 | |||
4042 | 8 | if __name__ == '__main__': | ||
4043 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
4044 | 10 | openstack='cloud:precise-icehouse', | ||
4045 | 11 | source='cloud:precise-updates/icehouse') | ||
4046 | 12 | deployment.run_tests() | ||
4047 | 0 | 13 | ||
4048 | === added file 'tests/15-basic-trusty-icehouse' | |||
4049 | --- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000 | |||
4050 | +++ tests/15-basic-trusty-icehouse 2014-09-16 09:08:32 +0000 | |||
4051 | @@ -0,0 +1,10 @@ | |||
4052 | 1 | #!/usr/bin/python | ||
4053 | 2 | |||
4054 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
4055 | 4 | trusty-icehouse.""" | ||
4056 | 5 | |||
4057 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
4058 | 7 | |||
4059 | 8 | if __name__ == '__main__': | ||
4060 | 9 | deployment = NovaCCBasicDeployment(series='trusty') | ||
4061 | 10 | deployment.run_tests() | ||
4062 | 0 | 11 | ||
4063 | === added file 'tests/README' | |||
4064 | --- tests/README 1970-01-01 00:00:00 +0000 | |||
4065 | +++ tests/README 2014-09-16 09:08:32 +0000 | |||
4066 | @@ -0,0 +1,47 @@ | |||
4067 | 1 | This directory provides Amulet tests that focus on verification of Nova Cloud | ||
4068 | 2 | Controller deployments. | ||
4069 | 3 | |||
4070 | 4 | If you use a web proxy server to access the web, you'll need to set the | ||
4071 | 5 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. | ||
4072 | 6 | |||
4073 | 7 | The following examples demonstrate different ways that tests can be executed. | ||
4074 | 8 | All examples are run from the charm's root directory. | ||
4075 | 9 | |||
4076 | 10 | * To run all tests (starting with 00-setup): | ||
4077 | 11 | |||
4078 | 12 | make test | ||
4079 | 13 | |||
4080 | 14 | * To run a specific test module (or modules): | ||
4081 | 15 | |||
4082 | 16 | juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
4083 | 17 | |||
4084 | 18 | * To run a specific test module (or modules), and keep the environment | ||
4085 | 19 | deployed after a failure: | ||
4086 | 20 | |||
4087 | 21 | juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
4088 | 22 | |||
4089 | 23 | * To re-run a test module against an already deployed environment (one | ||
4090 | 24 | that was deployed by a previous call to 'juju test --set-e'): | ||
4091 | 25 | |||
4092 | 26 | ./tests/15-basic-trusty-icehouse | ||
4093 | 27 | |||
4094 | 28 | For debugging and test development purposes, all code should be idempotent. | ||
4095 | 29 | In other words, the code should have the ability to be re-run without changing | ||
4096 | 30 | the results beyond the initial run. This enables editing and re-running of a | ||
4097 | 31 | test module against an already deployed environment, as described above. | ||
4098 | 32 | |||
4099 | 33 | Manual debugging tips: | ||
4100 | 34 | |||
4101 | 35 | * Set the following env vars before using the OpenStack CLI as admin: | ||
4102 | 36 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
4103 | 37 | export OS_TENANT_NAME=admin | ||
4104 | 38 | export OS_USERNAME=admin | ||
4105 | 39 | export OS_PASSWORD=openstack | ||
4106 | 40 | export OS_REGION_NAME=RegionOne | ||
4107 | 41 | |||
4108 | 42 | * Set the following env vars before using the OpenStack CLI as demoUser: | ||
4109 | 43 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
4110 | 44 | export OS_TENANT_NAME=demoTenant | ||
4111 | 45 | export OS_USERNAME=demoUser | ||
4112 | 46 | export OS_PASSWORD=password | ||
4113 | 47 | export OS_REGION_NAME=RegionOne | ||
4114 | 0 | 48 | ||
4115 | === added file 'tests/basic_deployment.py' | |||
4116 | --- tests/basic_deployment.py 1970-01-01 00:00:00 +0000 | |||
4117 | +++ tests/basic_deployment.py 2014-09-16 09:08:32 +0000 | |||
4118 | @@ -0,0 +1,520 @@ | |||
4119 | 1 | #!/usr/bin/python | ||
4120 | 2 | |||
4121 | 3 | import amulet | ||
4122 | 4 | |||
4123 | 5 | from charmhelpers.contrib.openstack.amulet.deployment import ( | ||
4124 | 6 | OpenStackAmuletDeployment | ||
4125 | 7 | ) | ||
4126 | 8 | |||
4127 | 9 | from charmhelpers.contrib.openstack.amulet.utils import ( | ||
4128 | 10 | OpenStackAmuletUtils, | ||
4129 | 11 | DEBUG, # flake8: noqa | ||
4130 | 12 | ERROR | ||
4131 | 13 | ) | ||
4132 | 14 | |||
4133 | 15 | # Use DEBUG to turn on debug logging | ||
4134 | 16 | u = OpenStackAmuletUtils(ERROR) | ||
4135 | 17 | |||
4136 | 18 | |||
4137 | 19 | class NovaCCBasicDeployment(OpenStackAmuletDeployment): | ||
4138 | 20 | """Amulet tests on a basic nova cloud controller deployment.""" | ||
4139 | 21 | |||
4140 | 22 | def __init__(self, series=None, openstack=None, source=None): | ||
4141 | 23 | """Deploy the entire test environment.""" | ||
4142 | 24 | super(NovaCCBasicDeployment, self).__init__(series, openstack, source) | ||
4143 | 25 | self._add_services() | ||
4144 | 26 | self._add_relations() | ||
4145 | 27 | self._configure_services() | ||
4146 | 28 | self._deploy() | ||
4147 | 29 | self._initialize_tests() | ||
4148 | 30 | |||
4149 | 31 | def _add_services(self): | ||
4150 | 32 | """Add the service that we're testing, including the number of units, | ||
4151 | 33 | where nova-cloud-controller is local, and the other charms are from | ||
4152 | 34 | the charm store.""" | ||
4153 | 35 | this_service = ('nova-cloud-controller', 1) | ||
4154 | 36 | other_services = [('mysql', 1), ('rabbitmq-server', 1), | ||
4155 | 37 | ('nova-compute', 2), ('keystone', 1), ('glance', 1)] | ||
4156 | 38 | super(NovaCCBasicDeployment, self)._add_services(this_service, | ||
4157 | 39 | other_services) | ||
4158 | 40 | |||
4159 | 41 | def _add_relations(self): | ||
4160 | 42 | """Add all of the relations for the services.""" | ||
4161 | 43 | relations = { | ||
4162 | 44 | 'nova-cloud-controller:shared-db': 'mysql:shared-db', | ||
4163 | 45 | 'nova-cloud-controller:identity-service': 'keystone:identity-service', | ||
4164 | 46 | 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', | ||
4165 | 47 | 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute', | ||
4166 | 48 | 'nova-cloud-controller:image-service': 'glance:image-service', | ||
4167 | 49 | 'nova-compute:image-service': 'glance:image-service', | ||
4168 | 50 | 'nova-compute:shared-db': 'mysql:shared-db', | ||
4169 | 51 | 'nova-compute:amqp': 'rabbitmq-server:amqp', | ||
4170 | 52 | 'keystone:shared-db': 'mysql:shared-db', | ||
4171 | 53 | 'glance:identity-service': 'keystone:identity-service', | ||
4172 | 54 | 'glance:shared-db': 'mysql:shared-db', | ||
4173 | 55 | 'glance:amqp': 'rabbitmq-server:amqp' | ||
4174 | 56 | } | ||
4175 | 57 | super(NovaCCBasicDeployment, self)._add_relations(relations) | ||
4176 | 58 | |||
4177 | 59 | def _configure_services(self): | ||
4178 | 60 | """Configure all of the services.""" | ||
4179 | 61 | keystone_config = {'admin-password': 'openstack', | ||
4180 | 62 | 'admin-token': 'ubuntutesting'} | ||
4181 | 63 | configs = {'keystone': keystone_config} | ||
4182 | 64 | super(NovaCCBasicDeployment, self)._configure_services(configs) | ||
4183 | 65 | |||
4184 | 66 | def _initialize_tests(self): | ||
4185 | 67 | """Perform final initialization before tests get run.""" | ||
4186 | 68 | # Access the sentries for inspecting service units | ||
4187 | 69 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] | ||
4188 | 70 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] | ||
4189 | 71 | self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] | ||
4190 | 72 | self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0'] | ||
4191 | 73 | self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] | ||
4192 | 74 | self.glance_sentry = self.d.sentry.unit['glance/0'] | ||
4193 | 75 | |||
4194 | 76 | # Authenticate admin with keystone | ||
4195 | 77 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | ||
4196 | 78 | user='admin', | ||
4197 | 79 | password='openstack', | ||
4198 | 80 | tenant='admin') | ||
4199 | 81 | |||
4200 | 82 | # Authenticate admin with glance endpoint | ||
4201 | 83 | self.glance = u.authenticate_glance_admin(self.keystone) | ||
4202 | 84 | |||
4203 | 85 | # Create a demo tenant/role/user | ||
4204 | 86 | self.demo_tenant = 'demoTenant' | ||
4205 | 87 | self.demo_role = 'demoRole' | ||
4206 | 88 | self.demo_user = 'demoUser' | ||
4207 | 89 | if not u.tenant_exists(self.keystone, self.demo_tenant): | ||
4208 | 90 | tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, | ||
4209 | 91 | description='demo tenant', | ||
4210 | 92 | enabled=True) | ||
4211 | 93 | self.keystone.roles.create(name=self.demo_role) | ||
4212 | 94 | self.keystone.users.create(name=self.demo_user, | ||
4213 | 95 | password='password', | ||
4214 | 96 | tenant_id=tenant.id, | ||
4215 | 97 | email='demo@demo.com') | ||
4216 | 98 | |||
4217 | 99 | # Authenticate demo user with keystone | ||
4218 | 100 | self.keystone_demo = \ | ||
4219 | 101 | u.authenticate_keystone_user(self.keystone, user=self.demo_user, | ||
4220 | 102 | password='password', | ||
4221 | 103 | tenant=self.demo_tenant) | ||
4222 | 104 | |||
4223 | 105 | # Authenticate demo user with nova-api | ||
4224 | 106 | self.nova_demo = u.authenticate_nova_user(self.keystone, | ||
4225 | 107 | user=self.demo_user, | ||
4226 | 108 | password='password', | ||
4227 | 109 | tenant=self.demo_tenant) | ||
4228 | 110 | |||
4229 | 111 | def test_services(self): | ||
4230 | 112 | """Verify the expected services are running on the corresponding | ||
4231 | 113 | service units.""" | ||
4232 | 114 | commands = { | ||
4233 | 115 | self.mysql_sentry: ['status mysql'], | ||
4234 | 116 | self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], | ||
4235 | 117 | self.nova_cc_sentry: ['status nova-api-ec2', | ||
4236 | 118 | 'status nova-api-os-compute', | ||
4237 | 119 | 'status nova-objectstore', | ||
4238 | 120 | 'status nova-cert', | ||
4239 | 121 | 'status nova-scheduler'], | ||
4240 | 122 | self.nova_compute_sentry: ['status nova-compute', | ||
4241 | 123 | 'status nova-network', | ||
4242 | 124 | 'status nova-api'], | ||
4243 | 125 | self.keystone_sentry: ['status keystone'], | ||
4244 | 126 | self.glance_sentry: ['status glance-registry', 'status glance-api'] | ||
4245 | 127 | } | ||
4246 | 128 | if self._get_openstack_release() >= self.precise_grizzly: | ||
4247 | 129 | commands[self.nova_cc_sentry] = ['status nova-conductor'] | ||
4248 | 130 | |||
4249 | 131 | ret = u.validate_services(commands) | ||
4250 | 132 | if ret: | ||
4251 | 133 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
4252 | 134 | |||
4253 | 135 | def test_service_catalog(self): | ||
4254 | 136 | """Verify that the service catalog endpoint data is valid.""" | ||
4255 | 137 | endpoint_vol = {'adminURL': u.valid_url, | ||
4256 | 138 | 'region': 'RegionOne', | ||
4257 | 139 | 'publicURL': u.valid_url, | ||
4258 | 140 | 'internalURL': u.valid_url} | ||
4259 | 141 | endpoint_id = {'adminURL': u.valid_url, | ||
4260 | 142 | 'region': 'RegionOne', | ||
4261 | 143 | 'publicURL': u.valid_url, | ||
4262 | 144 | 'internalURL': u.valid_url} | ||
4263 | 145 | if self._get_openstack_release() >= self.precise_folsom: | ||
4264 | 146 | endpoint_vol['id'] = u.not_null | ||
4265 | 147 | endpoint_id['id'] = u.not_null | ||
4266 | 148 | expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol], | ||
4267 | 149 | 'ec2': [endpoint_vol], 'identity': [endpoint_id]} | ||
4268 | 150 | actual = self.keystone_demo.service_catalog.get_endpoints() | ||
4269 | 151 | |||
4270 | 152 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | ||
4271 | 153 | if ret: | ||
4272 | 154 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
4273 | 155 | |||
4274 | 156 | def test_openstack_compute_api_endpoint(self): | ||
4275 | 157 | """Verify the openstack compute api (osapi) endpoint data.""" | ||
4276 | 158 | endpoints = self.keystone.endpoints.list() | ||
4277 | 159 | admin_port = internal_port = public_port = '8774' | ||
4278 | 160 | expected = {'id': u.not_null, | ||
4279 | 161 | 'region': 'RegionOne', | ||
4280 | 162 | 'adminurl': u.valid_url, | ||
4281 | 163 | 'internalurl': u.valid_url, | ||
4282 | 164 | 'publicurl': u.valid_url, | ||
4283 | 165 | 'service_id': u.not_null} | ||
4284 | 166 | |||
4285 | 167 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
4286 | 168 | public_port, expected) | ||
4287 | 169 | if ret: | ||
4288 | 170 | message = 'osapi endpoint: {}'.format(ret) | ||
4289 | 171 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4290 | 172 | |||
4291 | 173 | def test_ec2_api_endpoint(self): | ||
4292 | 174 | """Verify the EC2 api endpoint data.""" | ||
4293 | 175 | endpoints = self.keystone.endpoints.list() | ||
4294 | 176 | admin_port = internal_port = public_port = '8773' | ||
4295 | 177 | expected = {'id': u.not_null, | ||
4296 | 178 | 'region': 'RegionOne', | ||
4297 | 179 | 'adminurl': u.valid_url, | ||
4298 | 180 | 'internalurl': u.valid_url, | ||
4299 | 181 | 'publicurl': u.valid_url, | ||
4300 | 182 | 'service_id': u.not_null} | ||
4301 | 183 | |||
4302 | 184 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
4303 | 185 | public_port, expected) | ||
4304 | 186 | if ret: | ||
4305 | 187 | message = 'EC2 endpoint: {}'.format(ret) | ||
4306 | 188 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4307 | 189 | |||
4308 | 190 | def test_s3_api_endpoint(self): | ||
4309 | 191 | """Verify the S3 api endpoint data.""" | ||
4310 | 192 | endpoints = self.keystone.endpoints.list() | ||
4311 | 193 | admin_port = internal_port = public_port = '3333' | ||
4312 | 194 | expected = {'id': u.not_null, | ||
4313 | 195 | 'region': 'RegionOne', | ||
4314 | 196 | 'adminurl': u.valid_url, | ||
4315 | 197 | 'internalurl': u.valid_url, | ||
4316 | 198 | 'publicurl': u.valid_url, | ||
4317 | 199 | 'service_id': u.not_null} | ||
4318 | 200 | |||
4319 | 201 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
4320 | 202 | public_port, expected) | ||
4321 | 203 | if ret: | ||
4322 | 204 | message = 'S3 endpoint: {}'.format(ret) | ||
4323 | 205 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4324 | 206 | |||
4325 | 207 | def test_nova_cc_shared_db_relation(self): | ||
4326 | 208 | """Verify the nova-cc to mysql shared-db relation data""" | ||
4327 | 209 | unit = self.nova_cc_sentry | ||
4328 | 210 | relation = ['shared-db', 'mysql:shared-db'] | ||
4329 | 211 | expected = { | ||
4330 | 212 | 'private-address': u.valid_ip, | ||
4331 | 213 | 'nova_database': 'nova', | ||
4332 | 214 | 'nova_username': 'nova', | ||
4333 | 215 | 'nova_hostname': u.valid_ip | ||
4334 | 216 | } | ||
4335 | 217 | |||
4336 | 218 | ret = u.validate_relation_data(unit, relation, expected) | ||
4337 | 219 | if ret: | ||
4338 | 220 | message = u.relation_error('nova-cc shared-db', ret) | ||
4339 | 221 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4340 | 222 | |||
4341 | 223 | def test_mysql_shared_db_relation(self): | ||
4342 | 224 | """Verify the mysql to nova-cc shared-db relation data""" | ||
4343 | 225 | unit = self.mysql_sentry | ||
4344 | 226 | relation = ['shared-db', 'nova-cloud-controller:shared-db'] | ||
4345 | 227 | expected = { | ||
4346 | 228 | 'private-address': u.valid_ip, | ||
4347 | 229 | 'nova_password': u.not_null, | ||
4348 | 230 | 'db_host': u.valid_ip | ||
4349 | 231 | } | ||
4350 | 232 | |||
4351 | 233 | ret = u.validate_relation_data(unit, relation, expected) | ||
4352 | 234 | if ret: | ||
4353 | 235 | message = u.relation_error('mysql shared-db', ret) | ||
4354 | 236 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4355 | 237 | |||
4356 | 238 | def test_nova_cc_identity_service_relation(self): | ||
4357 | 239 | """Verify the nova-cc to keystone identity-service relation data""" | ||
4358 | 240 | unit = self.nova_cc_sentry | ||
4359 | 241 | relation = ['identity-service', 'keystone:identity-service'] | ||
4360 | 242 | expected = { | ||
4361 | 243 | 'nova_internal_url': u.valid_url, | ||
4362 | 244 | 'nova_public_url': u.valid_url, | ||
4363 | 245 | 's3_public_url': u.valid_url, | ||
4364 | 246 | 's3_service': 's3', | ||
4365 | 247 | 'ec2_admin_url': u.valid_url, | ||
4366 | 248 | 'ec2_internal_url': u.valid_url, | ||
4367 | 249 | 'nova_service': 'nova', | ||
4368 | 250 | 's3_region': 'RegionOne', | ||
4369 | 251 | 'private-address': u.valid_ip, | ||
4370 | 252 | 'nova_region': 'RegionOne', | ||
4371 | 253 | 'ec2_public_url': u.valid_url, | ||
4372 | 254 | 'ec2_region': 'RegionOne', | ||
4373 | 255 | 's3_internal_url': u.valid_url, | ||
4374 | 256 | 's3_admin_url': u.valid_url, | ||
4375 | 257 | 'nova_admin_url': u.valid_url, | ||
4376 | 258 | 'ec2_service': 'ec2' | ||
4377 | 259 | } | ||
4378 | 260 | |||
4379 | 261 | ret = u.validate_relation_data(unit, relation, expected) | ||
4380 | 262 | if ret: | ||
4381 | 263 | message = u.relation_error('nova-cc identity-service', ret) | ||
4382 | 264 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4383 | 265 | |||
4384 | 266 | def test_keystone_identity_service_relation(self): | ||
4385 | 267 | """Verify the keystone to nova-cc identity-service relation data""" | ||
4386 | 268 | unit = self.keystone_sentry | ||
4387 | 269 | relation = ['identity-service', | ||
4388 | 270 | 'nova-cloud-controller:identity-service'] | ||
4389 | 271 | expected = { | ||
4390 | 272 | 'service_protocol': 'http', | ||
4391 | 273 | 'service_tenant': 'services', | ||
4392 | 274 | 'admin_token': 'ubuntutesting', | ||
4393 | 275 | 'service_password': u.not_null, | ||
4394 | 276 | 'service_port': '5000', | ||
4395 | 277 | 'auth_port': '35357', | ||
4396 | 278 | 'auth_protocol': 'http', | ||
4397 | 279 | 'private-address': u.valid_ip, | ||
4398 | 280 | 'https_keystone': 'False', | ||
4399 | 281 | 'auth_host': u.valid_ip, | ||
4400 | 282 | 'service_username': 's3_ec2_nova', | ||
4401 | 283 | 'service_tenant_id': u.not_null, | ||
4402 | 284 | 'service_host': u.valid_ip | ||
4403 | 285 | } | ||
4404 | 286 | |||
4405 | 287 | ret = u.validate_relation_data(unit, relation, expected) | ||
4406 | 288 | if ret: | ||
4407 | 289 | message = u.relation_error('keystone identity-service', ret) | ||
4408 | 290 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4409 | 291 | |||
4410 | 292 | def test_nova_cc_amqp_relation(self): | ||
4411 | 293 | """Verify the nova-cc to rabbitmq-server amqp relation data""" | ||
4412 | 294 | unit = self.nova_cc_sentry | ||
4413 | 295 | relation = ['amqp', 'rabbitmq-server:amqp'] | ||
4414 | 296 | expected = { | ||
4415 | 297 | 'username': 'nova', | ||
4416 | 298 | 'private-address': u.valid_ip, | ||
4417 | 299 | 'vhost': 'openstack' | ||
4418 | 300 | } | ||
4419 | 301 | |||
4420 | 302 | ret = u.validate_relation_data(unit, relation, expected) | ||
4421 | 303 | if ret: | ||
4422 | 304 | message = u.relation_error('nova-cc amqp', ret) | ||
4423 | 305 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4424 | 306 | |||
4425 | 307 | def test_rabbitmq_amqp_relation(self): | ||
4426 | 308 | """Verify the rabbitmq-server to nova-cc amqp relation data""" | ||
4427 | 309 | unit = self.rabbitmq_sentry | ||
4428 | 310 | relation = ['amqp', 'nova-cloud-controller:amqp'] | ||
4429 | 311 | expected = { | ||
4430 | 312 | 'private-address': u.valid_ip, | ||
4431 | 313 | 'password': u.not_null, | ||
4432 | 314 | 'hostname': u.valid_ip | ||
4433 | 315 | } | ||
4434 | 316 | |||
4435 | 317 | ret = u.validate_relation_data(unit, relation, expected) | ||
4436 | 318 | if ret: | ||
4437 | 319 | message = u.relation_error('rabbitmq amqp', ret) | ||
4438 | 320 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4439 | 321 | |||
4440 | 322 | def test_nova_cc_cloud_compute_relation(self): | ||
4441 | 323 | """Verify the nova-cc to nova-compute cloud-compute relation data""" | ||
4442 | 324 | unit = self.nova_cc_sentry | ||
4443 | 325 | relation = ['cloud-compute', 'nova-compute:cloud-compute'] | ||
4444 | 326 | expected = { | ||
4445 | 327 | 'volume_service': 'cinder', | ||
4446 | 328 | 'network_manager': 'flatdhcpmanager', | ||
4447 | 329 | 'ec2_host': u.valid_ip, | ||
4448 | 330 | 'private-address': u.valid_ip, | ||
4449 | 331 | 'restart_trigger': u.not_null | ||
4450 | 332 | } | ||
4451 | 333 | if self._get_openstack_release() == self.precise_essex: | ||
4452 | 334 | expected['volume_service'] = 'nova-volume' | ||
4453 | 335 | |||
4454 | 336 | ret = u.validate_relation_data(unit, relation, expected) | ||
4455 | 337 | if ret: | ||
4456 | 338 | message = u.relation_error('nova-cc cloud-compute', ret) | ||
4457 | 339 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4458 | 340 | |||
4459 | 341 | def test_nova_cloud_compute_relation(self): | ||
4460 | 342 | """Verify the nova-compute to nova-cc cloud-compute relation data""" | ||
4461 | 343 | unit = self.nova_compute_sentry | ||
4462 | 344 | relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute'] | ||
4463 | 345 | expected = { | ||
4464 | 346 | 'private-address': u.valid_ip, | ||
4465 | 347 | } | ||
4466 | 348 | |||
4467 | 349 | ret = u.validate_relation_data(unit, relation, expected) | ||
4468 | 350 | if ret: | ||
4469 | 351 | message = u.relation_error('nova-compute cloud-compute', ret) | ||
4470 | 352 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4471 | 353 | |||
4472 | 354 | def test_nova_cc_image_service_relation(self): | ||
4473 | 355 | """Verify the nova-cc to glance image-service relation data""" | ||
4474 | 356 | unit = self.nova_cc_sentry | ||
4475 | 357 | relation = ['image-service', 'glance:image-service'] | ||
4476 | 358 | expected = { | ||
4477 | 359 | 'private-address': u.valid_ip, | ||
4478 | 360 | } | ||
4479 | 361 | |||
4480 | 362 | ret = u.validate_relation_data(unit, relation, expected) | ||
4481 | 363 | if ret: | ||
4482 | 364 | message = u.relation_error('nova-cc image-service', ret) | ||
4483 | 365 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4484 | 366 | |||
4485 | 367 | def test_glance_image_service_relation(self): | ||
4486 | 368 | """Verify the glance to nova-cc image-service relation data""" | ||
4487 | 369 | unit = self.glance_sentry | ||
4488 | 370 | relation = ['image-service', 'nova-cloud-controller:image-service'] | ||
4489 | 371 | expected = { | ||
4490 | 372 | 'private-address': u.valid_ip, | ||
4491 | 373 | 'glance-api-server': u.valid_url | ||
4492 | 374 | } | ||
4493 | 375 | |||
4494 | 376 | ret = u.validate_relation_data(unit, relation, expected) | ||
4495 | 377 | if ret: | ||
4496 | 378 | message = u.relation_error('glance image-service', ret) | ||
4497 | 379 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4498 | 380 | |||
4499 | 381 | def test_restart_on_config_change(self): | ||
4500 | 382 | """Verify that the specified services are restarted when the config | ||
4501 | 383 | is changed.""" | ||
4502 | 384 | # NOTE(coreycb): Skipping failing test on essex until resolved. | ||
4503 | 385 | # config-flags don't take effect on essex. | ||
4504 | 386 | if self._get_openstack_release() == self.precise_essex: | ||
4505 | 387 | u.log.error("Skipping failing test until resolved") | ||
4506 | 388 | return | ||
4507 | 389 | |||
4508 | 390 | services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore', | ||
4509 | 391 | 'nova-cert', 'nova-scheduler', 'nova-conductor'] | ||
4510 | 392 | self.d.configure('nova-cloud-controller', | ||
4511 | 393 | {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'}) | ||
4512 | 394 | pgrep_full = True | ||
4513 | 395 | |||
4514 | 396 | time = 20 | ||
4515 | 397 | conf = '/etc/nova/nova.conf' | ||
4516 | 398 | for s in services: | ||
4517 | 399 | if not u.service_restarted(self.nova_cc_sentry, s, conf, | ||
4518 | 400 | pgrep_full=True, sleep_time=time): | ||
4519 | 401 | msg = "service {} didn't restart after config change".format(s) | ||
4520 | 402 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
4521 | 403 | time = 0 | ||
4522 | 404 | |||
4523 | 405 | def test_nova_default_config(self): | ||
4524 | 406 | """Verify the data in the nova config file's default section.""" | ||
4525 | 407 | # NOTE(coreycb): Currently no way to test on essex because config file | ||
4526 | 408 | # has no section headers. | ||
4527 | 409 | if self._get_openstack_release() == self.precise_essex: | ||
4528 | 410 | return | ||
4529 | 411 | |||
4530 | 412 | unit = self.nova_cc_sentry | ||
4531 | 413 | conf = '/etc/nova/nova.conf' | ||
4532 | 414 | rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', | ||
4533 | 415 | 'nova-cloud-controller:amqp') | ||
4534 | 416 | glance_relation = self.glance_sentry.relation('image-service', | ||
4535 | 417 | 'nova-cloud-controller:image-service') | ||
4536 | 418 | mysql_relation = self.mysql_sentry.relation('shared-db', | ||
4537 | 419 | 'nova-cloud-controller:shared-db') | ||
4538 | 420 | db_uri = "mysql://{}:{}@{}/{}".format('nova', | ||
4539 | 421 | mysql_relation['nova_password'], | ||
4540 | 422 | mysql_relation['db_host'], | ||
4541 | 423 | 'nova') | ||
4542 | 424 | keystone_ep = self.keystone_demo.service_catalog.url_for(\ | ||
4543 | 425 | service_type='identity', | ||
4544 | 426 | endpoint_type='publicURL') | ||
4545 | 427 | keystone_ec2 = "{}/ec2tokens".format(keystone_ep) | ||
4546 | 428 | |||
4547 | 429 | expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf', | ||
4548 | 430 | 'dhcpbridge': '/usr/bin/nova-dhcpbridge', | ||
4549 | 431 | 'logdir': '/var/log/nova', | ||
4550 | 432 | 'state_path': '/var/lib/nova', | ||
4551 | 433 | 'lock_path': '/var/lock/nova', | ||
4552 | 434 | 'force_dhcp_release': 'True', | ||
4553 | 435 | 'iscsi_helper': 'tgtadm', | ||
4554 | 436 | 'libvirt_use_virtio_for_bridges': 'True', | ||
4555 | 437 | 'connection_type': 'libvirt', | ||
4556 | 438 | 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf', | ||
4557 | 439 | 'verbose': 'True', | ||
4558 | 440 | 'ec2_private_dns_show_ip': 'True', | ||
4559 | 441 | 'api_paste_config': '/etc/nova/api-paste.ini', | ||
4560 | 442 | 'volumes_path': '/var/lib/nova/volumes', | ||
4561 | 443 | 'enabled_apis': 'ec2,osapi_compute,metadata', | ||
4562 | 444 | 'auth_strategy': 'keystone', | ||
4563 | 445 | 'compute_driver': 'libvirt.LibvirtDriver', | ||
4564 | 446 | 'keystone_ec2_url': keystone_ec2, | ||
4565 | 447 | 'sql_connection': db_uri, | ||
4566 | 448 | 'rabbit_userid': 'nova', | ||
4567 | 449 | 'rabbit_virtual_host': 'openstack', | ||
4568 | 450 | 'rabbit_password': rabbitmq_relation['password'], | ||
4569 | 451 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
4570 | 452 | 'glance_api_servers': glance_relation['glance-api-server'], | ||
4571 | 453 | 'network_manager': 'nova.network.manager.FlatDHCPManager', | ||
4572 | 454 | 's3_listen_port': '3333', | ||
4573 | 455 | 'osapi_compute_listen_port': '8774', | ||
4574 | 456 | 'ec2_listen_port': '8773'} | ||
4575 | 457 | |||
4576 | 458 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
4577 | 459 | if ret: | ||
4578 | 460 | message = "nova config error: {}".format(ret) | ||
4579 | 461 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4580 | 462 | |||
4581 | 463 | |||
4582 | 464 | def test_nova_keystone_authtoken_config(self): | ||
4583 | 465 | """Verify the data in the nova config file's keystone_authtoken | ||
4584 | 466 | section. This data only exists since icehouse.""" | ||
4585 | 467 | if self._get_openstack_release() < self.precise_icehouse: | ||
4586 | 468 | return | ||
4587 | 469 | |||
4588 | 470 | unit = self.nova_cc_sentry | ||
4589 | 471 | conf = '/etc/nova/nova.conf' | ||
4590 | 472 | keystone_relation = self.keystone_sentry.relation('identity-service', | ||
4591 | 473 | 'nova-cloud-controller:identity-service') | ||
4592 | 474 | keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'], | ||
4593 | 475 | keystone_relation['service_port']) | ||
4594 | 476 | expected = {'auth_uri': keystone_uri, | ||
4595 | 477 | 'auth_host': keystone_relation['service_host'], | ||
4596 | 478 | 'auth_port': keystone_relation['auth_port'], | ||
4597 | 479 | 'auth_protocol': keystone_relation['auth_protocol'], | ||
4598 | 480 | 'admin_tenant_name': keystone_relation['service_tenant'], | ||
4599 | 481 | 'admin_user': keystone_relation['service_username'], | ||
4600 | 482 | 'admin_password': keystone_relation['service_password']} | ||
4601 | 483 | |||
4602 | 484 | ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected) | ||
4603 | 485 | if ret: | ||
4604 | 486 | message = "nova config error: {}".format(ret) | ||
4605 | 487 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4606 | 488 | |||
4607 | 489 | def test_image_instance_create(self): | ||
4608 | 490 | """Create an image/instance, verify they exist, and delete them.""" | ||
4609 | 491 | # NOTE(coreycb): Skipping failing test on essex until resolved. essex | ||
4610 | 492 | # nova API calls are getting "Malformed request url (HTTP | ||
4611 | 493 | # 400)". | ||
4612 | 494 | if self._get_openstack_release() == self.precise_essex: | ||
4613 | 495 | u.log.error("Skipping failing test until resolved") | ||
4614 | 496 | return | ||
4615 | 497 | |||
4616 | 498 | image = u.create_cirros_image(self.glance, "cirros-image") | ||
4617 | 499 | if not image: | ||
4618 | 500 | amulet.raise_status(amulet.FAIL, msg="Image create failed") | ||
4619 | 501 | |||
4620 | 502 | instance = u.create_instance(self.nova_demo, "cirros-image", "cirros", | ||
4621 | 503 | "m1.tiny") | ||
4622 | 504 | if not instance: | ||
4623 | 505 | amulet.raise_status(amulet.FAIL, msg="Instance create failed") | ||
4624 | 506 | |||
4625 | 507 | found = False | ||
4626 | 508 | for instance in self.nova_demo.servers.list(): | ||
4627 | 509 | if instance.name == 'cirros': | ||
4628 | 510 | found = True | ||
4629 | 511 | if instance.status != 'ACTIVE': | ||
4630 | 512 | msg = "cirros instance is not active" | ||
4631 | 513 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4632 | 514 | |||
4633 | 515 | if not found: | ||
4634 | 516 | message = "nova cirros instance does not exist" | ||
4635 | 517 | amulet.raise_status(amulet.FAIL, msg=message) | ||
4636 | 518 | |||
4637 | 519 | u.delete_image(self.glance, image) | ||
4638 | 520 | u.delete_instance(self.nova_demo, instance) | ||
4639 | 0 | 521 | ||
4640 | === added directory 'tests/charmhelpers' | |||
4641 | === added file 'tests/charmhelpers/__init__.py' | |||
4642 | === added directory 'tests/charmhelpers/contrib' | |||
4643 | === added file 'tests/charmhelpers/contrib/__init__.py' | |||
4644 | === added directory 'tests/charmhelpers/contrib/amulet' | |||
4645 | === added file 'tests/charmhelpers/contrib/amulet/__init__.py' | |||
4646 | === added file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
4647 | --- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
4648 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-09-16 09:08:32 +0000 | |||
4649 | @@ -0,0 +1,71 @@ | |||
4650 | 1 | import amulet | ||
4651 | 2 | |||
4652 | 3 | import os | ||
4653 | 4 | |||
4654 | 5 | |||
class AmuletDeployment(object):
    """Amulet deployment.

    This class provides generic Amulet deployment and test runner
    methods.
    """

    def __init__(self, series=None):
        """Initialize the deployment environment.

        :param series: optional Ubuntu series name (e.g. 'trusty'); when
            given, it is recorded and passed through to amulet so store
            charms are pulled for the matching series.
        """
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services.

        Add services to the deployment where this_service is the local charm
        that we're focused on testing and other_services are the other
        charms that come from the charm store.
        """
        # Each service is a (name, num_units) tuple.
        name, units = range(2)

        # Amulet deploys the local charm from the cwd, so the directory
        # name must match the charm name under test.
        if this_service[name] != os.path.basename(os.getcwd()):
            s = this_service[name]
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            if self.series:
                # Pin store charms to the requested series.
                self.d.add(svc[name],
                           charm='cs:{}/{}'.format(self.series, svc[name]),
                           units=svc[units])
            else:
                self.d.add(svc[name], units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        # FIX: items() instead of py2-only iteritems() — works on both
        # Python 2 and 3.
        for k, v in relations.items():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.items():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.sentry.wait(timeout=900)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        # FIX: removed the no-op 'except Exception: raise' clause; other
        # exceptions propagate naturally anyway.

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
4721 | 0 | 72 | ||
4722 | === added file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
4723 | --- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
4724 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-09-16 09:08:32 +0000 | |||
4725 | @@ -0,0 +1,176 @@ | |||
4726 | 1 | import ConfigParser | ||
4727 | 2 | import io | ||
4728 | 3 | import logging | ||
4729 | 4 | import re | ||
4730 | 5 | import sys | ||
4731 | 6 | import time | ||
4732 | 7 | |||
4733 | 8 | |||
class AmuletUtils(object):
    """Amulet utilities.

    This class provides common utility functions that are used by Amulet
    tests.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        logger = logging.getLogger(name)
        formatter = logging.Formatter("%(asctime)s %(funcName)s "
                                      "%(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(formatter)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address."""
        # NOTE: only the shape is checked, not the 0-255 octet range.
        return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
                        ip) is not None

    def valid_url(self, url):
        """Return True if url parses as an http(s)/ftp(s) URL."""
        pattern = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return pattern.match(url) is not None

    def validate_services(self, commands):
        """Validate services.

        Verify the specified services are running on the corresponding
        service units.
        """
        for sentry, cmds in commands.iteritems():
            for cmd in cmds:
                output, code = sentry.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        contents = unit.file_contents(filename)
        parser = ConfigParser.ConfigParser()
        parser.readfp(io.StringIO(contents))
        return parser

    def validate_config_data(self, sentry_unit, config_file, section,
                             expected):
        """Validate config file data.

        Verify that the specified section of the config file contains
        the expected option key:value pairs.
        """
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser treats DEFAULT specially; it is not a real section.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for key in expected.keys():
            if not config.has_option(section, key):
                return "section [{}] is missing option {}".format(
                    section, key)
            if config.get(section, key) != expected[key]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, key, config.get(section, key), key,
                    expected[key])
        return None

    def _validate_dict_data(self, expected, actual):
        """Validate dictionary data.

        Compare expected dictionary data vs actual dictionary data.
        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluate a variable and returns a
        bool.
        """
        for k, v in expected.iteritems():
            if k not in actual:
                return "key '{}' does not exist".format(k)
            if isinstance(v, (basestring, bool, int, long)):
                # Literal expectation: compare directly.
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            elif not v(actual[k]):
                # Callable expectation: must return truthy for the value.
                return "{}:{}".format(k, actual[k])
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for item in expected:
            if item not in actual:
                return "expected item {} not found in actual list".format(
                    item)
        return None

    def not_null(self, string):
        """Return True unless the value is None."""
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Get process' start time.

        Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line.
        """
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        pid = sentry_unit.run(cmd)[0].strip()
        return self._get_dir_mtime(sentry_unit, '/proc/{}'.format(pid))

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Check if service was restarted.

        Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.
        """
        time.sleep(sleep_time)
        started = self._get_proc_start_time(sentry_unit, service, pgrep_full)
        return started >= self._get_file_mtime(sentry_unit, filename)

    def relation_error(self, name, data):
        """Format an unexpected-relation-data error message."""
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        """Format an unexpected-endpoint-data error message."""
        return 'unexpected endpoint data in {} - {}'.format(name, data)
4902 | 0 | 177 | ||
4903 | === added directory 'tests/charmhelpers/contrib/openstack' | |||
4904 | === added file 'tests/charmhelpers/contrib/openstack/__init__.py' | |||
4905 | === added directory 'tests/charmhelpers/contrib/openstack/amulet' | |||
4906 | === added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
4907 | === added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
4908 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
4909 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-16 09:08:32 +0000 | |||
4910 | @@ -0,0 +1,61 @@ | |||
4911 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
4912 | 2 | AmuletDeployment | ||
4913 | 3 | ) | ||
4914 | 4 | |||
4915 | 5 | |||
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series under test (e.g. 'trusty').
        :param openstack: openstack-origin value for OpenStack charms,
            e.g. 'cloud:precise-havana', or None for the series default.
        :param source: 'source' config value for support charms.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        # FIX: build a new list rather than appending to other_services,
        # which mutated the caller's list as a side effect.
        services = other_services + [this_service]
        # Charms that take a 'source' option instead of 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                if svc[name] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        # FIX: items() instead of py2-only iteritems() — works on both
        # Python 2 and 3.
        for service, config in configs.items():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Also publishes the release constants as instance attributes so
        # tests can compare against e.g. self.precise_icehouse.
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
4972 | 0 | 62 | ||
4973 | === added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
4974 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
4975 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-16 09:08:32 +0000 | |||
4976 | @@ -0,0 +1,275 @@ | |||
4977 | 1 | import logging | ||
4978 | 2 | import os | ||
4979 | 3 | import time | ||
4980 | 4 | import urllib | ||
4981 | 5 | |||
4982 | 6 | import glanceclient.v1.client as glance_client | ||
4983 | 7 | import keystoneclient.v2_0 as keystone_client | ||
4984 | 8 | import novaclient.v1_1.client as nova_client | ||
4985 | 9 | |||
4986 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
4987 | 11 | AmuletUtils | ||
4988 | 12 | ) | ||
4989 | 13 | |||
4990 | 14 | DEBUG = logging.DEBUG | ||
4991 | 15 | ERROR = logging.ERROR | ||
4992 | 16 | |||
4993 | 17 | |||
4994 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
4995 | 19 | """OpenStack amulet utilities. | ||
4996 | 20 | |||
4997 | 21 | This class inherits from AmuletUtils and has additional support | ||
4998 | 22 | that is specifically for use by OpenStack charms. | ||
4999 | 23 | """ | ||
5000 | 24 |
The diff has been truncated for viewing.