Merge lp:~mthaddon/mojo/juju-intro-mojo-specs into lp:mojo/mojo-specs
- juju-intro-mojo-specs
- Merge into mojo-specs
Proposed by
Tom Haddon
Status: | Merged |
---|---|
Merged at revision: | 9 |
Proposed branch: | lp:~mthaddon/mojo/juju-intro-mojo-specs |
Merge into: | lp:mojo/mojo-specs |
Diff against target: |
2179 lines (+2033/-27) 21 files modified
juju-intro/README (+5/-0) juju-intro/collect (+5/-0) juju-intro/deploy (+14/-0) juju-intro/manifest (+10/-0) juju-intro/manifest-verify (+4/-0) juju-intro/verify-installed (+10/-0) mojo-how-to/devel/verify (+0/-26) mojo-how-to/manifest-verify (+3/-1) mojo-spec-helpers/tests/check-juju (+5/-0) mojo-spec-helpers/tests/verify-nrpe (+37/-0) mojo-spec-helpers/utils/add-floating-ip (+114/-0) mojo-spec-helpers/utils/cache_managers.py (+56/-0) mojo-spec-helpers/utils/container_managers.py (+265/-0) mojo-spec-helpers/utils/mojo_os_utils.py (+554/-0) mojo-spec-helpers/utils/mojo_utils.py (+356/-0) mojo-spec-helpers/utils/shyaml.py (+219/-0) mojo-spec-helpers/utils/tests/README.md (+67/-0) mojo-spec-helpers/utils/tests/run_tests.py (+18/-0) mojo-spec-helpers/utils/tests/test_cache_managers.py (+80/-0) mojo-spec-helpers/utils/tests/test_container_managers.py (+188/-0) mojo-spec-helpers/utils/tests/test_mojo_utils.py (+23/-0) |
To merge this branch: | bzr merge lp:~mthaddon/mojo/juju-intro-mojo-specs |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Paul Collins | Approve | ||
Review via email: mp+270005@code.launchpad.net |
Commit message
Description of the change
Add a "juju-intro" service which uses the charms described on https://jujucharms.com/docs/stable/getting-started
To post a comment you must log in.
Revision history for this message
Tom Haddon (mthaddon) wrote : | # |
Revision history for this message
Tom Haddon (mthaddon) wrote : | # |
This is now ready for review. Has run as follows: http://
- 70. By Tom Haddon
-
Just use the current promulgated series of each charm
- 71. By Tom Haddon
-
Add an e2e check for the site being up after initial install
Revision history for this message
Paul Collins (pjdc) wrote : | # |
Approving, although I noticed:
- running this spec with the local provider fails here due to mysql's default dataset-size of 80%; not sure how likely it is for someone to be using it, however
- mojo-spec-helpers may need another refresh (or is itself stale); add-floating-ip was rewritten in 100% Python recently (see internal trunk)
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added directory 'juju-intro' | |||
2 | === added file 'juju-intro/README' | |||
3 | --- juju-intro/README 1970-01-01 00:00:00 +0000 | |||
4 | +++ juju-intro/README 2015-09-08 08:23:55 +0000 | |||
5 | @@ -0,0 +1,5 @@ | |||
6 | 1 | This spec contains the services that are included in the introduction to Juju | ||
7 | 2 | as part of [1], and is intended to be used with CI to confirm that the charms | ||
8 | 3 | we're pointing new users at for Juju are always in a working state. | ||
9 | 4 | |||
10 | 5 | [1] https://jujucharms.com/docs/stable/getting-started | ||
11 | 0 | 6 | ||
12 | === added symlink 'juju-intro/check-juju' | |||
13 | === target is u'../mojo-spec-helpers/tests/check-juju' | |||
14 | === added file 'juju-intro/collect' | |||
15 | --- juju-intro/collect 1970-01-01 00:00:00 +0000 | |||
16 | +++ juju-intro/collect 2015-09-08 08:23:55 +0000 | |||
17 | @@ -0,0 +1,5 @@ | |||
18 | 1 | wordpress lp:charms/wordpress | ||
19 | 2 | mysql lp:charms/mysql | ||
20 | 3 | |||
21 | 4 | # subordinates | ||
22 | 5 | nrpe lp:charms/nrpe-external-master | ||
23 | 0 | 6 | ||
24 | === added file 'juju-intro/deploy' | |||
25 | --- juju-intro/deploy 1970-01-01 00:00:00 +0000 | |||
26 | +++ juju-intro/deploy 2015-09-08 08:23:55 +0000 | |||
27 | @@ -0,0 +1,14 @@ | |||
28 | 1 | wordpress: | ||
29 | 2 | series: {{ series }} | ||
30 | 3 | services: | ||
31 | 4 | wordpress: | ||
32 | 5 | charm: wordpress | ||
33 | 6 | expose: true | ||
34 | 7 | mysql: | ||
35 | 8 | charm: mysql | ||
36 | 9 | nrpe: | ||
37 | 10 | charm: nrpe | ||
38 | 11 | relations: | ||
39 | 12 | - ["wordpress", "mysql"] | ||
40 | 13 | - ["wordpress", "nrpe"] | ||
41 | 14 | - ["mysql", "nrpe"] | ||
42 | 0 | 15 | ||
43 | === added file 'juju-intro/manifest' | |||
44 | --- juju-intro/manifest 1970-01-01 00:00:00 +0000 | |||
45 | +++ juju-intro/manifest 2015-09-08 08:23:55 +0000 | |||
46 | @@ -0,0 +1,10 @@ | |||
47 | 1 | collect | ||
48 | 2 | deploy delay=0 | ||
49 | 3 | include config=manifest-verify | ||
50 | 4 | |||
51 | 5 | ## This isn't included in the main verify manifest because after we've installed | ||
52 | 6 | ## the service this won't work as expected so it's a one time thing for the | ||
53 | 7 | ## initial deployment | ||
54 | 8 | |||
55 | 9 | # Verify the site is installed | ||
56 | 10 | verify config=verify-installed | ||
57 | 0 | 11 | ||
58 | === added file 'juju-intro/manifest-verify' | |||
59 | --- juju-intro/manifest-verify 1970-01-01 00:00:00 +0000 | |||
60 | +++ juju-intro/manifest-verify 2015-09-08 08:23:55 +0000 | |||
61 | @@ -0,0 +1,4 @@ | |||
62 | 1 | # Checking juju status | ||
63 | 2 | verify config=check-juju | ||
64 | 3 | # Running all nagios checks to confirm service is working as expected | ||
65 | 4 | verify config=verify-nrpe | ||
66 | 0 | 5 | ||
67 | === added file 'juju-intro/verify-installed' | |||
68 | --- juju-intro/verify-installed 1970-01-01 00:00:00 +0000 | |||
69 | +++ juju-intro/verify-installed 2015-09-08 08:23:55 +0000 | |||
70 | @@ -0,0 +1,10 @@ | |||
71 | 1 | #!/bin/bash | ||
72 | 2 | |||
73 | 3 | set -e | ||
74 | 4 | set -u | ||
75 | 5 | |||
76 | 6 | # Check the service is actually up and we can get to the install page | ||
77 | 7 | # This isn't included in the main verify manifest because after we've installed | ||
78 | 8 | # the service this won't work as expected so it's a one time thing for the | ||
79 | 9 | # initial deployment | ||
80 | 10 | juju ssh wordpress/0 "/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -H localhost -f follow -s '<title>WordPress › Installation</title>'" | ||
81 | 0 | 11 | ||
82 | === added symlink 'juju-intro/verify-nrpe' | |||
83 | === target is u'../mojo-spec-helpers/tests/verify-nrpe' | |||
84 | === removed file 'mojo-how-to/devel/verify' | |||
85 | --- mojo-how-to/devel/verify 2015-01-26 16:23:38 +0000 | |||
86 | +++ mojo-how-to/devel/verify 1970-01-01 00:00:00 +0000 | |||
87 | @@ -1,26 +0,0 @@ | |||
88 | 1 | #!/bin/bash | ||
89 | 2 | |||
90 | 3 | set -e | ||
91 | 4 | |||
92 | 5 | # If we have any etc bzr nagios checks, we need to wait up to 15 minutes | ||
93 | 6 | # for the cron to run to populate the check file, so just ignore those | ||
94 | 7 | NAGIOS_OUTPUT=$(juju status | sed -rn 's/^ {8}public-address: //p'| xargs -I% ssh ubuntu@% 'egrep -oh /usr.*lib.* /etc/nagios/nrpe.d/check_* |grep -v check_etc_bzr.py |sed "s/.*/(set -x; &) || echo MOJO_NAGIOS_FAIL /"|sudo -u nagios -s bash |& sed "s/^/%: /"' 2>/dev/null) | ||
95 | 8 | |||
96 | 9 | echo "${NAGIOS_OUTPUT}" | ||
97 | 10 | |||
98 | 11 | NAGIOS_FAIL=$(echo "${NAGIOS_OUTPUT}" | grep MOJO_NAGIOS_FAIL) || true | ||
99 | 12 | |||
100 | 13 | if [ -n "${NAGIOS_FAIL}" ]; then | ||
101 | 14 | echo "########################" | ||
102 | 15 | echo "# Nagios Checks Failed #" | ||
103 | 16 | echo "########################" | ||
104 | 17 | exit 1 | ||
105 | 18 | else | ||
106 | 19 | echo "########################" | ||
107 | 20 | echo "# Nagios Checks Passed #" | ||
108 | 21 | echo "########################" | ||
109 | 22 | fi | ||
110 | 23 | |||
111 | 24 | echo "#########################" | ||
112 | 25 | echo "# Successfully verified #" | ||
113 | 26 | echo "#########################" | ||
114 | 27 | 0 | ||
115 | === modified file 'mojo-how-to/manifest-verify' | |||
116 | --- mojo-how-to/manifest-verify 2015-01-21 15:39:29 +0000 | |||
117 | +++ mojo-how-to/manifest-verify 2015-09-08 08:23:55 +0000 | |||
118 | @@ -1,2 +1,4 @@ | |||
119 | 1 | # Check juju | ||
120 | 2 | verify config=check-juju | ||
121 | 1 | # The service is up and running, let's verify it | 3 | # The service is up and running, let's verify it |
123 | 2 | verify | 4 | verify config=verify-nrpe |
124 | 3 | 5 | ||
125 | === removed symlink 'mojo-how-to/production/verify' | |||
126 | === target was u'../devel/verify' | |||
127 | === added directory 'mojo-spec-helpers' | |||
128 | === added directory 'mojo-spec-helpers/tests' | |||
129 | === added file 'mojo-spec-helpers/tests/check-juju' | |||
130 | --- mojo-spec-helpers/tests/check-juju 1970-01-01 00:00:00 +0000 | |||
131 | +++ mojo-spec-helpers/tests/check-juju 2015-09-08 08:23:55 +0000 | |||
132 | @@ -0,0 +1,5 @@ | |||
133 | 1 | #!/usr/bin/python | ||
134 | 2 | import utils.mojo_utils as mojo_utils | ||
135 | 3 | |||
136 | 4 | mojo_utils.juju_check_hooks_complete() | ||
137 | 5 | mojo_utils.juju_status_check_and_wait() | ||
138 | 0 | 6 | ||
139 | === added symlink 'mojo-spec-helpers/tests/utils' | |||
140 | === target is u'../utils' | |||
141 | === added file 'mojo-spec-helpers/tests/verify-nrpe' | |||
142 | --- mojo-spec-helpers/tests/verify-nrpe 1970-01-01 00:00:00 +0000 | |||
143 | +++ mojo-spec-helpers/tests/verify-nrpe 2015-09-08 08:23:55 +0000 | |||
144 | @@ -0,0 +1,37 @@ | |||
145 | 1 | #!/bin/bash | ||
146 | 2 | |||
147 | 3 | set -e | ||
148 | 4 | |||
149 | 5 | # If we have any etc bzr nagios checks, we need to wait up to 15 minutes | ||
150 | 6 | # for the cron to run to populate the check file, so just ignore those | ||
151 | 7 | check() { | ||
152 | 8 | juju ssh $1 'egrep -oh /usr.*lib.* /etc/nagios/nrpe.d/check_* |\ | ||
153 | 9 | grep -v check_etc_bzr.py |sed "s/.*/(set -x; &) || \ | ||
154 | 10 | echo MOJO_NAGIOS_FAIL /"|sudo -u nagios -s bash' 2>/dev/null | ||
155 | 11 | } | ||
156 | 12 | |||
157 | 13 | NRPE_UNITS=$(juju status | sed -rn 's/^ *(nrpe\/[0-9]*):$/\1/p') | ||
158 | 14 | NAGIOS_OUTPUT=$( | ||
159 | 15 | for unit in $NRPE_UNITS; do | ||
160 | 16 | check $unit | sed -e "s#^#$unit: #" | ||
161 | 17 | done | ||
162 | 18 | ) | ||
163 | 19 | |||
164 | 20 | echo "${NAGIOS_OUTPUT}" | ||
165 | 21 | |||
166 | 22 | NAGIOS_FAIL=$(echo "${NAGIOS_OUTPUT}" | grep MOJO_NAGIOS_FAIL) || true | ||
167 | 23 | |||
168 | 24 | if [ -n "${NAGIOS_FAIL}" ]; then | ||
169 | 25 | echo "########################" | ||
170 | 26 | echo "# Nagios Checks Failed #" | ||
171 | 27 | echo "########################" | ||
172 | 28 | exit 1 | ||
173 | 29 | else | ||
174 | 30 | echo "########################" | ||
175 | 31 | echo "# Nagios Checks Passed #" | ||
176 | 32 | echo "########################" | ||
177 | 33 | fi | ||
178 | 34 | |||
179 | 35 | echo "#########################" | ||
180 | 36 | echo "# Successfully verified #" | ||
181 | 37 | echo "#########################" | ||
182 | 0 | 38 | ||
183 | === added directory 'mojo-spec-helpers/utils' | |||
184 | === added file 'mojo-spec-helpers/utils/__init__.py' | |||
185 | === added file 'mojo-spec-helpers/utils/add-floating-ip' | |||
186 | --- mojo-spec-helpers/utils/add-floating-ip 1970-01-01 00:00:00 +0000 | |||
187 | +++ mojo-spec-helpers/utils/add-floating-ip 2015-09-08 08:23:55 +0000 | |||
188 | @@ -0,0 +1,114 @@ | |||
189 | 1 | #!/bin/sh | ||
190 | 2 | # | ||
191 | 3 | # Author: Paul Gear | ||
192 | 4 | # Description: Manage floating IP allocations in mojo local directory for a juju service or unit. | ||
193 | 5 | # NOTE: $MOJO_PROJECT and $MOJO_STAGE must be set before calling this script. | ||
194 | 6 | # | ||
195 | 7 | |||
196 | 8 | set -e | ||
197 | 9 | set -u | ||
198 | 10 | |||
199 | 11 | |||
200 | 12 | SECRETS_DIR="/srv/mojo/LOCAL/$MOJO_PROJECT/$MOJO_STAGE/" | ||
201 | 13 | |||
202 | 14 | # Echo to standard error | ||
203 | 15 | # Useful for printing from functions without polluting the "returned" output | ||
204 | 16 | echo_stderr() { echo "$@" 1>&2; } | ||
205 | 17 | |||
206 | 18 | # print the juju unit followed by the machine instance id | ||
207 | 19 | get_juju_units() | ||
208 | 20 | { | ||
209 | 21 | juju status "$@" | python -c ' | ||
210 | 22 | import sys, yaml | ||
211 | 23 | status = yaml.safe_load(sys.stdin) | ||
212 | 24 | for serv in status["services"]: | ||
213 | 25 | if status["services"][serv].get("units"): | ||
214 | 26 | for unit in status["services"][serv]["units"]: | ||
215 | 27 | machine = status["services"][serv]["units"][unit]["machine"] | ||
216 | 28 | instance = status["machines"][machine]["instance-id"] | ||
217 | 29 | print unit, instance | ||
218 | 30 | ' | ||
219 | 31 | } | ||
220 | 32 | |||
221 | 33 | |||
222 | 34 | # get the existing floating IP for this unit, or create a new one | ||
223 | 35 | get_unit_floating_ip() | ||
224 | 36 | { | ||
225 | 37 | UNIT="$1" | ||
226 | 38 | MACHINE="$2" | ||
227 | 39 | UNIT_FILE_NAME=$(echo "$UNIT" | tr '/' '_') | ||
228 | 40 | FLOATING_IP_FILE="$SECRETS_DIR/$UNIT_FILE_NAME" | ||
229 | 41 | IP="" | ||
230 | 42 | if [ -s "$FLOATING_IP_FILE" ]; then | ||
231 | 43 | IP=$(head -n 1 "$FLOATING_IP_FILE") | ||
232 | 44 | |||
233 | 45 | echo_stderr "- Found IP "$IP" for "$UNIT | ||
234 | 46 | |||
235 | 47 | # check how the IP is used now | ||
236 | 48 | ALLOCATION=$(nova floating-ip-list | awk -v IP="$IP" '$2 == IP {print $5}') | ||
237 | 49 | case $ALLOCATION in | ||
238 | 50 | "-") | ||
239 | 51 | # unallocated - we can use it | ||
240 | 52 | echo_stderr "- IP "$IP" is currently unallocated" | ||
241 | 53 | ;; | ||
242 | 54 | "") | ||
243 | 55 | # non-existent - we'll create one below | ||
244 | 56 | IP="" | ||
245 | 57 | echo_stderr "- No IP found for "$UNIT | ||
246 | 58 | ;; | ||
247 | 59 | *) | ||
248 | 60 | # allocated to a unit already | ||
249 | 61 | if nova show $MACHINE | awk '$3 == "network"' | sed -re 's/,? +/\n/g' | grep -q "^$IP$"; then | ||
250 | 62 | # it's allocated to us; do nothing | ||
251 | 63 | echo_stderr "- IP "$IP" is already allocated to "$UNIT | ||
252 | 64 | return | ||
253 | 65 | fi | ||
254 | 66 | # it's allocated to another unit - create one below | ||
255 | 67 | echo_stderr "- IP "$IP" is allocated to another unit" | ||
256 | 68 | IP="" | ||
257 | 69 | ;; | ||
258 | 70 | esac | ||
259 | 71 | fi | ||
260 | 72 | if [ -z "$IP" ]; then | ||
261 | 73 | IP=$(nova floating-ip-create | grep -wo '[0-9.a-f:]*') | ||
262 | 74 | echo "$IP" > "$FLOATING_IP_FILE" | ||
263 | 75 | echo_stderr "- Created new IP "$IP | ||
264 | 76 | fi | ||
265 | 77 | echo "$IP" | ||
266 | 78 | } | ||
267 | 79 | |||
268 | 80 | |||
269 | 81 | usage() | ||
270 | 82 | { | ||
271 | 83 | cat <<EOF | ||
272 | 84 | Usage: $0 {service|unit} | ||
273 | 85 | |||
274 | 86 | # Add a floating IP to the apache2/0 unit: | ||
275 | 87 | add-floating-ip apache2/0 | ||
276 | 88 | |||
277 | 89 | # Add floating IPs to all units in the jenkins-slave service: | ||
278 | 90 | add-floating-ip jenkins-slave | ||
279 | 91 | |||
280 | 92 | # Add floating IPs to all units in the haproxy and squid services: | ||
281 | 93 | add-floating-ip haproxy squid | ||
282 | 94 | |||
283 | 95 | EOF | ||
284 | 96 | exit 2 | ||
285 | 97 | } | ||
286 | 98 | |||
287 | 99 | |||
288 | 100 | if [ "$#" -lt 1 ]; then | ||
289 | 101 | usage | ||
290 | 102 | fi | ||
291 | 103 | |||
292 | 104 | for i in "$@"; do | ||
293 | 105 | get_juju_units "$i" | while read unit machine; do | ||
294 | 106 | echo_stderr "" | ||
295 | 107 | echo_stderr "Assigning IPs for "$unit | ||
296 | 108 | IP=$(get_unit_floating_ip "$unit" "$machine") | ||
297 | 109 | if [ -n "$IP" ]; then | ||
298 | 110 | echo_stderr "- Assigning IP "$IP" to "$unit | ||
299 | 111 | nova floating-ip-associate "$machine" "$IP" | ||
300 | 112 | fi | ||
301 | 113 | done | ||
302 | 114 | done | ||
303 | 0 | 115 | ||
304 | === added file 'mojo-spec-helpers/utils/cache_managers.py' | |||
305 | --- mojo-spec-helpers/utils/cache_managers.py 1970-01-01 00:00:00 +0000 | |||
306 | +++ mojo-spec-helpers/utils/cache_managers.py 2015-09-08 08:23:55 +0000 | |||
307 | @@ -0,0 +1,56 @@ | |||
308 | 1 | # System | ||
309 | 2 | import os | ||
310 | 3 | import json | ||
311 | 4 | |||
312 | 5 | |||
313 | 6 | class JsonCache: | ||
314 | 7 | """ | ||
315 | 8 | Store key value pairs in a JSON file | ||
316 | 9 | """ | ||
317 | 10 | |||
318 | 11 | def __init__(self, cache_path): | ||
319 | 12 | self.cache_path = cache_path | ||
320 | 13 | |||
321 | 14 | def get_cache(self): | ||
322 | 15 | """ | ||
323 | 16 | Get the dictionary from the cache file | ||
324 | 17 | or if it doesn't exist, return an empty dictionary | ||
325 | 18 | """ | ||
326 | 19 | |||
327 | 20 | if os.path.isfile(self.cache_path): | ||
328 | 21 | with open(self.cache_path) as cache_file: | ||
329 | 22 | return json.load(cache_file) | ||
330 | 23 | else: | ||
331 | 24 | return {} | ||
332 | 25 | |||
333 | 26 | def put_cache(self, cache): | ||
334 | 27 | """ | ||
335 | 28 | Save a dictionary to the JSON cache file | ||
336 | 29 | """ | ||
337 | 30 | |||
338 | 31 | with open(self.cache_path, 'w') as cache_file: | ||
339 | 32 | json.dump(cache, cache_file) | ||
340 | 33 | |||
341 | 34 | def set(self, name, value): | ||
342 | 35 | """ | ||
343 | 36 | Set a key value pair to the cache | ||
344 | 37 | """ | ||
345 | 38 | |||
346 | 39 | cache = self.get_cache() | ||
347 | 40 | cache[name] = value | ||
348 | 41 | self.put_cache(cache) | ||
349 | 42 | |||
350 | 43 | def get(self, name): | ||
351 | 44 | """ | ||
352 | 45 | Retrieve a value from the cache by key | ||
353 | 46 | """ | ||
354 | 47 | |||
355 | 48 | return self.get_cache().get(name) | ||
356 | 49 | |||
357 | 50 | def wipe(self): | ||
358 | 51 | """ | ||
359 | 52 | Remove the cache file | ||
360 | 53 | """ | ||
361 | 54 | |||
362 | 55 | if os.path.isfile(self.cache_path): | ||
363 | 56 | os.remove(self.cache_path) | ||
364 | 0 | 57 | ||
365 | === added file 'mojo-spec-helpers/utils/container_managers.py' | |||
366 | --- mojo-spec-helpers/utils/container_managers.py 1970-01-01 00:00:00 +0000 | |||
367 | +++ mojo-spec-helpers/utils/container_managers.py 2015-09-08 08:23:55 +0000 | |||
368 | @@ -0,0 +1,265 @@ | |||
369 | 1 | # Modules | ||
370 | 2 | import requests | ||
371 | 3 | |||
372 | 4 | |||
373 | 5 | class LocalEnvironmentSwiftContainer: | ||
374 | 6 | """ | ||
375 | 7 | Manage a Swift container for the local deployment environment | ||
376 | 8 | for storing deployment settings | ||
377 | 9 | |||
378 | 10 | swift_connection: A working swiftclient.client.Connection object | ||
379 | 11 | container_name: The name of the swift container to use | ||
380 | 12 | """ | ||
381 | 13 | |||
382 | 14 | previous_build_obj = 'previous-build-label' | ||
383 | 15 | deployed_build_obj = 'deployed-build-label' | ||
384 | 16 | deployed_revno_obj = 'deployed-spec-revno' | ||
385 | 17 | code_upgrade_succeeded_template = 'code-upgrade-{}-{}-succeeded' | ||
386 | 18 | mojo_run_succeeded_template = 'mojo-run-{}-succeeded' | ||
387 | 19 | |||
388 | 20 | def __init__(self, swift_connection, container_name): | ||
389 | 21 | self.swift_connection = swift_connection | ||
390 | 22 | self.container_name = container_name | ||
391 | 23 | |||
392 | 24 | def previous_build_label(self): | ||
393 | 25 | """ | ||
394 | 26 | Get the build label that was previously deployed on this environment | ||
395 | 27 | From the Swift account associated with this environment | ||
396 | 28 | through environment variables "OS_AUTH_URL" etc. | ||
397 | 29 | """ | ||
398 | 30 | |||
399 | 31 | return self.swift_connection.get_object( | ||
400 | 32 | container=self.container_name, | ||
401 | 33 | obj=self.previous_build_obj | ||
402 | 34 | )[1].strip() | ||
403 | 35 | |||
404 | 36 | def deployed_build_label(self): | ||
405 | 37 | """ | ||
406 | 38 | Get the build label that is currently deployed on this environment | ||
407 | 39 | From the Swift account associated with this environment | ||
408 | 40 | through environment variables "OS_AUTH_URL" etc. | ||
409 | 41 | """ | ||
410 | 42 | |||
411 | 43 | return self.swift_connection.get_object( | ||
412 | 44 | container=self.container_name, | ||
413 | 45 | obj=self.deployed_build_obj | ||
414 | 46 | )[1].strip() | ||
415 | 47 | |||
416 | 48 | def deployed_spec_revno(self): | ||
417 | 49 | """ | ||
418 | 50 | Get the mojo spec revision number that is currently deployed | ||
419 | 51 | on this environment from the Swift account associated with this | ||
420 | 52 | environment through environment variables "OS_AUTH_URL" etc. | ||
421 | 53 | """ | ||
422 | 54 | |||
423 | 55 | return self.swift_connection.get_object( | ||
424 | 56 | container=self.container_name, | ||
425 | 57 | obj=self.deployed_revno_obj | ||
426 | 58 | )[1].strip() | ||
427 | 59 | |||
428 | 60 | def save_code_upgrade_succeeded( | ||
429 | 61 | self, from_build_label, to_build_label | ||
430 | 62 | ): | ||
431 | 63 | """ | ||
432 | 64 | Save an object into Swift | ||
433 | 65 | to signify that a code-upgrade succeeded | ||
434 | 66 | from one build_label to another | ||
435 | 67 | """ | ||
436 | 68 | |||
437 | 69 | upgrade_object_name = self.code_upgrade_succeeded_template.format( | ||
438 | 70 | from_build_label, to_build_label | ||
439 | 71 | ) | ||
440 | 72 | |||
441 | 73 | self.swift_connection.put_object( | ||
442 | 74 | container=self.container_name, | ||
443 | 75 | obj=upgrade_object_name, | ||
444 | 76 | contents='true' | ||
445 | 77 | ) | ||
446 | 78 | |||
447 | 79 | def save_mojo_run_succeeded(self, spec_revno): | ||
448 | 80 | """ | ||
449 | 81 | Save an object into Swift | ||
449 | 82 | to signify that a mojo-run succeeded with | ||
451 | 83 | a given revision number of the mojo spec | ||
452 | 84 | """ | ||
453 | 85 | |||
454 | 86 | run_object_name = self.mojo_run_succeeded_template.format( | ||
455 | 87 | spec_revno | ||
456 | 88 | ) | ||
457 | 89 | |||
458 | 90 | self.swift_connection.put_object( | ||
459 | 91 | container=self.container_name, | ||
460 | 92 | obj=run_object_name, | ||
461 | 93 | contents='true' | ||
462 | 94 | ) | ||
463 | 95 | |||
464 | 96 | def save_deployed_build_label(self, build_label): | ||
465 | 97 | """ | ||
466 | 98 | Save an object into swift containing | ||
467 | 99 | the build_label which is deployed to this environment | ||
468 | 100 | """ | ||
469 | 101 | |||
470 | 102 | self.swift_connection.put_object( | ||
471 | 103 | container=self.container_name, | ||
472 | 104 | obj='deployed-build-label', | ||
473 | 105 | contents=build_label | ||
474 | 106 | ) | ||
475 | 107 | |||
476 | 108 | def save_previous_build_label(self, build_label): | ||
477 | 109 | """ | ||
478 | 110 | Save an object into swift | ||
479 | 111 | to record the previously deployed build_label | ||
480 | 112 | """ | ||
481 | 113 | |||
482 | 114 | self.swift_connection.put_object( | ||
483 | 115 | container=self.container_name, | ||
484 | 116 | obj='previous-build-label', | ||
485 | 117 | contents=build_label | ||
486 | 118 | ) | ||
487 | 119 | |||
488 | 120 | def save_mojo_spec_revno(self, spec_revno): | ||
489 | 121 | """ | ||
490 | 122 | Save an object into swift containing | ||
491 | 123 | the current mojo spec revision number | ||
492 | 124 | """ | ||
493 | 125 | |||
494 | 126 | self.swift_connection.put_object( | ||
495 | 127 | container=self.container_name, | ||
496 | 128 | obj='deployed-spec-revno', | ||
497 | 129 | contents=spec_revno | ||
498 | 130 | ) | ||
499 | 131 | |||
500 | 132 | |||
501 | 133 | class HttpContainer: | ||
502 | 134 | """ | ||
503 | 135 | Methods for retrieving objects | ||
504 | 136 | from a Swift HTTP storage container | ||
505 | 137 | """ | ||
506 | 138 | |||
507 | 139 | def __init__(self, container_url): | ||
508 | 140 | """ | ||
509 | 141 | container_url: The storage URL path for the swift container | ||
510 | 142 | """ | ||
511 | 143 | |||
512 | 144 | self.container_url = container_url | ||
513 | 145 | |||
514 | 146 | def get(self, object_name): | ||
515 | 147 | """ | ||
516 | 148 | Retrieve the contents of an object | ||
517 | 149 | """ | ||
518 | 150 | |||
519 | 151 | object_url = '{}/{}'.format(self.container_url, object_name) | ||
520 | 152 | |||
521 | 153 | response = requests.get(object_url) | ||
522 | 154 | |||
523 | 155 | try: | ||
524 | 156 | response.raise_for_status() | ||
525 | 157 | except requests.exceptions.HTTPError as http_error: | ||
526 | 158 | http_error.message += '. URL: {}'.format(object_url) | ||
527 | 159 | http_error.args += ('URL: {}'.format(object_url),) | ||
528 | 160 | raise http_error | ||
529 | 161 | |||
530 | 162 | response.raise_for_status() | ||
531 | 163 | |||
532 | 164 | return response.text | ||
533 | 165 | |||
534 | 166 | def head(self, object_name): | ||
535 | 167 | """ | ||
536 | 168 | Retrieve the HEAD of an object | ||
537 | 169 | """ | ||
538 | 170 | |||
539 | 171 | object_url = '{}/{}'.format(self.container_url, object_name) | ||
540 | 172 | |||
541 | 173 | return requests.head(object_url) | ||
542 | 174 | |||
543 | 175 | def exists(self, object_name): | ||
544 | 176 | """ | ||
545 | 177 | Check an object exists | ||
546 | 178 | """ | ||
547 | 179 | |||
548 | 180 | return self.head(object_name).ok | ||
549 | 181 | |||
550 | 182 | |||
551 | 183 | class CIContainer(HttpContainer): | ||
552 | 184 | """ | ||
553 | 185 | Methods for retrieving continuous integration | ||
554 | 186 | resources from an http swift store | ||
555 | 187 | """ | ||
556 | 188 | |||
557 | 189 | code_upgrade_succeeded_template = 'code-upgrade-{}-{}-succeeded' | ||
558 | 190 | mojo_run_succeeded_template = 'mojo-run-{}-succeeded' | ||
559 | 191 | |||
560 | 192 | def has_code_upgrade_been_tested( | ||
561 | 193 | self, | ||
562 | 194 | from_build_label, | ||
563 | 195 | to_build_label | ||
564 | 196 | ): | ||
565 | 197 | """ | ||
566 | 198 | Check if a specific code upgrade has been tested | ||
567 | 199 | (from one build_label to another) | ||
568 | 200 | by checking if a specially named Swift object exists | ||
569 | 201 | in the CI system's Swift HTTP store. | ||
570 | 202 | (This object will have been created | ||
571 | 203 | by self.save_code_upgrade_succeeded) | ||
572 | 204 | """ | ||
573 | 205 | |||
574 | 206 | return self.exists( | ||
575 | 207 | self.code_upgrade_succeeded_template.format( | ||
576 | 208 | from_build_label, | ||
577 | 209 | to_build_label | ||
578 | 210 | ) | ||
579 | 211 | ) | ||
580 | 212 | |||
581 | 213 | def has_mojo_run_been_tested(self, spec_revno): | ||
582 | 214 | """ | ||
583 | 215 | Check if a specific mojo spec revision number has been tested | ||
584 | 216 | by checking if a specially named Swift object exists | ||
585 | 217 | in the CI system's Swift HTTP store. | ||
586 | 218 | (This object will have been created by self.save_mojo_run_succeeded) | ||
587 | 219 | """ | ||
588 | 220 | |||
589 | 221 | return self.exists( | ||
590 | 222 | self.mojo_run_succeeded_template.format( | ||
591 | 223 | spec_revno | ||
592 | 224 | ) | ||
593 | 225 | ) | ||
594 | 226 | |||
595 | 227 | |||
596 | 228 | class BuildContainer(HttpContainer): | ||
597 | 229 | """ | ||
598 | 230 | Methods for interacting with the HTTP Swift store | ||
599 | 231 | containing code builds | ||
600 | 232 | """ | ||
601 | 233 | |||
602 | 234 | def latest_build_label(self): | ||
603 | 235 | """ | ||
604 | 236 | Get the latest build label from the webteam's Swift HTTP object store | ||
605 | 237 | """ | ||
606 | 238 | |||
607 | 239 | return self.get('latest-build-label').strip() | ||
608 | 240 | |||
609 | 241 | |||
610 | 242 | class DeployedEnvironmentContainer(HttpContainer): | ||
611 | 243 | """ | ||
612 | 244 | Methods for requesting information | ||
613 | 245 | from the HTTP swift store for a | ||
613 | 245 | deployed environment (e.g. production, staging) | ||
615 | 247 | """ | ||
616 | 248 | |||
617 | 249 | def deployed_build_label(self): | ||
618 | 250 | """ | ||
619 | 251 | Get the build label which was most recently | ||
620 | 252 | deployed to this environment | ||
621 | 253 | from their Swift HTTP object store | ||
622 | 254 | """ | ||
623 | 255 | |||
624 | 256 | return self.get('deployed-build-label').strip() | ||
625 | 257 | |||
626 | 258 | def deployed_spec_revno(self): | ||
627 | 259 | """ | ||
628 | 260 | Get the version of the mojo spec | ||
629 | 261 | which was most recently run on this environment | ||
630 | 262 | from their Swift HTTP object store | ||
631 | 263 | """ | ||
632 | 264 | |||
633 | 265 | return self.get('deployed-spec-revno').strip() | ||
634 | 0 | 266 | ||
635 | === added file 'mojo-spec-helpers/utils/mojo_os_utils.py' | |||
636 | --- mojo-spec-helpers/utils/mojo_os_utils.py 1970-01-01 00:00:00 +0000 | |||
637 | +++ mojo-spec-helpers/utils/mojo_os_utils.py 2015-09-08 08:23:55 +0000 | |||
638 | @@ -0,0 +1,554 @@ | |||
639 | 1 | #!/usr/bin/python | ||
640 | 2 | |||
641 | 3 | import swiftclient | ||
642 | 4 | import glanceclient | ||
643 | 5 | from keystoneclient.v2_0 import client as keystoneclient | ||
644 | 6 | import mojo_utils | ||
645 | 7 | from novaclient.v1_1 import client as novaclient | ||
646 | 8 | from neutronclient.v2_0 import client as neutronclient | ||
647 | 9 | import logging | ||
648 | 10 | import re | ||
649 | 11 | import sys | ||
650 | 12 | import tempfile | ||
651 | 13 | import urllib | ||
652 | 14 | import os | ||
653 | 15 | import time | ||
654 | 16 | import subprocess | ||
655 | 17 | import paramiko | ||
656 | 18 | import StringIO | ||
657 | 19 | |||
658 | 20 | |||
659 | 21 | # Openstack Client helpers | ||
660 | 22 | def get_nova_creds(cloud_creds): | ||
661 | 23 | auth = { | ||
662 | 24 | 'username': cloud_creds['OS_USERNAME'], | ||
663 | 25 | 'api_key': cloud_creds['OS_PASSWORD'], | ||
664 | 26 | 'auth_url': cloud_creds['OS_AUTH_URL'], | ||
665 | 27 | 'project_id': cloud_creds['OS_TENANT_NAME'], | ||
666 | 28 | 'region_name': cloud_creds['OS_REGION_NAME'], | ||
667 | 29 | } | ||
668 | 30 | return auth | ||
669 | 31 | |||
670 | 32 | |||
671 | 33 | def get_ks_creds(cloud_creds): | ||
672 | 34 | auth = { | ||
673 | 35 | 'username': cloud_creds['OS_USERNAME'], | ||
674 | 36 | 'password': cloud_creds['OS_PASSWORD'], | ||
675 | 37 | 'auth_url': cloud_creds['OS_AUTH_URL'], | ||
676 | 38 | 'tenant_name': cloud_creds['OS_TENANT_NAME'], | ||
677 | 39 | 'region_name': cloud_creds['OS_REGION_NAME'], | ||
678 | 40 | } | ||
679 | 41 | return auth | ||
680 | 42 | |||
681 | 43 | |||
682 | 44 | def get_swift_creds(cloud_creds): | ||
683 | 45 | auth = { | ||
684 | 46 | 'user': cloud_creds['OS_USERNAME'], | ||
685 | 47 | 'key': cloud_creds['OS_PASSWORD'], | ||
686 | 48 | 'authurl': cloud_creds['OS_AUTH_URL'], | ||
687 | 49 | 'tenant_name': cloud_creds['OS_TENANT_NAME'], | ||
688 | 50 | 'auth_version': '2.0', | ||
689 | 51 | } | ||
690 | 52 | return auth | ||
691 | 53 | |||
692 | 54 | |||
693 | 55 | def get_nova_client(novarc_creds): | ||
694 | 56 | nova_creds = get_nova_creds(novarc_creds) | ||
695 | 57 | return novaclient.Client(**nova_creds) | ||
696 | 58 | |||
697 | 59 | |||
698 | 60 | def get_neutron_client(novarc_creds): | ||
699 | 61 | neutron_creds = get_ks_creds(novarc_creds) | ||
700 | 62 | return neutronclient.Client(**neutron_creds) | ||
701 | 63 | |||
702 | 64 | |||
703 | 65 | def get_keystone_client(novarc_creds): | ||
704 | 66 | keystone_creds = get_ks_creds(novarc_creds) | ||
705 | 67 | keystone_creds['insecure'] = True | ||
706 | 68 | return keystoneclient.Client(**keystone_creds) | ||
707 | 69 | |||
708 | 70 | |||
709 | 71 | def get_swift_client(novarc_creds, insecure=True): | ||
710 | 72 | swift_creds = get_swift_creds(novarc_creds) | ||
711 | 73 | swift_creds['insecure'] = insecure | ||
712 | 74 | return swiftclient.client.Connection(**swift_creds) | ||
713 | 75 | |||
714 | 76 | |||
715 | 77 | def get_glance_client(novarc_creds): | ||
716 | 78 | kc = get_keystone_client(novarc_creds) | ||
717 | 79 | glance_endpoint = kc.service_catalog.url_for(service_type='image', | ||
718 | 80 | endpoint_type='publicURL') | ||
719 | 81 | return glanceclient.Client('1', glance_endpoint, token=kc.auth_token) | ||
720 | 82 | |||
721 | 83 | |||
722 | 84 | # Glance Helpers | ||
# Glance Helpers
def download_image(image, image_glance_name=None):
    """Download *image* (a URL) into a fresh temp dir; return the local path.

    The file name defaults to the last path component of the URL.
    """
    logging.info('Downloading ' + image)
    target_dir = tempfile.mkdtemp(dir='/tmp')
    target_name = image_glance_name or image.split('/')[-1]
    target_path = os.path.join(target_dir, target_name)
    urllib.urlretrieve(image, target_path)
    return target_path
731 | 93 | |||
732 | 94 | |||
def upload_image(gclient, ifile, image_name, public, disk_format,
                 container_format):
    """Upload local file *ifile* to glance as *image_name*.

    *public*, *disk_format* and *container_format* are passed straight
    through to glance's image-create call.
    """
    logging.info('Uploading %s to glance ' % (image_name))
    with open(ifile) as fimage:
        gclient.images.create(
            name=image_name,
            is_public=public,
            disk_format=disk_format,
            container_format=container_format,
            data=fimage)
743 | 105 | |||
744 | 106 | |||
def get_images_list(gclient):
    """Return the names of all images known to glance."""
    names = []
    for image in gclient.images.list():
        names.append(image.name)
    return names
747 | 109 | |||
748 | 110 | |||
749 | 111 | # Keystone helpers | ||
# Keystone helpers
def tenant_create(kclient, tenants):
    """Create each tenant name in *tenants* unless keystone already has it.

    Idempotent: existing tenants are logged and skipped.
    """
    current_tenants = [tenant.name for tenant in kclient.tenants.list()]
    for tenant in tenants:
        if tenant in current_tenants:
            # Fix: the adjacent literals previously joined as
            # "alreadyexists" (missing space).
            logging.warning('Not creating tenant %s it already '
                            'exists' % (tenant))
        else:
            logging.info('Creating tenant %s' % (tenant))
            kclient.tenants.create(tenant_name=tenant)
759 | 121 | |||
760 | 122 | |||
def user_create(kclient, users):
    """Create keystone users from *users* dicts.

    Each dict needs 'username', 'password', 'email' and 'tenant' keys.
    Users that already exist are logged and skipped.
    """
    current_users = [user.name for user in kclient.users.list()]
    for user in users:
        if user['username'] in current_users:
            # Fix: the adjacent literals previously joined as
            # "alreadyexists" (missing space).
            logging.warning('Not creating user %s it already '
                            'exists' % (user['username']))
        else:
            logging.info('Creating user %s' % (user['username']))
            tenant_id = get_tenant_id(kclient, user['tenant'])
            kclient.users.create(name=user['username'],
                                 password=user['password'],
                                 email=user['email'],
                                 tenant_id=tenant_id)
774 | 136 | |||
775 | 137 | |||
def get_roles_for_user(kclient, user_id, tenant_id):
    """Return the ids of every role *user_id* holds on *tenant_id*."""
    return [role.id
            for role in kclient.roles.roles_for_user(user_id, tenant_id)]
782 | 144 | |||
783 | 145 | |||
def add_users_to_roles(kclient, users):
    """Grant each user in *users* the roles listed under its 'roles' key,
    scoped to the user's tenant.  Already-held roles are skipped."""
    for user_details in users:
        tenant_id = get_tenant_id(kclient, user_details['tenant'])
        for role_name in user_details['roles']:
            role = kclient.roles.find(name=role_name)
            user = kclient.users.find(name=user_details['username'])
            users_roles = get_roles_for_user(kclient, user, tenant_id)
            if role.id in users_roles:
                # Fix: arguments were swapped, logging
                # "Not adding role <username> to <rolename> ...".
                logging.warning('Not adding role %s to %s it already has '
                                'it' % (role_name, user_details['username']))
            else:
                # Fix: literals previously joined as "tenant<id>"
                # (missing space).
                logging.info('Adding %s to role %s for tenant '
                             '%s' % (user_details['username'], role_name,
                                     tenant_id))
                # NOTE(review): the username string is passed where the
                # looked-up user object is at hand — confirm the keystone
                # client resolves names here.
                kclient.roles.add_user_role(user_details['username'], role,
                                            tenant_id)
800 | 162 | |||
801 | 163 | |||
def get_tenant_id(ks_client, tenant_name):
    """Return the id of *tenant_name*, or None when keystone has no match."""
    matches = (t._info['id'] for t in ks_client.tenants.list()
               if t._info['name'] == tenant_name)
    return next(matches, None)
807 | 169 | |||
808 | 170 | |||
809 | 171 | # Neutron Helpers | ||
# Neutron Helpers
def get_gateway_uuids():
    """Return the nova instance-ids of every machine backing the
    neutron-gateway service, as reported by juju status."""
    machines = mojo_utils.get_juju_status('neutron-gateway')['machines']
    return [machines[machine]['instance-id'] for machine in machines]
816 | 178 | |||
817 | 179 | |||
def get_net_uuid(neutron_client, net_name):
    """Return the uuid of the first neutron network named *net_name*."""
    matches = neutron_client.list_networks(name=net_name)['networks']
    return matches[0]['id']
821 | 183 | |||
822 | 184 | |||
def configure_gateway_ext_port(novaclient):
    """Give each neutron-gateway instance a second NIC (if missing) and
    point the charm's ext-port option at eth1."""
    uuids = get_gateway_uuids()
    for uuid in uuids:
        server = novaclient.servers.get(uuid)
        mac_addrs = [a.mac_addr for a in server.interface_list()]
        # A second MAC means the extra port is already attached.
        if len(mac_addrs) < 2:
            logging.info('Adding additional port to Neutron Gateway')
            # All-None args leave port/network/ip choice to nova —
            # TODO(review): confirm this picks the intended network.
            server.interface_attach(port_id=None, net_id=None, fixed_ip=None)
        else:
            logging.warning('Neutron Gateway already has additional port')
    if uuids:
        logging.info('Seting Neutron Gateway external port to eth1')
        mojo_utils.juju_set('neutron-gateway', 'ext-port=eth1')
836 | 198 | |||
837 | 199 | |||
def create_tenant_network(neutron_client, tenant_id, net_name='private',
                          shared=False, network_type='gre'):
    """Return the tenant network *net_name*, creating it if absent.

    Idempotent: an existing network of that name is returned unchanged.
    """
    networks = neutron_client.list_networks(name=net_name)
    if len(networks['networks']) == 0:
        logging.info('Creating network: %s',
                     net_name)
        network_msg = {
            'network': {
                'name': net_name,
                'shared': shared,
                'tenant_id': tenant_id,
            }
        }
        if network_type == 'vxlan':
            # Hard-coded segmentation id for vxlan — TODO(review): confirm
            # 1233 does not collide with existing networks.
            network_msg['network']['provider:segmentation_id'] = 1233
            network_msg['network']['provider:network_type'] = network_type
        network = neutron_client.create_network(network_msg)['network']
    else:
        logging.warning('Network %s already exists.', net_name)
        network = networks['networks'][0]
    return network
859 | 221 | |||
860 | 222 | |||
def create_external_network(neutron_client, tenant_id, net_name='ext_net',
                            network_type='gre'):
    """Return the external (router:external) network *net_name*, creating
    it if absent.  Idempotent."""
    networks = neutron_client.list_networks(name=net_name)
    if len(networks['networks']) == 0:
        logging.info('Configuring external bridge')
        network_msg = {
            'name': net_name,
            'router:external': True,
            'tenant_id': tenant_id,
        }
        if network_type == 'vxlan':
            # Hard-coded segmentation id — TODO(review): confirm 1234 is
            # free (the tenant network helper uses 1233).
            network_msg['provider:segmentation_id'] = 1234
            network_msg['provider:network_type'] = network_type

        logging.info('Creating new external network definition: %s',
                     net_name)
        network = neutron_client.create_network(
            {'network': network_msg})['network']
        logging.info('New external network created: %s', network['id'])
    else:
        logging.warning('Network %s already exists.', net_name)
        network = networks['networks'][0]
    return network
884 | 246 | |||
885 | 247 | |||
def create_tenant_subnet(neutron_client, tenant_id, network, cidr, dhcp=True,
                         subnet_name='private_subnet'):
    """Return an IPv4 subnet *subnet_name* with *cidr* on *network*,
    creating it if absent.  Idempotent."""
    # Create subnet
    subnets = neutron_client.list_subnets(name=subnet_name)
    if len(subnets['subnets']) == 0:
        logging.info('Creating subnet')
        subnet_msg = {
            'subnet': {
                'name': subnet_name,
                'network_id': network['id'],
                'enable_dhcp': dhcp,
                'cidr': cidr,
                'ip_version': 4,
                'tenant_id': tenant_id
            }
        }
        subnet = neutron_client.create_subnet(subnet_msg)['subnet']
    else:
        logging.warning('Subnet %s already exists.', subnet_name)
        subnet = subnets['subnets'][0]
    return subnet
907 | 269 | |||
908 | 270 | |||
def create_external_subnet(neutron_client, tenant_id, network,
                           default_gateway=None, cidr=None,
                           start_floating_ip=None, end_floating_ip=None,
                           subnet_name='ext_net_subnet'):
    """Return an IPv4, DHCP-less subnet on the external *network*, creating
    it if absent.

    Gateway, cidr and the floating-IP allocation pool are only set when the
    corresponding arguments are given (both ends of the pool are required).
    Idempotent.
    """
    subnets = neutron_client.list_subnets(name=subnet_name)
    if len(subnets['subnets']) == 0:
        subnet_msg = {
            'name': subnet_name,
            'network_id': network['id'],
            'enable_dhcp': False,
            'ip_version': 4,
            'tenant_id': tenant_id
        }

        if default_gateway:
            subnet_msg['gateway_ip'] = default_gateway
        if cidr:
            subnet_msg['cidr'] = cidr
        if (start_floating_ip and end_floating_ip):
            allocation_pool = {
                'start': start_floating_ip,
                'end': end_floating_ip,
            }
            subnet_msg['allocation_pools'] = [allocation_pool]

        logging.info('Creating new subnet')
        subnet = neutron_client.create_subnet({'subnet': subnet_msg})['subnet']
        logging.info('New subnet created: %s', subnet['id'])
    else:
        logging.warning('Subnet %s already exists.', subnet_name)
        subnet = subnets['subnets'][0]
    return subnet
941 | 303 | |||
942 | 304 | |||
def update_subnet_dns(neutron_client, subnet, dns_servers):
    """Replace the subnet's dns_nameservers with the comma-separated
    addresses in *dns_servers*."""
    logging.info('Updating dns_nameservers (%s) for subnet',
                 dns_servers)
    body = {'subnet': {'dns_nameservers': dns_servers.split(',')}}
    neutron_client.update_subnet(subnet['id'], body)
952 | 314 | |||
953 | 315 | |||
def create_provider_router(neutron_client, tenant_id):
    """Return the router named 'provider-router', creating it if absent.
    Idempotent."""
    routers = neutron_client.list_routers(name='provider-router')
    if len(routers['routers']) == 0:
        logging.info('Creating provider router for external network access')
        router_info = {
            'router': {
                'name': 'provider-router',
                'tenant_id': tenant_id
            }
        }
        router = neutron_client.create_router(router_info)['router']
        logging.info('New router created: %s', (router['id']))
    else:
        logging.warning('Router provider-router already exists.')
        router = routers['routers'][0]
    return router
970 | 332 | |||
971 | 333 | |||
def plug_extnet_into_router(neutron_client, router, network):
    """Set *network* as the external gateway of *router*, unless a
    'network:router_gateway' port already exists on that network."""
    ports = neutron_client.list_ports(device_owner='network:router_gateway',
                                      network_id=network['id'])
    if len(ports['ports']) == 0:
        logging.info('Plugging router into ext_net')
        router = neutron_client.add_gateway_router(
            router=router['id'],
            body={'network_id': network['id']})
        logging.info('Router connected')
    else:
        logging.warning('Router already connected')
983 | 345 | |||
984 | 346 | |||
def plug_subnet_into_router(neutron_client, router, network, subnet):
    """Attach *subnet* to the router named *router* (a name, not a dict).

    Exits the process when the router cannot be found; skips the attach
    when a router_interface port already exists on *network*.
    """
    routers = neutron_client.list_routers(name=router)
    if len(routers['routers']) == 0:
        logging.error('Unable to locate provider router %s', router)
        sys.exit(1)
    else:
        # Check to see if subnet already plugged into router
        ports = neutron_client.list_ports(
            device_owner='network:router_interface',
            network_id=network['id'])
        if len(ports['ports']) == 0:
            logging.info('Adding interface from subnet to %s' % (router))
            router = routers['routers'][0]
            neutron_client.add_interface_router(router['id'],
                                                {'subnet_id': subnet['id']})
        else:
            logging.warning('Router already connected to subnet')
1002 | 364 | |||
1003 | 365 | |||
1004 | 366 | # Nova Helpers | ||
# Nova Helpers
def create_keypair(nova_client, keypair_name):
    """Recreate the ssh keypair *keypair_name* (deleting any existing one)
    and return the new private key material."""
    if nova_client.keypairs.findall(name=keypair_name):
        logging.info('Deleting key %s' % (keypair_name))
        stale_key = nova_client.keypairs.find(name=keypair_name)
        nova_client.keypairs.delete(stale_key)
    logging.info('Creating key %s' % (keypair_name))
    return nova_client.keypairs.create(name=keypair_name).private_key
1013 | 375 | |||
1014 | 376 | |||
def boot_instance(nova_client, image_name, flavor_name, key_name):
    """Boot one *image_name*/*flavor_name* instance on the 'private'
    network with keypair *key_name*; return the (not yet ACTIVE) server."""
    image = nova_client.images.find(name=image_name)
    flavor = nova_client.flavors.find(name=flavor_name)
    net = nova_client.networks.find(label="private")
    nics = [{'net-id': net.id}]
    # Obviously time may not produce a unique name
    vm_name = time.strftime("%Y%m%d%H%M%S")
    logging.info('Creating %s %s '
                 'instance %s' % (flavor_name, image_name, vm_name))
    instance = nova_client.servers.create(name=vm_name,
                                          image=image,
                                          flavor=flavor,
                                          key_name=key_name,
                                          nics=nics)
    return instance
1030 | 392 | |||
1031 | 393 | |||
def wait_for_active(nova_client, vm_name, wait_time):
    """Poll once a second (up to *wait_time* s) until *vm_name* is ACTIVE.

    Returns True on ACTIVE; False on any state other than BUILD, or on
    timeout.
    """
    logging.info('Waiting %is for %s to reach ACTIVE '
                 'state' % (wait_time, vm_name))
    for _ in range(wait_time):
        instance = nova_client.servers.find(name=vm_name)
        status = instance.status
        if status == 'ACTIVE':
            logging.info('%s is ACTIVE' % (vm_name))
            return True
        if status != 'BUILD':
            logging.error('instance %s in unknown '
                          'state %s' % (instance.name, status))
            return False
        time.sleep(1)
    logging.error('instance %s failed to reach '
                  'active state in %is' % (instance.name, wait_time))
    return False
1048 | 410 | |||
1049 | 411 | |||
def wait_for_cloudinit(nova_client, vm_name, bootstring, wait_time):
    """Poll the console log of *vm_name* (up to *wait_time* s) until
    *bootstring* appears; True on success, False on timeout."""
    logging.info('Waiting %is for cloudinit on %s to '
                 'complete' % (wait_time, vm_name))
    instance = nova_client.servers.find(name=vm_name)
    for _ in range(wait_time):
        instance = nova_client.servers.find(name=vm_name)
        if bootstring in instance.get_console_output():
            logging.info('Cloudinit for %s is complete' % (vm_name))
            return True
        time.sleep(1)
    logging.error('cloudinit for instance %s failed '
                  'to complete in %is' % (instance.name, wait_time))
    return False
1064 | 426 | |||
1065 | 427 | |||
def wait_for_boot(nova_client, vm_name, bootstring, active_wait,
                  cloudinit_wait):
    """Block until *vm_name* is ACTIVE and cloud-init has printed
    *bootstring*; raise on either failure."""
    became_active = wait_for_active(nova_client, vm_name, active_wait)
    if not became_active:
        raise Exception('Error initialising %s' % vm_name)
    cloudinit_done = wait_for_cloudinit(nova_client, vm_name, bootstring,
                                        cloudinit_wait)
    if not cloudinit_done:
        raise Exception('Cloudinit error %s' % vm_name)
1073 | 435 | |||
1074 | 436 | |||
def wait_for_ping(ip, wait_time):
    """Ping *ip* once a second for up to *wait_time* s; True on the first
    reply, False when every attempt fails."""
    logging.info('Waiting for ping to %s' % (ip))
    attempts_left = wait_time
    while attempts_left > 0:
        if ping(ip):
            logging.info('Ping %s success' % (ip))
            return True
        time.sleep(1)
        attempts_left -= 1
    logging.error('Ping failed for %s' % (ip))
    return False
1084 | 446 | |||
1085 | 447 | |||
def assign_floating_ip(nova_client, vm_name):
    """Allocate a floating IP, attach it to *vm_name* and return the
    address as a string."""
    floating_ip = nova_client.floating_ips.create()
    logging.info('Assigning floating IP %s to %s' % (floating_ip.ip, vm_name))
    instance = nova_client.servers.find(name=vm_name)
    instance.add_floating_ip(floating_ip)
    return floating_ip.ip
1092 | 454 | |||
1093 | 455 | |||
def add_secgroup_rules(nova_client):
    """Open ssh (tcp/22) and ping (icmp) in the 'default' security group,
    unless matching rules already exist."""
    secgroup = nova_client.security_groups.find(name="default")
    # Using presence of a 22 rule to indicate whether secgroup rules
    # have been added
    port_rules = [rule['to_port'] for rule in secgroup.rules]
    if 22 in port_rules:
        logging.warn('Security group rules for ssh already added')
    else:
        logging.info('Adding ssh security group rule')
        nova_client.security_group_rules.create(secgroup.id,
                                                ip_protocol="tcp",
                                                from_port=22,
                                                to_port=22)
    # -1 is nova's "all ICMP types" marker.
    if -1 in port_rules:
        logging.warn('Security group rules for ping already added')
    else:
        logging.info('Adding ping security group rule')
        nova_client.security_group_rules.create(secgroup.id,
                                                ip_protocol="icmp",
                                                from_port=-1,
                                                to_port=-1)
1115 | 477 | |||
1116 | 478 | |||
def ping(ip):
    """Return True when a single ping (1s timeout) to *ip* succeeds.

    Fix: the two open('/dev/null') handles were never closed, leaking a
    file-descriptor pair on every call.
    """
    # Use the system ping command with count of 1 and wait time of 1.
    with open(os.devnull, 'w') as devnull:
        ret = subprocess.call(['ping', '-c', '1', '-W', '1', ip],
                              stdout=devnull, stderr=devnull)
    return ret == 0
1123 | 485 | |||
1124 | 486 | |||
def ssh_test(username, ip, vm_name, password=None, privkey=None):
    """ssh to *ip* and check that `uname -n` reports *vm_name*.

    Authenticates with *privkey* (RSA key material) when given, otherwise
    with *password*.  Returns True on a hostname match, False otherwise.
    """
    logging.info('Attempting to ssh to %s(%s)' % (vm_name, ip))
    ssh = paramiko.SSHClient()
    # Auto-accept unknown host keys: these are freshly booted test VMs.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if privkey:
        key = paramiko.RSAKey.from_private_key(StringIO.StringIO(privkey))
        ssh.connect(ip, username=username, password='', pkey=key)
    else:
        ssh.connect(ip, username=username, password=password)
    stdin, stdout, stderr = ssh.exec_command('uname -n')
    return_string = stdout.readlines()[0].strip()
    ssh.close()
    if return_string == vm_name:
        logging.info('SSH to %s(%s) succesfull' % (vm_name, ip))
        return True
    else:
        logging.info('SSH to %s(%s) failed' % (vm_name, ip))
        return False
1143 | 505 | |||
1144 | 506 | |||
def boot_and_test(nova_client, image_name, flavor_name, number, privkey,
                  active_wait=180, cloudinit_wait=180, ping_wait=180):
    """Boot *number* instances of *image_name*/*flavor_name* and verify
    each one: reaches ACTIVE, finishes cloud-init, answers ping on a fresh
    floating IP, and accepts ssh (password or *privkey*, per images.yaml).

    Raises Exception at the first instance failing any check.
    """
    image_config = mojo_utils.get_mojo_config('images.yaml')
    for counter in range(number):
        instance = boot_instance(nova_client,
                                 image_name=image_name,
                                 flavor_name=flavor_name,
                                 key_name='mojo')
        wait_for_boot(nova_client, instance.name,
                      image_config[image_name]['bootstring'], active_wait,
                      cloudinit_wait)
        ip = assign_floating_ip(nova_client, instance.name)
        # Fix: ping was previously run twice — an unconditional call whose
        # result was discarded, then the checked call below.
        if not wait_for_ping(ip, ping_wait):
            raise Exception('Ping of %s failed' % (ip))
        ssh_test_args = {
            'username': image_config[image_name]['username'],
            'ip': ip,
            'vm_name': instance.name,
        }
        if image_config[image_name]['auth_type'] == 'password':
            ssh_test_args['password'] = image_config[image_name]['password']
        elif image_config[image_name]['auth_type'] == 'privkey':
            ssh_test_args['privkey'] = privkey
        if not ssh_test(**ssh_test_args):
            # Fix: "'SSH failed' % (ip)" raised TypeError (no placeholder
            # in the format string).
            raise Exception('SSH to %s failed' % (ip))
1171 | 533 | |||
1172 | 534 | |||
1173 | 535 | # Hacluster helper | ||
# Hacluster helper
def get_crm_leader(service, resource=None):
    """Return the machine number hosting the crm *resource* for *service*.

    Scrapes `sudo crm status` on every unit of the service; *resource* is
    a regex matched against the left-stripped resource lines and defaults
    to the VIP resource pattern.  Raises when the matched lines do not
    agree on exactly one node.
    """
    if not resource:
        resource = 'res_.*_vip'
    leader = set()
    for unit in mojo_utils.get_juju_units(service=service):
        crm_out = mojo_utils.remote_run(unit, 'sudo crm status')[0]
        for line in crm_out.splitlines():
            line = line.lstrip()
            if re.match(resource, line):
                # Last column of the resource line is the node name.
                leader.add(line.split()[-1])
    if len(leader) != 1:
        raise Exception('Unexpected leader count: ' + str(len(leader)))
    # Node names end with the machine number (callers feed this into
    # convert_machineno_to_unit) — assumes a '<prefix>-<machineno>' form.
    return leader.pop().split('-')[-1]
1187 | 549 | |||
1188 | 550 | |||
def delete_crm_leader(service, resource=None):
    """Destroy the juju unit currently holding the crm leader *resource*
    for *service*."""
    leader_machine = get_crm_leader(service, resource)
    leader_unit = mojo_utils.convert_machineno_to_unit(leader_machine)
    mojo_utils.delete_unit(leader_unit)
1193 | 0 | 555 | ||
1194 | === added file 'mojo-spec-helpers/utils/mojo_utils.py' | |||
1195 | --- mojo-spec-helpers/utils/mojo_utils.py 1970-01-01 00:00:00 +0000 | |||
1196 | +++ mojo-spec-helpers/utils/mojo_utils.py 2015-09-08 08:23:55 +0000 | |||
1197 | @@ -0,0 +1,356 @@ | |||
1198 | 1 | #!/usr/bin/python | ||
1199 | 2 | |||
1200 | 3 | import subprocess | ||
1201 | 4 | import yaml | ||
1202 | 5 | import os | ||
1203 | 6 | import mojo | ||
1204 | 7 | import logging | ||
1205 | 8 | import time | ||
1206 | 9 | from collections import Counter | ||
1207 | 10 | from swiftclient.client import Connection | ||
1208 | 11 | |||
# Juju agent/instance states bucketed by health: 'transitional' states are
# expected to resolve by themselves, 'bad' states need intervention.
JUJU_STATUSES = {
    'good': ['ACTIVE', 'started'],
    'bad': ['error'],
    # Fix: 'pending' was listed twice.
    'transitional': ['pending', 'down', 'installed', 'stopped'],
}
1214 | 17 | |||
1215 | 18 | |||
def get_juju_status(service=None):
    """Return parsed `juju status` output, optionally scoped to one
    service."""
    cmd = ['juju', 'status']
    if service:
        cmd.append(service)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    return yaml.load(proc.stdout)
1222 | 25 | |||
1223 | 26 | |||
def get_juju_units(juju_status=None, service=None):
    """Return every unit name in *juju_status* (fetched when None),
    optionally restricted to a single *service*."""
    if not juju_status:
        juju_status = get_juju_status()
    services = [service] if service else list(juju_status['services'])
    units = []
    for svc in services:
        svc_info = juju_status['services'][svc]
        if 'units' in svc_info:
            units.extend(svc_info['units'])
    return units
1237 | 40 | |||
1238 | 41 | |||
def convert_machineno_to_unit(machineno, juju_status=None):
    """Return the name of the unit running on machine *machineno*, or None
    when no unit matches."""
    if not juju_status:
        juju_status = get_juju_status()
    for svc_info in juju_status['services'].values():
        for unit, unit_info in svc_info.get('units', {}).items():
            if unit_info['machine'] == machineno:
                return unit
1249 | 52 | |||
1250 | 53 | |||
def remote_shell_check(unit):
    """Return True when `juju run` can execute a trivial command on *unit*.

    Fix: the devnull handle was opened and never closed (fd leak per call).
    """
    cmd = ['juju', 'run', '--unit', unit, 'uname -a']
    with open(os.devnull, 'w') as devnull:
        return not subprocess.call(cmd, stdout=devnull,
                                   stderr=subprocess.STDOUT)
1255 | 58 | |||
1256 | 59 | |||
def remote_run(unit, remote_cmd=None):
    """Run *remote_cmd* (default 'uname -a') on *unit* via `juju run`.

    Returns the (stdout, stderr) tuple from Popen.communicate(); raises
    Exception on a non-zero exit status.
    """
    cmd = ['juju', 'run', '--unit', unit]
    if remote_cmd:
        cmd.append(remote_cmd)
    else:
        cmd.append('uname -a')
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output = p.communicate()
    if p.returncode != 0:
        # Fix: previous message ('Error running nagios checks') was a
        # copy/paste leftover that hid what actually failed.
        raise Exception('Error running %r on unit %s' % (cmd, unit))
    return output
1268 | 71 | |||
1269 | 72 | |||
def remote_upload(unit, script, remote_dir=None):
    """Copy *script* to *unit* via `juju scp`, into *remote_dir* when given
    and /tmp/ otherwise."""
    destination = remote_dir if remote_dir else '/tmp/'
    target = unit + ':' + destination
    return subprocess.check_call(['juju', 'scp', script, target])
1277 | 80 | |||
1278 | 81 | |||
def delete_unit(unit):
    """Destroy *unit*, block until juju reports one fewer unit for its
    service, then wait for the environment to settle."""
    service = unit.split('/')[0]
    unit_count = len(get_juju_units(service=service))
    logging.info('Removing unit ' + unit)
    cmd = ['juju', 'destroy-unit', unit]
    subprocess.check_call(cmd)
    target_num = unit_count - 1
    # Wait for the unit to disappear from juju status
    while len(get_juju_units(service=service)) > target_num:
        time.sleep(5)
    juju_wait_finished()
1290 | 93 | |||
1291 | 94 | |||
def add_unit(service, unit_num=None):
    """Add *unit_num* (default 1) units to *service*, block until the new
    units show up in juju status, then wait for the environment to
    settle."""
    unit_count = len(get_juju_units(service=service))
    if unit_num:
        additional_units = int(unit_num)
    else:
        additional_units = 1
    logging.info('Adding %i unit(s) to %s' % (additional_units, service))
    cmd = ['juju', 'add-unit', service, '-n', str(additional_units)]
    subprocess.check_call(cmd)
    target_num = unit_count + additional_units
    # Wait for the new unit to appear in juju status
    while len(get_juju_units(service=service)) < target_num:
        time.sleep(5)
    juju_wait_finished()
1306 | 109 | |||
1307 | 110 | |||
def juju_set(service, option):
    """Apply a 'key=value' config *option* to *service* and wait for the
    environment to settle."""
    cmd = ['juju', 'set', service, option]
    subprocess.check_call(cmd)
    juju_wait_finished()
1311 | 114 | |||
1312 | 115 | |||
def juju_set_config_option(service, option_name, value):
    """Convenience wrapper: set *option_name*=*value* on *service*."""
    juju_set(service, "{}={}".format(option_name, value))
1316 | 119 | |||
1317 | 120 | |||
def juju_get(service, option):
    """Return the current value of *option* on *service*, or None when the
    option has no explicit 'value' entry in `juju get` output."""
    cmd = ['juju', 'get', service]
    juju_get_output = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    service_config = yaml.load(juju_get_output)
    if 'value' in service_config['settings'][option]:
        return service_config['settings'][option]['value']
1324 | 127 | |||
1325 | 128 | |||
def get_undercload_auth():
    """Return OS_* auth settings for the current juju environment, read
    from ~/.juju/environments.yaml.

    (The misspelled name is kept — callers depend on it.)
    Fix: the environments.yaml handle was opened and never closed.
    """
    juju_env = subprocess.check_output(['juju', 'switch']).strip('\n')
    env_path = os.path.join(os.environ['HOME'], '.juju', 'environments.yaml')
    with open(env_path, 'r') as juju_env_file:
        juju_env_contents = yaml.load(juju_env_file)
    novarc_settings = juju_env_contents['environments'][juju_env]
    auth_settings = {
        'OS_AUTH_URL': novarc_settings['auth-url'],
        'OS_TENANT_NAME': novarc_settings['tenant-name'],
        'OS_USERNAME': novarc_settings['username'],
        'OS_PASSWORD': novarc_settings['password'],
        'OS_REGION_NAME': novarc_settings['region'],
    }
    return auth_settings
1339 | 142 | |||
1340 | 143 | |||
1341 | 144 | # Openstack Client helpers | ||
# Openstack Client helpers
def get_auth_url(juju_status=None):
    """Return the keystone endpoint address: the configured VIP when one
    is set, otherwise the public address of an arbitrary keystone unit."""
    if juju_get('keystone', 'vip'):
        return juju_get('keystone', 'vip')
    if not juju_status:
        juju_status = get_juju_status()
    # Python 2 idiom: take any one unit's info from the units dict.
    unit = juju_status['services']['keystone']['units'].itervalues().next()
    return unit['public-address']
1349 | 152 | |||
1350 | 153 | |||
def get_overcloud_auth(juju_status=None):
    """Return admin OS_* credentials for the overcloud keystone.

    Uses https/35357 when the keystone charm has use-https=yes, otherwise
    http/5000.  Fix: juju_get() returns None when the option is unset,
    which made the .lower() call blow up; treat unset as 'no'.
    """
    if not juju_status:
        juju_status = get_juju_status()
    use_https = juju_get('keystone', 'use-https') or 'no'
    if use_https.lower() == 'yes':
        transport = 'https'
        port = 35357
    else:
        transport = 'http'
        port = 5000
    address = get_auth_url()
    auth_settings = {
        'OS_AUTH_URL': '%s://%s:%i/v2.0' % (transport, address, port),
        'OS_TENANT_NAME': 'admin',
        'OS_USERNAME': 'admin',
        'OS_PASSWORD': 'openstack',
        'OS_REGION_NAME': 'RegionOne',
    }
    return auth_settings
1369 | 172 | |||
1370 | 173 | |||
def get_mojo_file(filename):
    """Resolve *filename* inside the current mojo spec/stage (from the
    MOJO_SPEC_DIR / MOJO_STAGE environment) and return its path."""
    spec = mojo.Spec(os.environ['MOJO_SPEC_DIR'])
    return spec.get_config(filename, stage=os.environ['MOJO_STAGE'])
1374 | 177 | |||
1375 | 178 | |||
def get_mojo_spec_revno():
    """
    Get the current revision number of the mojo spec.

    Fix: the command was built as one string and .split(), which broke for
    spec paths containing whitespace; pass an argv list instead.
    """
    cmd = ['bzr', 'revno', os.environ['MOJO_SPEC_DIR']]
    return subprocess.check_output(cmd).strip()
1383 | 186 | |||
1384 | 187 | |||
def get_mojo_config(filename):
    """Load the mojo config *filename* (resolved via get_mojo_file) as
    YAML.

    Fix: file() left the handle open; use a context manager (the file()
    builtin is also gone in Python 3).
    """
    config_file = get_mojo_file(filename)
    logging.info('Using config %s' % (config_file))
    with open(config_file, 'r') as config:
        return yaml.load(config)
1389 | 192 | |||
1390 | 193 | |||
def get_charm_dir():
    """Return the charm repository directory for the current series,
    i.e. $MOJO_REPO_DIR/$MOJO_SERIES."""
    repo = os.environ['MOJO_REPO_DIR']
    series = os.environ['MOJO_SERIES']
    return os.path.join(repo, series)
1394 | 197 | |||
1395 | 198 | |||
def sync_charmhelpers(charmdir):
    """Run `make sync` inside *charmdir* and wait for it to finish."""
    proc = subprocess.Popen(['make', 'sync'], cwd=charmdir)
    proc.communicate()
1399 | 202 | |||
1400 | 203 | |||
def sync_all_charmhelpers():
    """Run a charm-helpers sync in every charm directory under the current
    charm repo."""
    charm_base_dir = get_charm_dir()
    for entry in os.listdir(charm_base_dir):
        candidate = os.path.join(charm_base_dir, entry)
        if os.path.isdir(candidate):
            sync_charmhelpers(candidate)
1407 | 210 | |||
1408 | 211 | |||
def parse_mojo_arg(options, mojoarg, multiargs=False):
    """Resolve *mojoarg*: prefer the MOJOARG environment variable
    (whitespace-split when *multiargs*), falling back to the attribute of
    the same name on *options*."""
    env_key = mojoarg.upper()
    if env_key not in os.environ:
        return getattr(options, mojoarg)
    value = os.environ[env_key]
    return value.split() if multiargs else value
1417 | 220 | |||
1418 | 221 | |||
def get_machine_state(juju_status, state_type):
    """Tally *state_type* values across all machines in *juju_status*.

    Machines that do not report *state_type* are counted as 'unknown'.
    Returns a collections.Counter mapping state name to count.
    """
    states = Counter()
    for machine in juju_status['machines'].values():
        states[machine.get(state_type, 'unknown')] += 1
    return states
1428 | 231 | |||
1429 | 232 | |||
def get_machine_agent_states(juju_status):
    """Return a Counter of machine 'agent-state' values."""
    return get_machine_state(juju_status, 'agent-state')
1432 | 235 | |||
1433 | 236 | |||
def get_machine_instance_states(juju_status):
    """Return a Counter of machine 'instance-state' values."""
    return get_machine_state(juju_status, 'instance-state')
1436 | 239 | |||
1437 | 240 | |||
def get_service_agent_states(juju_status):
    """Tally unit (and subordinate unit) agent-states over all services.

    Returns a collections.Counter mapping agent-state name to count.
    """
    service_state = Counter()
    for service in juju_status['services'].values():
        for unit_info in service.get('units', {}).values():
            service_state[unit_info['agent-state']] += 1
            # Subordinate units hang off their principal unit's entry.
            for sub_info in unit_info.get('subordinates', {}).values():
                service_state[sub_info['agent-state']] += 1
    return service_state
1451 | 254 | |||
1452 | 255 | |||
def juju_status_summary(heading, statetype, states):
    # Print an indented summary of state counts, e.g.:
    #   Machines
    #     Agent State
    #       started: 3
    print heading
    print " " + statetype
    for state in states:
        print " %s: %i" % (state, states[state])
1458 | 261 | |||
1459 | 262 | |||
def juju_status_error_check(states):
    """Return True (logging an error) if any state is in JUJU_STATUSES['bad']."""
    if any(state in JUJU_STATUSES['bad'] for state in states):
        logging.error('Some statuses are in a bad state')
        return True
    logging.info('No statuses are in a bad state')
    return False
1467 | 270 | |||
1468 | 271 | |||
def juju_status_all_stable(states):
    """Return True unless any state is in JUJU_STATUSES['transitional']."""
    if any(state in JUJU_STATUSES['transitional'] for state in states):
        logging.info('Some statuses are in a transitional state')
        return False
    logging.info('Statuses are in a stable state')
    return True
1476 | 279 | |||
1477 | 280 | |||
def juju_status_check_and_wait():
    # Poll 'juju status' until every machine and service check reports a
    # stable state, raising if any state is in JUJU_STATUSES['bad'], then
    # print a summary of the final counts.
    checks = {
        'Machines': [{
            'Heading': 'Instance State',
            'check_func': get_machine_instance_states,
        },
        {
            'Heading': 'Agent State',
            'check_func': get_machine_agent_states,
        }],
        'Services': [{
            'Heading': 'Agent State',
            'check_func': get_service_agent_states,
        }]
    }
    # Seed with False so the polling loop body runs at least once.
    stable_state = [False]
    while False in stable_state:
        juju_status = get_juju_status()
        stable_state = []
        for juju_objtype, check_info in checks.iteritems():
            for check in check_info:
                check_function = check['check_func']
                states = check_function(juju_status)
                # Any state listed in JUJU_STATUSES['bad'] aborts the wait.
                if juju_status_error_check(states):
                    raise Exception("Error in juju status")
                stable_state.append(juju_status_all_stable(states))
        time.sleep(5)
    # Summarise the state counts from the last status snapshot taken above.
    for juju_objtype, check_info in checks.iteritems():
        for check in check_info:
            check_function = check['check_func']
            states = check_function(juju_status)
            juju_status_summary(juju_objtype, check['Heading'], states)
1510 | 313 | |||
1511 | 314 | |||
def remote_runs(units):
    """Run the remote shell check on every unit; raise if any fails."""
    for unit in units:
        if remote_shell_check(unit):
            continue
        raise Exception('Juju run failed on ' + unit)
1516 | 319 | |||
1517 | 320 | |||
def juju_check_hooks_complete():
    """Run the remote check twice over all units, as the original did."""
    juju_units = get_juju_units()
    for _ in range(2):
        remote_runs(juju_units)
1522 | 325 | |||
1523 | 326 | |||
def juju_wait_finished():
    """Block until juju reports a stable, hook-quiesced environment."""
    # First wait for juju status to settle...
    juju_status_check_and_wait()
    # ...then confirm hooks are idle (status can report done while hooks
    # are still firing)...
    juju_check_hooks_complete()
    # ...and re-check that nothing has subsequently gone bad.
    juju_status_check_and_wait()
1532 | 335 | |||
1533 | 336 | |||
def build_swift_connection():
    """Build a swiftclient Connection from OS_* environment variables.

    Reads OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME and,
    optionally, OS_STORAGE_URL.
    """
    env = os.environ.get

    os_options = {'tenant_name': env('OS_TENANT_NAME')}
    storage_url = env('OS_STORAGE_URL')
    if storage_url:
        os_options['object_storage_url'] = storage_url

    return Connection(
        env('OS_AUTH_URL'),
        env('OS_USERNAME'),
        env('OS_PASSWORD'),
        auth_version='2.0',
        os_options=os_options
    )
1554 | 0 | 357 | ||
1555 | === added file 'mojo-spec-helpers/utils/shyaml.py' | |||
1556 | --- mojo-spec-helpers/utils/shyaml.py 1970-01-01 00:00:00 +0000 | |||
1557 | +++ mojo-spec-helpers/utils/shyaml.py 2015-09-08 08:23:55 +0000 | |||
1558 | @@ -0,0 +1,219 @@ | |||
1559 | 1 | #!/usr/bin/env python | ||
1560 | 2 | |||
1561 | 3 | # Note: to launch test, you can use: | ||
1562 | 4 | # python -m doctest -d shyaml.py | ||
1563 | 5 | # or | ||
1564 | 6 | # nosetests | ||
1565 | 7 | |||
1566 | 8 | from __future__ import print_function | ||
1567 | 9 | |||
1568 | 10 | import sys | ||
1569 | 11 | import yaml | ||
1570 | 12 | import os.path | ||
1571 | 13 | import re | ||
1572 | 14 | |||
1573 | 15 | EXNAME = os.path.basename(sys.argv[0]) | ||
1574 | 16 | |||
1575 | 17 | |||
def tokenize(s):
    r"""Return an iterable over all subparts of a '.' separated string.

    So:

    >>> list(tokenize('foo.bar.wiz'))
    ['foo', 'bar', 'wiz']

    This function interprets the string: characters with meaning are '.'
    and '\', and both can be included in a token by quoting them with
    '\'.  So dots or slashes can be contained in a token:

    >>> print('\n'.join(tokenize(r'foo.dot<\.>.slash<\\>')))
    foo
    dot<.>
    slash<\>

    Notice that empty keys are also supported:

    >>> list(tokenize(r'foo..bar'))
    ['foo', '', 'bar']

    Given an empty string:

    >>> list(tokenize(r''))
    ['']

    And a None value:

    >>> list(tokenize(None))
    []
    """
    if s is None:
        # PEP 479: 'raise StopIteration' inside a generator turns into a
        # RuntimeError on Python 3.7+; a bare return ends iteration safely
        # on every version.
        return
    tokens = (re.sub(r'\\(\\|\.)', r'\1', m.group(0))
              for m in re.finditer(r'((\\.|[^.\\])*)', s))
    # The regex emits a superfluous empty-string match after every
    # non-empty token; consume it so only real tokens are yielded.
    for token in tokens:
        if len(token) != 0:
            next(tokens)
        yield token
1609 | 51 | |||
1610 | 52 | |||
def mget(dct, key, default=None):
    r"""Get values deep in a dict with dotted keys.

    Accessing leaf values is quite straightforward:

    >>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
    >>> mget(dct, 'a.x')
    1
    >>> mget(dct, 'a.b.c')
    2

    But you can also get a subdict if your key is not targeting a
    leaf value:

    >>> mget(dct, 'a.b')
    {'c': 2}

    As a special feature, list access is also supported by providing a
    (possibly signed) integer; it'll be interpreted as usual python
    sequence access using bracket notation:

    >>> mget({'a': {'x': [1, 5], 'b': {'c': 2}}}, 'a.x.-1')
    5
    >>> mget({'a': {'x': 1, 'b': [{'c': 2}]}}, 'a.b.0.c')
    2

    Keys that contain '.' can be accessed by escaping them:

    >>> dct = {'a': {'x': 1}, 'a.x': 3, 'a.y': 4}
    >>> mget(dct, 'a.x')
    1
    >>> mget(dct, r'a\.x')
    3
    >>> mget(dct, r'a.y')
    >>> mget(dct, r'a\.y')
    4

    As a consequence, if your key contains a '\', you should also escape it:

    >>> dct = {r'a\x': 3, r'a\.x': 4, 'a.x': 5, 'a\\': {'x': 6}}
    >>> mget(dct, r'a\\x')
    3
    >>> mget(dct, r'a\\\.x')
    4
    >>> mget(dct, r'a\\.x')
    6
    >>> mget({'a\\': {'b': 1}}, r'a\\.b')
    1
    >>> mget({r'a.b\.c': 1}, r'a\.b\\\.c')
    1

    And even empty string keys are supported:

    >>> dct = {r'a': {'': {'y': 3}, 'y': 4}, 'b': {'': {'': 1}}, '': 2}
    >>> mget(dct, r'a..y')
    3
    >>> mget(dct, r'a.y')
    4
    >>> mget(dct, r'')
    2
    >>> mget(dct, r'b..')
    1

    mget supports also a default value if the key is not found:

    >>> mget({'a': 1}, 'b.y', default='N/A')
    'N/A'

    but will complain if you are trying to get into a leaf:

    >>> mget({'a': 1}, 'a.y', default='N/A')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError: 'int' object ...

    if the key is None, the whole dct should be sent back:

    >>> mget({'a': 1}, None)
    {'a': 1}
    """
    path = tokenize(key)
    return aget(dct, path, default)
1674 | 116 | |||
1675 | 117 | |||
def aget(dct, key, default=None):
    r"""Get values deep in *dct* following the iterable *key* path.

    Accessing leaf values is quite straightforward:

    >>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
    >>> aget(dct, ('a', 'x'))
    1
    >>> aget(dct, ('a', 'b', 'c'))
    2

    If key is empty, it returns unchanged the ``dct`` value.

    >>> aget({'x': 1}, ())
    {'x': 1}
    """
    node = dct
    for part in key:
        # Lists are indexed numerically; anything else by the raw token.
        # KeyError yields the default; IndexError/ValueError/TypeError
        # propagate, as in the recursive original.
        try:
            node = node[int(part)] if isinstance(node, list) else node[part]
        except KeyError:
            return default
    return node
1698 | 140 | |||
1699 | 141 | |||
def stderr(msg):
    """Write *msg* plus a trailing newline to standard error."""
    sys.stderr.write("{0}\n".format(msg))
1702 | 144 | |||
1703 | 145 | |||
def die(msg, errlvl=1, prefix="Error: "):
    """Print *prefix* + *msg* to stderr, then exit with code *errlvl*."""
    stderr("{0}{1}".format(prefix, msg))
    sys.exit(errlvl)
1707 | 149 | |||
# Scalar values are emitted verbatim; everything else is dumped as YAML.
SIMPLE_TYPES = (str, int, float)
COMPLEX_TYPES = (list, dict)


def dump(value):
    """Render *value* for output: scalars as-is, structures as YAML text."""
    if isinstance(value, SIMPLE_TYPES):
        return value
    return yaml.dump(value, default_flow_style=False)
1715 | 157 | |||
1716 | 158 | |||
def type_name(value):
    """Return the pseudo-YAML type name of *value*."""
    if isinstance(value, dict):
        return "struct"
    if isinstance(value, (tuple, list)):
        return "sequence"
    return type(value).__name__
1722 | 164 | |||
1723 | 165 | |||
def stdout(value):
    """Write *value* to standard output without adding a newline."""
    sys.stdout.write(value)
1726 | 168 | |||
1727 | 169 | |||
def main(args):
    """CLI entry point: read YAML from stdin and query it.

    args -- argv minus the program name: [ACTION, KEY, DEFAULT].
    Exits via die() on bad input or unknown action.
    """
    usage = """usage:
    %(exname)s {get-value{,-0},get-type,keys{,-0},values{,-0}} KEY DEFAULT
    """ % {"exname": EXNAME}
    if len(args) == 0:
        die(usage, errlvl=0, prefix="")
    action = args[0]
    # No key means "the whole document" (mget returns dct for a None key).
    key_value = None if len(args) == 1 else args[1]
    default = args[2] if len(args) > 2 else ""
    contents = yaml.load(sys.stdin)
    try:
        value = mget(contents, key_value, default)
    except IndexError:
        die("list index error in path %r." % key_value)
    except (KeyError, TypeError):
        die("invalid path %r." % key_value)

    tvalue = type_name(value)
    # '-0' action variants separate output records with NUL instead of \n.
    termination = "\0" if action.endswith("-0") else "\n"

    if action == "get-value":
        print(dump(value), end='')
    elif action in ("get-values", "get-values-0"):
        if isinstance(value, dict):
            # Emit alternating key/value records.
            for k, v in value.iteritems():
                stdout("%s%s%s%s" % (dump(k), termination,
                                     dump(v), termination))
        elif isinstance(value, list):
            for l in value:
                stdout("%s%s" % (dump(l), termination))
        else:
            die("%s does not support %r type. "
                "Please provide or select a sequence or struct."
                % (action, tvalue))
    elif action == "get-type":
        print(tvalue)
    elif action in ("keys", "keys-0", "values", "values-0"):
        if isinstance(value, dict):
            method = value.keys if action.startswith("keys") else value.values
            for k in method():
                stdout("%s%s" % (dump(k), termination))
        else:
            die("%s does not support %r type. "
                "Please provide or select a struct." % (action, tvalue))
    else:
        die("Invalid argument.\n%s" % usage)
1774 | 216 | |||
1775 | 217 | |||
1776 | 218 | if __name__ == "__main__": | ||
1777 | 219 | sys.exit(main(sys.argv[1:])) | ||
1778 | 0 | 220 | ||
1779 | === added directory 'mojo-spec-helpers/utils/tests' | |||
1780 | === added file 'mojo-spec-helpers/utils/tests/README.md' | |||
1781 | --- mojo-spec-helpers/utils/tests/README.md 1970-01-01 00:00:00 +0000 | |||
1782 | +++ mojo-spec-helpers/utils/tests/README.md 2015-09-08 08:23:55 +0000 | |||
1783 | @@ -0,0 +1,67 @@ | |||
1784 | 1 | Tests for python managers | ||
1785 | 2 | === | ||
1786 | 3 | |||
1787 | 4 | These are tests for `container_managers.py`, `cache_managers.py` | ||
1788 | 5 | and some functions in `mojo_utils.py`. | ||
1789 | 6 | |||
1790 | 7 | Setup | ||
1791 | 8 | --- | ||
1792 | 9 | |||
1793 | 10 | ### A test container | ||
1794 | 11 | |||
1795 | 12 | Tests for `container_managers.py` require a test container to be setup with objects named as follows: | ||
1796 | 13 | |||
1797 | 14 | - latest-build-label | ||
1798 | 15 | - deployed-build-label | ||
1799 | 16 | - deployed-spec-revno | ||
1800 | 17 | - code-upgrade-test-build-label-01-test-build-label-02-succeeded | ||
1801 | 18 | - mojo-run-155-succeeded | ||
1802 | 19 | |||
1803 | 20 | This container should be openly readable: | ||
1804 | 21 | |||
1805 | 22 | ``` bash | ||
1806 | 23 | swift post --read-acl .r:* ${TEST_CONTAINER_NAME} | ||
1807 | 24 | ``` | ||
1808 | 25 | |||
1809 | 26 | You should then set the URL for this container in the environment variable `TEST_CONTAINER_URL`: | ||
1810 | 27 | |||
1811 | 28 | ``` bash | ||
1812 | 29 | export TEST_CONTAINER_URL=$(swift stat -v ${TEST_CONTAINER_NAME} | egrep -o 'http.*$') | ||
1813 | 30 | ``` | ||
1814 | 31 | |||
1815 | 32 | ### A local swift account | ||
1816 | 33 | |||
1817 | 34 | You also need to make the credentials for connecting to a swift account available with `OS_*` environment variables: | ||
1818 | 35 | |||
1819 | 36 | ``` bash | ||
1820 | 37 | export OS_USERNAME=${USERNAME} | ||
1821 | 38 | export OS_TENANT_NAME=${TENANT_NAME} | ||
1822 | 39 | export OS_PASSWORD=${PASSWORD} | ||
1823 | 40 | export OS_STORAGE_URL=${OPTIONAL_STORAGE_URL} | ||
1824 | 41 | export OS_AUTH_URL=${AUTH_URL} | ||
1825 | 42 | export OS_REGION_NAME=${REGION_NAME} | ||
1826 | 43 | ``` | ||
1827 | 44 | |||
1828 | 45 | ### PYTHONPATH | ||
1829 | 46 | |||
1830 | 47 | You also need to add the parent directory to your python path to run the tests: | ||
1831 | 48 | |||
1832 | 49 | ``` bash | ||
1833 | 50 | export PYTHONPATH=.. | ||
1834 | 51 | ``` | ||
1835 | 52 | |||
1836 | 53 | Running the tests | ||
1837 | 54 | --- | ||
1838 | 55 | |||
1839 | 56 | You can either run the tests with native python: | ||
1840 | 57 | |||
1841 | 58 | ``` bash | ||
1842 | 59 | ./run_tests.py | ||
1843 | 60 | ``` | ||
1844 | 61 | |||
1845 | 62 | Or with `pytest`: | ||
1846 | 63 | |||
1847 | 64 | ``` bash | ||
1848 | 65 | py.test | ||
1849 | 66 | ``` | ||
1850 | 67 | |||
1851 | 0 | 68 | ||
1852 | === added file 'mojo-spec-helpers/utils/tests/run_tests.py' | |||
1853 | --- mojo-spec-helpers/utils/tests/run_tests.py 1970-01-01 00:00:00 +0000 | |||
1854 | +++ mojo-spec-helpers/utils/tests/run_tests.py 2015-09-08 08:23:55 +0000 | |||
1855 | @@ -0,0 +1,18 @@ | |||
1856 | 1 | #!/usr/bin/env python | ||
1857 | 2 | |||
1858 | 3 | """ | ||
1859 | 4 | These are tests for container_managers.py, cache_managers.py | ||
1860 | 5 | and some scripts in mojo_utils.py | ||
1861 | 6 | |||
1862 | 7 | To run the container_tests, you'll need to set several environment | ||
1863 | 8 | variables - see test_container_managers.py | ||
1864 | 9 | """ | ||
1865 | 10 | |||
1866 | 11 | from test_container_managers import container_tests | ||
1867 | 12 | from test_cache_managers import test_cache_managers | ||
1868 | 13 | from test_mojo_utils import test_get_mojo_spec_revno | ||
1869 | 14 | |||
1870 | 15 | |||
# Run each suite in sequence; any failure raises AssertionError and aborts.
container_tests()
test_cache_managers()
test_get_mojo_spec_revno()
1874 | 0 | 19 | ||
1875 | === added file 'mojo-spec-helpers/utils/tests/test_cache_managers.py' | |||
1876 | --- mojo-spec-helpers/utils/tests/test_cache_managers.py 1970-01-01 00:00:00 +0000 | |||
1877 | +++ mojo-spec-helpers/utils/tests/test_cache_managers.py 2015-09-08 08:23:55 +0000 | |||
1878 | @@ -0,0 +1,80 @@ | |||
1879 | 1 | # System imports | ||
1880 | 2 | import os | ||
1881 | 3 | import subprocess | ||
1882 | 4 | import json | ||
1883 | 5 | |||
1884 | 6 | # Local imports | ||
1885 | 7 | from cache_managers import JsonCache | ||
1886 | 8 | |||
1887 | 9 | |||
def _check_cache_file(cache, cache_path):
    """Assert the file at *cache_path* exists and matches *cache*'s data."""
    # Check file exists
    assert os.path.isfile(cache_path), "Cache file not created"
    print u"\u2713 Cache file exists: {}".format(cache_path)

    # Check the data round-trips: the JSON on disk must equal the
    # in-memory cache contents.
    with open(cache_path) as cache_file:
        assert json.load(cache_file) == cache.get_cache(), "Bad data"
    print u"\u2713 Cache file data is correct"
1897 | 19 | |||
1898 | 20 | |||
def test_cache_managers():
    """Exercise JsonCache: set/get, on-disk integrity, wipe and put_cache."""
    print (
        "\n===\n"
        "Test JsonCache"
        "\n===\n"
    )

    # mktemp -u only generates a free name; the cache creates the file.
    cache_path = subprocess.check_output(
        'mktemp -u /tmp/cache-XXXX.json'.split()
    ).strip()

    cache = JsonCache(cache_path=cache_path)

    fake_data = {
        'sentence': 'hello world',
        'number': 12,
        'array': [1, 2, 3, "fish"]
    }

    # Insert items using "set"
    for key, value in fake_data.iteritems():
        cache.set(key, value)

    # Check data against inserted data
    assert fake_data == cache.get_cache(), "Data not set correctly"
    print u"\u2713 Data correctly inserted"

    # Verify cache file
    _check_cache_file(cache, cache_path)

    # Check retrieving each key with "get"
    for key, value in fake_data.iteritems():
        assert cache.get(key) == value

    print u"\u2713 Successfully retrieved items with 'get'"

    # Check wiping the cache removes both the file and the data
    cache.wipe()

    assert not os.path.isfile(cache_path), "Cache file shouldn't exist!"
    assert not cache.get(fake_data.keys()[0]), "Cache still returning data"
    print u"\u2713 Cache successfully wiped"

    # Recreate cache with "put_cache"
    cache.put_cache(fake_data)

    # Check data against inserted data
    assert fake_data == cache.get_cache()
    print u"\u2713 Data correctly inserted"

    # Check file integrity
    _check_cache_file(cache, cache_path)

    # Clean up
    cache.wipe()
    assert not os.path.isfile(cache_path), "Cache file shouldn't exist!"
    print u"\u2713 Deleted {}".format(cache_path)
1956 | 78 | |||
1957 | 79 | if __name__ == "__main__": | ||
1958 | 80 | test_cache_managers() | ||
1959 | 0 | 81 | ||
1960 | === added file 'mojo-spec-helpers/utils/tests/test_container_managers.py' | |||
1961 | --- mojo-spec-helpers/utils/tests/test_container_managers.py 1970-01-01 00:00:00 +0000 | |||
1962 | +++ mojo-spec-helpers/utils/tests/test_container_managers.py 2015-09-08 08:23:55 +0000 | |||
1963 | @@ -0,0 +1,188 @@ | |||
1964 | 1 | # System | ||
1965 | 2 | import os | ||
1966 | 3 | |||
1967 | 4 | # Local imports | ||
1968 | 5 | from container_managers import ( | ||
1969 | 6 | BuildContainer, | ||
1970 | 7 | CIContainer, | ||
1971 | 8 | DeployedEnvironmentContainer, | ||
1972 | 9 | LocalEnvironmentSwiftContainer | ||
1973 | 10 | ) | ||
1974 | 11 | from mojo_utils import build_swift_connection | ||
1975 | 12 | |||
1976 | 13 | """ | ||
1977 | 14 | Tests for container_managers. | ||
1978 | 15 | |||
1979 | 16 | To test these you'll need to set a TEST_CONTAINER_URL, | ||
1980 | 17 | which should be the HTTP URL to a swift container which contains | ||
1981 | 18 | the following publicly readable objects: | ||
1982 | 19 | - latest-build-label | ||
1983 | 20 | - deployed-build-label | ||
1984 | 21 | - deployed-spec-revno | ||
1985 | 22 | - code-upgrade-test-build-label-01-test-build-label-02-succeeded | ||
1986 | 23 | - mojo-run-155-succeeded | ||
1987 | 24 | |||
1988 | 25 | You'll also need to have the following environment variables setup | ||
1989 | 26 | with credentials to connect to a valid (testing) Swift account: | ||
1990 | 27 | |||
1991 | 28 | OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_REGION_NAME | ||
1992 | 29 | """ | ||
1993 | 30 | |||
1994 | 31 | test_container_url = os.environ.get('TEST_CONTAINER_URL') | ||
1995 | 32 | |||
1996 | 33 | |||
def test_deployed_environment_container():
    """Check DeployedEnvironmentContainer reads the build label and revno.

    Requires TEST_CONTAINER_URL to point at the prepared public container
    described in the module docstring.
    """
    print (
        "\n===\n"
        "Test DeployedEnvironmentContainer"
        "\n===\n"
    )

    deployed_env_container = DeployedEnvironmentContainer(
        container_url=test_container_url,
    )

    build_label = deployed_env_container.deployed_build_label()
    assert build_label, "Build label missing"
    print u"\u2713 Found build label: {}".format(build_label)

    spec_revno = deployed_env_container.deployed_spec_revno()
    assert spec_revno, "Spec revno missing"
    print u"\u2713 Found spec revno (from web): {}".format(spec_revno)
2015 | 52 | |||
2016 | 53 | |||
2017 | 54 | def test_local_environment_swift_container(): | ||
2018 | 55 | print ( | ||
2019 | 56 | "\n===\n" | ||
2020 | 57 | "Test LocalEnvironmentSwiftContainer" | ||
2021 | 58 | "\n===\n" | ||
2022 | 59 | ) | ||
2023 | 60 | |||
2024 | 61 | swift_connection = build_swift_connection() | ||
2025 | 62 | |||
2026 | 63 | container_name = 'test-container' | ||
2027 | 64 | |||
2028 | 65 | swift_connection.put_container(container_name) | ||
2029 | 66 | |||
2030 | 67 | container = LocalEnvironmentSwiftContainer( | ||
2031 | 68 | swift_connection=swift_connection, | ||
2032 | 69 | container_name=container_name | ||
2033 | 70 | ) | ||
2034 | 71 | |||
2035 | 72 | deployed_build_label = "fiddlesticksandfishes" | ||
2036 | 73 | previous_build_label = "fishesandfiddlesticks" | ||
2037 | 74 | deployed_spec_revno = '111' | ||
2038 | 75 | |||
2039 | 76 | container.save_deployed_build_label(deployed_build_label) | ||
2040 | 77 | container.save_previous_build_label(previous_build_label) | ||
2041 | 78 | container.save_mojo_spec_revno(deployed_spec_revno) | ||
2042 | 79 | |||
2043 | 80 | assert container.deployed_build_label() == deployed_build_label | ||
2044 | 81 | assert container.previous_build_label() == previous_build_label | ||
2045 | 82 | assert container.deployed_spec_revno() == deployed_spec_revno | ||
2046 | 83 | print u"\u2713 Saved and retrieved build_labels and spec_revno" | ||
2047 | 84 | |||
2048 | 85 | # Get names for objects | ||
2049 | 86 | previous_obj = container.previous_build_obj | ||
2050 | 87 | deployed_obj = container.deployed_build_obj | ||
2051 | 88 | revno_obj = container.deployed_revno_obj | ||
2052 | 89 | upgrade_object = container.code_upgrade_succeeded_template.format( | ||
2053 | 90 | previous_build_label, deployed_build_label | ||
2054 | 91 | ) | ||
2055 | 92 | run_object = container.mojo_run_succeeded_template.format( | ||
2056 | 93 | deployed_spec_revno | ||
2057 | 94 | ) | ||
2058 | 95 | |||
2059 | 96 | # Remove the objects if they exist | ||
2060 | 97 | for object_name in [previous_obj, deployed_obj, revno_obj, upgrade_object]: | ||
2061 | 98 | try: | ||
2062 | 99 | swift_connection.delete_object(container_name, object_name) | ||
2063 | 100 | except: | ||
2064 | 101 | pass | ||
2065 | 102 | |||
2066 | 103 | container.save_code_upgrade_succeeded( | ||
2067 | 104 | previous_build_label, deployed_build_label | ||
2068 | 105 | ) | ||
2069 | 106 | |||
2070 | 107 | container.save_mojo_run_succeeded(deployed_spec_revno) | ||
2071 | 108 | |||
2072 | 109 | upgrade_head = swift_connection.head_object(container_name, upgrade_object) | ||
2073 | 110 | |||
2074 | 111 | run_head = swift_connection.head_object(container_name, run_object) | ||
2075 | 112 | |||
2076 | 113 | # Make sure the objects were saved to swift | ||
2077 | 114 | assert int(upgrade_head['content-length']) > 0 | ||
2078 | 115 | assert int(run_head['content-length']) > 0 | ||
2079 | 116 | |||
2080 | 117 | print u"\u2713 Code upgrade and mojo run successfully saved" | ||
2081 | 118 | |||
2082 | 119 | |||
def test_latest_build():
    """Check BuildContainer can fetch the latest build label.

    Requires TEST_CONTAINER_URL to point at the prepared public container.
    """
    print (
        "\n===\n"
        "Test BuildContainer"
        "\n===\n"
    )

    build_container = BuildContainer(
        container_url=test_container_url
    )

    build_label = build_container.latest_build_label()
    assert build_label, "Build label missing"
    print u"\u2713 Found latest build label: {}".format(build_label)
2097 | 134 | |||
2098 | 135 | |||
2099 | 136 | def test_ci_container(): | ||
2100 | 137 | print ( | ||
2101 | 138 | "\n===\n" | ||
2102 | 139 | "Test CIContainer" | ||
2103 | 140 | "\n===\n" | ||
2104 | 141 | ) | ||
2105 | 142 | |||
2106 | 143 | ci_container = CIContainer( | ||
2107 | 144 | container_url=test_container_url | ||
2108 | 145 | ) | ||
2109 | 146 | |||
2110 | 147 | build_one = "test-build-label-01" | ||
2111 | 148 | build_two = "test-build-label-02" | ||
2112 | 149 | |||
2113 | 150 | good_upgrade = ci_container.has_code_upgrade_been_tested( | ||
2114 | 151 | from_build_label=build_one, | ||
2115 | 152 | to_build_label=build_two | ||
2116 | 153 | ) | ||
2117 | 154 | bad_upgrade = ci_container.has_code_upgrade_been_tested( | ||
2118 | 155 | from_build_label=build_two, | ||
2119 | 156 | to_build_label=build_one | ||
2120 | 157 | ) | ||
2121 | 158 | |||
2122 | 159 | good_upgrade = ci_container.has_code_upgrade_been_tested( | ||
2123 | 160 | from_build_label=build_one, | ||
2124 | 161 | to_build_label=build_two | ||
2125 | 162 | ) | ||
2126 | 163 | bad_upgrade = ci_container.has_code_upgrade_been_tested( | ||
2127 | 164 | from_build_label=build_two, | ||
2128 | 165 | to_build_label=build_one | ||
2129 | 166 | ) | ||
2130 | 167 | |||
2131 | 168 | assert good_upgrade, "Good upgrade test missing" | ||
2132 | 169 | assert bad_upgrade is False, "Bad upgrade isn't false" | ||
2133 | 170 | print u"\u2713 Upgrades checked successfully" | ||
2134 | 171 | |||
2135 | 172 | good_mojo_test = ci_container.has_mojo_run_been_tested(spec_revno="155") | ||
2136 | 173 | bad_mojo_test = ci_container.has_mojo_run_been_tested(spec_revno="99999") | ||
2137 | 174 | |||
2138 | 175 | assert good_mojo_test, "Good mojo test missing" | ||
2139 | 176 | assert bad_mojo_test is False, "Bad mojo test isn't false" | ||
2140 | 177 | print u"\u2713 Mojo runs checked succeeded" | ||
2141 | 178 | |||
2142 | 179 | |||
def container_tests():
    """Run every container-manager test in order."""
    for test in (
        test_deployed_environment_container,
        test_local_environment_swift_container,
        test_latest_build,
        test_ci_container,
    ):
        test()
2148 | 185 | |||
2149 | 186 | |||
2150 | 187 | if __name__ == "__main__": | ||
2151 | 188 | container_tests() | ||
2152 | 0 | 189 | ||
2153 | === added file 'mojo-spec-helpers/utils/tests/test_mojo_utils.py' | |||
2154 | --- mojo-spec-helpers/utils/tests/test_mojo_utils.py 1970-01-01 00:00:00 +0000 | |||
2155 | +++ mojo-spec-helpers/utils/tests/test_mojo_utils.py 2015-09-08 08:23:55 +0000 | |||
2156 | @@ -0,0 +1,23 @@ | |||
2157 | 1 | # System imports | ||
2158 | 2 | import os | ||
2159 | 3 | |||
# Local imports
2161 | 5 | from mojo_utils import get_mojo_spec_revno | ||
2162 | 6 | |||
2163 | 7 | |||
def test_get_mojo_spec_revno():
    """get_mojo_spec_revno should return a digit string for this branch."""
    print (
        "\n===\n"
        "Test get_mojo_spec_revno"
        "\n===\n"
    )

    # Point MOJO_SPEC_DIR at the root of the bzr branch (four levels up
    # from this test file) so 'bzr revno' has a branch to inspect.
    os.environ['MOJO_SPEC_DIR'] = os.path.abspath(
        __file__ + '/../../../..'
    )

    assert get_mojo_spec_revno().isdigit()
    print u"\u2713 Successfully retrieved revno"
2177 | 21 | |||
2178 | 22 | if __name__ == "__main__": | ||
2179 | 23 | test_get_mojo_spec_revno() |
I'm going to test this in CI first before putting this MP up for review.