Merge lp:~mthaddon/mojo/juju-intro-mojo-specs into lp:mojo/mojo-specs

Proposed by Tom Haddon
Status: Merged
Merged at revision: 9
Proposed branch: lp:~mthaddon/mojo/juju-intro-mojo-specs
Merge into: lp:mojo/mojo-specs
Diff against target: 2179 lines (+2033/-27)
21 files modified
juju-intro/README (+5/-0)
juju-intro/collect (+5/-0)
juju-intro/deploy (+14/-0)
juju-intro/manifest (+10/-0)
juju-intro/manifest-verify (+4/-0)
juju-intro/verify-installed (+10/-0)
mojo-how-to/devel/verify (+0/-26)
mojo-how-to/manifest-verify (+3/-1)
mojo-spec-helpers/tests/check-juju (+5/-0)
mojo-spec-helpers/tests/verify-nrpe (+37/-0)
mojo-spec-helpers/utils/add-floating-ip (+114/-0)
mojo-spec-helpers/utils/cache_managers.py (+56/-0)
mojo-spec-helpers/utils/container_managers.py (+265/-0)
mojo-spec-helpers/utils/mojo_os_utils.py (+554/-0)
mojo-spec-helpers/utils/mojo_utils.py (+356/-0)
mojo-spec-helpers/utils/shyaml.py (+219/-0)
mojo-spec-helpers/utils/tests/README.md (+67/-0)
mojo-spec-helpers/utils/tests/run_tests.py (+18/-0)
mojo-spec-helpers/utils/tests/test_cache_managers.py (+80/-0)
mojo-spec-helpers/utils/tests/test_container_managers.py (+188/-0)
mojo-spec-helpers/utils/tests/test_mojo_utils.py (+23/-0)
To merge this branch: bzr merge lp:~mthaddon/mojo/juju-intro-mojo-specs
Reviewer: Paul Collins (status: Approve)
Review via email: mp+270005@code.launchpad.net

Description of the change

Add a "juju-intro" service which uses the charms described on https://jujucharms.com/docs/stable/getting-started so that we can test these actually work in CI

Revision history for this message
Tom Haddon (mthaddon) wrote :

I'm going to test this in CI first before putting this MP up for review.

Revision history for this message
Tom Haddon (mthaddon) wrote :

This is now ready for review. It has run as follows: http://paste.ubuntu.com/12272171/

70. By Tom Haddon

Just use the current promulgated series of each charm

71. By Tom Haddon

Add an e2e check for the site being up after initial install

Revision history for this message
Paul Collins (pjdc) wrote :

Approving, although I noticed:
 - running this spec with the local provider fails here due to mysql's default dataset-size of 80%; not sure how likely it is for someone to be using it, however (see the sketch below for a possible workaround)
 - mojo-spec-helpers may need another refresh (or is itself stale); add-floating-ip was rewritten in 100% Python recently (see internal trunk)

review: Approve
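
A hedged workaround for the dataset-size point above (not part of this MP): the mysql charm exposes a dataset-size option, so on a small local-provider machine shrinking it to a fixed value after deployment should avoid the failure, e.g.:

  # assumes the juju 1.x CLI and the promulgated mysql charm's dataset-size option
  juju set mysql dataset-size=512M

The equivalent could also be set up front via an options: stanza for mysql in the spec's deploy file.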

Preview Diff

=== added directory 'juju-intro'
=== added file 'juju-intro/README'
--- juju-intro/README 1970-01-01 00:00:00 +0000
+++ juju-intro/README 2015-09-08 08:23:55 +0000
@@ -0,0 +1,5 @@
1This spec contains the services that are included in the introduction to Juju
2as part of [1], and is intended to be used with CI to confirm that the charms
3we point new Juju users at are always in a working state.
4
5[1] https://jujucharms.com/docs/stable/getting-started
06
=== added symlink 'juju-intro/check-juju'
=== target is u'../mojo-spec-helpers/tests/check-juju'
=== added file 'juju-intro/collect'
--- juju-intro/collect 1970-01-01 00:00:00 +0000
+++ juju-intro/collect 2015-09-08 08:23:55 +0000
@@ -0,0 +1,5 @@
1wordpress lp:charms/wordpress
2mysql lp:charms/mysql
3
4# subordinates
5nrpe lp:charms/nrpe-external-master
06
=== added file 'juju-intro/deploy'
--- juju-intro/deploy 1970-01-01 00:00:00 +0000
+++ juju-intro/deploy 2015-09-08 08:23:55 +0000
@@ -0,0 +1,14 @@
1wordpress:
2 series: {{ series }}
3 services:
4 wordpress:
5 charm: wordpress
6 expose: true
7 mysql:
8 charm: mysql
9 nrpe:
10 charm: nrpe
11 relations:
12 - ["wordpress", "mysql"]
13 - ["wordpress", "nrpe"]
14 - ["mysql", "nrpe"]
015
=== added file 'juju-intro/manifest'
--- juju-intro/manifest 1970-01-01 00:00:00 +0000
+++ juju-intro/manifest 2015-09-08 08:23:55 +0000
@@ -0,0 +1,10 @@
1collect
2deploy delay=0
3include config=manifest-verify
4
5## This isn't included in the main verify manifest because, after we've installed
6## the service, this won't work as expected, so it's a one-time check for the
7## initial deployment
8
9# Verify the site is installed
10verify config=verify-installed
011
=== added file 'juju-intro/manifest-verify'
--- juju-intro/manifest-verify 1970-01-01 00:00:00 +0000
+++ juju-intro/manifest-verify 2015-09-08 08:23:55 +0000
@@ -0,0 +1,4 @@
1# Checking juju status
2verify config=check-juju
3# Running all nagios checks to confirm the service is working as expected
4verify config=verify-nrpe
05
=== added file 'juju-intro/verify-installed'
--- juju-intro/verify-installed 1970-01-01 00:00:00 +0000
+++ juju-intro/verify-installed 2015-09-08 08:23:55 +0000
@@ -0,0 +1,10 @@
1#!/bin/bash
2
3set -e
4set -u
5
6# Check the service is actually up and we can get to the install page
7# This isn't included in the main verify manifest because, after we've installed
8# the service, this won't work as expected, so it's a one-time check for the
9# initial deployment
10juju ssh wordpress/0 "/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -H localhost -f follow -s '<title>WordPress &rsaquo; Installation</title>'"
011
=== added symlink 'juju-intro/verify-nrpe'
=== target is u'../mojo-spec-helpers/tests/verify-nrpe'
=== removed file 'mojo-how-to/devel/verify'
--- mojo-how-to/devel/verify 2015-01-26 16:23:38 +0000
+++ mojo-how-to/devel/verify 1970-01-01 00:00:00 +0000
@@ -1,26 +0,0 @@
1#!/bin/bash
2
3set -e
4
5# If we have any etc bzr nagios checks, we need to wait up to 15 minutes
6# for the cron to run to populate the check file, so just ignore those
7NAGIOS_OUTPUT=$(juju status | sed -rn 's/^ {8}public-address: //p'| xargs -I% ssh ubuntu@% 'egrep -oh /usr.*lib.* /etc/nagios/nrpe.d/check_* |grep -v check_etc_bzr.py |sed "s/.*/(set -x; &) || echo MOJO_NAGIOS_FAIL /"|sudo -u nagios -s bash |& sed "s/^/%: /"' 2>/dev/null)
8
9echo "${NAGIOS_OUTPUT}"
10
11NAGIOS_FAIL=$(echo "${NAGIOS_OUTPUT}" | grep MOJO_NAGIOS_FAIL) || true
12
13if [ -n "${NAGIOS_FAIL}" ]; then
14 echo "########################"
15 echo "# Nagios Checks Failed #"
16 echo "########################"
17 exit 1
18else
19 echo "########################"
20 echo "# Nagios Checks Passed #"
21 echo "########################"
22fi
23
24echo "#########################"
25echo "# Successfully verified #"
26echo "#########################"
270
=== modified file 'mojo-how-to/manifest-verify'
--- mojo-how-to/manifest-verify 2015-01-21 15:39:29 +0000
+++ mojo-how-to/manifest-verify 2015-09-08 08:23:55 +0000
@@ -1,2 +1,4 @@
+# Check juju
+verify config=check-juju
 # The service is up and running, let's verify it
-verify
+verify config=verify-nrpe
=== removed symlink 'mojo-how-to/production/verify'
=== target was u'../devel/verify'
=== added directory 'mojo-spec-helpers'
=== added directory 'mojo-spec-helpers/tests'
=== added file 'mojo-spec-helpers/tests/check-juju'
--- mojo-spec-helpers/tests/check-juju 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/tests/check-juju 2015-09-08 08:23:55 +0000
@@ -0,0 +1,5 @@
1#!/usr/bin/python
2import utils.mojo_utils as mojo_utils
3
4mojo_utils.juju_check_hooks_complete()
5mojo_utils.juju_status_check_and_wait()
06
=== added symlink 'mojo-spec-helpers/tests/utils'
=== target is u'../utils'
=== added file 'mojo-spec-helpers/tests/verify-nrpe'
--- mojo-spec-helpers/tests/verify-nrpe 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/tests/verify-nrpe 2015-09-08 08:23:55 +0000
@@ -0,0 +1,37 @@
1#!/bin/bash
2
3set -e
4
5# If we have any etc bzr nagios checks, we need to wait up to 15 minutes
6# for the cron to run to populate the check file, so just ignore those
7check() {
8 juju ssh $1 'egrep -oh /usr.*lib.* /etc/nagios/nrpe.d/check_* |\
9 grep -v check_etc_bzr.py |sed "s/.*/(set -x; &) || \
10 echo MOJO_NAGIOS_FAIL /"|sudo -u nagios -s bash' 2>/dev/null
11}
12
13NRPE_UNITS=$(juju status | sed -rn 's/^ *(nrpe\/[0-9]*):$/\1/p')
14NAGIOS_OUTPUT=$(
15 for unit in $NRPE_UNITS; do
16 check $unit | sed --e "s#^#$unit: #"
17 done
18)
19
20echo "${NAGIOS_OUTPUT}"
21
22NAGIOS_FAIL=$(echo "${NAGIOS_OUTPUT}" | grep MOJO_NAGIOS_FAIL) || true
23
24if [ -n "${NAGIOS_FAIL}" ]; then
25 echo "########################"
26 echo "# Nagios Checks Failed #"
27 echo "########################"
28 exit 1
29else
30 echo "########################"
31 echo "# Nagios Checks Passed #"
32 echo "########################"
33fi
34
35echo "#########################"
36echo "# Successfully verified #"
37echo "#########################"
038
=== added directory 'mojo-spec-helpers/utils'
=== added file 'mojo-spec-helpers/utils/__init__.py'
=== added file 'mojo-spec-helpers/utils/add-floating-ip'
--- mojo-spec-helpers/utils/add-floating-ip 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/add-floating-ip 2015-09-08 08:23:55 +0000
@@ -0,0 +1,114 @@
1#!/bin/sh
2#
3# Author: Paul Gear
4# Description: Manage floating IP allocations in mojo local directory for a juju service or unit.
5# NOTE: $MOJO_PROJECT and $MOJO_STAGE must be set before calling this script.
6#
7
8set -e
9set -u
10
11
12SECRETS_DIR="/srv/mojo/LOCAL/$MOJO_PROJECT/$MOJO_STAGE/"
13
14# Echo to standard error
15# Useful for printing from functions without polluting the "returned" output
16echo_stderr() { echo "$@" 1>&2; }
17
18# print the juju unit followed by the machine instance id
19get_juju_units()
20{
21 juju status "$@" | python -c '
22import sys, yaml
23status = yaml.safe_load(sys.stdin)
24for serv in status["services"]:
25 if status["services"][serv].get("units"):
26 for unit in status["services"][serv]["units"]:
27 machine = status["services"][serv]["units"][unit]["machine"]
28 instance = status["machines"][machine]["instance-id"]
29 print unit, instance
30'
31}
32
33
34# get the existing floating IP for this unit, or create a new one
35get_unit_floating_ip()
36{
37 UNIT="$1"
38 MACHINE="$2"
39 UNIT_FILE_NAME=$(echo "$UNIT" | tr '/' '_')
40 FLOATING_IP_FILE="$SECRETS_DIR/$UNIT_FILE_NAME"
41 IP=""
42 if [ -s "$FLOATING_IP_FILE" ]; then
43 IP=$(head -n 1 "$FLOATING_IP_FILE")
44
45 echo_stderr "- Found IP "$IP" for "$UNIT
46
47 # check how the IP is used now
48 ALLOCATION=$(nova floating-ip-list | awk -v IP="$IP" '$2 == IP {print $5}')
49 case $ALLOCATION in
50 "-")
51 # unallocated - we can use it
52 echo_stderr "- IP "$IP" is currently unallocated"
53 ;;
54 "")
55 # non-existent - we'll create one below
56 IP=""
57 echo_stderr "- No IP found for "$UNIT
58 ;;
59 *)
60 # allocated to a unit already
61 if nova show $MACHINE | awk '$3 == "network"' | sed -re 's/,? +/\n/g' | grep -q "^$IP$"; then
62 # it's allocated to us; do nothing
63 echo_stderr "- IP "$IP" is already allocated to "$UNIT
64 return
65 fi
66 # it's allocated to another unit - create one below
67 echo_stderr "- IP "$IP" is allocated to another unit"
68 IP=""
69 ;;
70 esac
71 fi
72 if [ -z "$IP" ]; then
73 IP=$(nova floating-ip-create | grep -wo '[0-9.a-f:]*')
74 echo "$IP" > "$FLOATING_IP_FILE"
75 echo_stderr "- Created new IP "$IP
76 fi
77 echo "$IP"
78}
79
80
81usage()
82{
83 cat <<EOF
84Usage: $0 {service|unit}
85
86# Add a floating IP to the apache2/0 unit:
87add-floating-ip apache2/0
88
89# Add floating IPs to all units of the jenkins-slave service:
90add-floating-ip jenkins-slave
91
92# Add floating IPs to all units in the haproxy and squid services:
93add-floating-ip haproxy squid
94
95EOF
96 exit 2
97}
98
99
100if [ "$#" -lt 1 ]; then
101 usage
102fi
103
104for i in "$@"; do
105 get_juju_units "$i" | while read unit machine; do
106 echo_stderr ""
107 echo_stderr "Assigning IPs for "$unit
108 IP=$(get_unit_floating_ip "$unit" "$machine")
109 if [ -n "$IP" ]; then
110 echo_stderr "- Assigning IP "$IP" to "$unit
111 nova floating-ip-associate "$machine" "$IP"
112 fi
113 done
114done
0115
=== added file 'mojo-spec-helpers/utils/cache_managers.py'
--- mojo-spec-helpers/utils/cache_managers.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/cache_managers.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,56 @@
1# System
2import os
3import json
4
5
6class JsonCache:
7 """
8 Store key value pairs in a JSON file
9 """
10
11 def __init__(self, cache_path):
12 self.cache_path = cache_path
13
14 def get_cache(self):
15 """
16 Get the dictionary from the cache file
17 or if it doesn't exist, return an empty dictionary
18 """
19
20 if os.path.isfile(self.cache_path):
21 with open(self.cache_path) as cache_file:
22 return json.load(cache_file)
23 else:
24 return {}
25
26 def put_cache(self, cache):
27 """
28 Save a dictionary to the JSON cache file
29 """
30
31 with open(self.cache_path, 'w') as cache_file:
32 json.dump(cache, cache_file)
33
34 def set(self, name, value):
35 """
36 Set a key value pair to the cache
37 """
38
39 cache = self.get_cache()
40 cache[name] = value
41 self.put_cache(cache)
42
43 def get(self, name):
44 """
45 Retrieve a value from the cache by key
46 """
47
48 return self.get_cache().get(name)
49
50 def wipe(self):
51 """
52 Remove the cache file
53 """
54
55 if os.path.isfile(self.cache_path):
56 os.remove(self.cache_path)
057
=== added file 'mojo-spec-helpers/utils/container_managers.py'
--- mojo-spec-helpers/utils/container_managers.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/container_managers.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,265 @@
1# Modules
2import requests
3
4
5class LocalEnvironmentSwiftContainer:
6 """
7 Manage a Swift container for the local deployment environment
8 for storing deployment settings
9
10 swift_connection: A working swiftclient.client.Connection object
11 container_name: The name of the swift container to use
12 """
13
14 previous_build_obj = 'previous-build-label'
15 deployed_build_obj = 'deployed-build-label'
16 deployed_revno_obj = 'deployed-spec-revno'
17 code_upgrade_succeeded_template = 'code-upgrade-{}-{}-succeeded'
18 mojo_run_succeeded_template = 'mojo-run-{}-succeeded'
19
20 def __init__(self, swift_connection, container_name):
21 self.swift_connection = swift_connection
22 self.container_name = container_name
23
24 def previous_build_label(self):
25 """
26 Get the build label that was previously deployed on this environment
27 From the Swift account associated with this environment
28 through environment variables "OS_AUTH_URL" etc.
29 """
30
31 return self.swift_connection.get_object(
32 container=self.container_name,
33 obj=self.previous_build_obj
34 )[1].strip()
35
36 def deployed_build_label(self):
37 """
38 Get the build label that is currently deployed on this environment
39 From the Swift account associated with this environment
40 through environment variables "OS_AUTH_URL" etc.
41 """
42
43 return self.swift_connection.get_object(
44 container=self.container_name,
45 obj=self.deployed_build_obj
46 )[1].strip()
47
48 def deployed_spec_revno(self):
49 """
50 Get the mojo spec revision number that is currently deployed
51 on this environment from the Swift account associated with this
52 environment through environment variables "OS_AUTH_URL" etc.
53 """
54
55 return self.swift_connection.get_object(
56 container=self.container_name,
57 obj=self.deployed_revno_obj
58 )[1].strip()
59
60 def save_code_upgrade_succeeded(
61 self, from_build_label, to_build_label
62 ):
63 """
64 Save an object into Swift
65 to signify that a code-upgrade succeeded
66 from one build_label to another
67 """
68
69 upgrade_object_name = self.code_upgrade_succeeded_template.format(
70 from_build_label, to_build_label
71 )
72
73 self.swift_connection.put_object(
74 container=self.container_name,
75 obj=upgrade_object_name,
76 contents='true'
77 )
78
79 def save_mojo_run_succeeded(self, spec_revno):
80 """
81 Save an object into Swift
82 to signify that a mojo-run succeeded with
83 a given revision number of the mojo spec
84 """
85
86 run_object_name = self.mojo_run_succeeded_template.format(
87 spec_revno
88 )
89
90 self.swift_connection.put_object(
91 container=self.container_name,
92 obj=run_object_name,
93 contents='true'
94 )
95
96 def save_deployed_build_label(self, build_label):
97 """
98 Save an object into swift containing
99 the build_label which is deployed to this environment
100 """
101
102 self.swift_connection.put_object(
103 container=self.container_name,
104 obj='deployed-build-label',
105 contents=build_label
106 )
107
108 def save_previous_build_label(self, build_label):
109 """
110 Save an object into swift
111 to record the previously deployed build_label
112 """
113
114 self.swift_connection.put_object(
115 container=self.container_name,
116 obj='previous-build-label',
117 contents=build_label
118 )
119
120 def save_mojo_spec_revno(self, spec_revno):
121 """
122 Save an object into swift containing
123 the current mojo spec revision number
124 """
125
126 self.swift_connection.put_object(
127 container=self.container_name,
128 obj='deployed-spec-revno',
129 contents=spec_revno
130 )
131
132
133class HttpContainer:
134 """
135 Methods for retrieving objects
136 from a Swift HTTP storage container
137 """
138
139 def __init__(self, container_url):
140 """
141 container_url: The storage URL path for the swift container
142 """
143
144 self.container_url = container_url
145
146 def get(self, object_name):
147 """
148 Retrieve the contents of an object
149 """
150
151 object_url = '{}/{}'.format(self.container_url, object_name)
152
153 response = requests.get(object_url)
154
155 try:
156 response.raise_for_status()
157 except requests.exceptions.HTTPError as http_error:
158 http_error.message += '. URL: {}'.format(object_url)
159 http_error.args += ('URL: {}'.format(object_url),)
160 raise http_error
161
162 response.raise_for_status()
163
164 return response.text
165
166 def head(self, object_name):
167 """
168 Retrieve the HEAD of an object
169 """
170
171 object_url = '{}/{}'.format(self.container_url, object_name)
172
173 return requests.head(object_url)
174
175 def exists(self, object_name):
176 """
177 Check an object exists
178 """
179
180 return self.head(object_name).ok
181
182
183class CIContainer(HttpContainer):
184 """
185 Methods for retrieving continuous integration
186 resources from an http swift store
187 """
188
189 code_upgrade_succeeded_template = 'code-upgrade-{}-{}-succeeded'
190 mojo_run_succeeded_template = 'mojo-run-{}-succeeded'
191
192 def has_code_upgrade_been_tested(
193 self,
194 from_build_label,
195 to_build_label
196 ):
197 """
198 Check if a specific code upgrade has been tested
199 (from one build_label to another)
200 by checking if a specially named Swift object exists
201 in the CI system's Swift HTTP store.
202 (This object will have been created
203 by self.save_code_upgrade_succeeded)
204 """
205
206 return self.exists(
207 self.code_upgrade_succeeded_template.format(
208 from_build_label,
209 to_build_label
210 )
211 )
212
213 def has_mojo_run_been_tested(self, spec_revno):
214 """
215 Check if a specific mojo spec revision number has been tested
216 by checking if a specially named Swift object exists
217 in the CI system's Swift HTTP store.
218 (This object will have been created by self.save_mojo_run_succeeded)
219 """
220
221 return self.exists(
222 self.mojo_run_succeeded_template.format(
223 spec_revno
224 )
225 )
226
227
228class BuildContainer(HttpContainer):
229 """
230 Methods for interacting with the HTTP Swift store
231 containing code builds
232 """
233
234 def latest_build_label(self):
235 """
236 Get the latest build label from the webteam's Swift HTTP object store
237 """
238
239 return self.get('latest-build-label').strip()
240
241
242class DeployedEnvironmentContainer(HttpContainer):
243 """
244 Methods for requesting information
245 from the HTTP swift store for a
246 deployed environment (e.g. production, staging)
247 """
248
249 def deployed_build_label(self):
250 """
251 Get the build label which was most recently
252 deployed to this environment
253 from their Swift HTTP object store
254 """
255
256 return self.get('deployed-build-label').strip()
257
258 def deployed_spec_revno(self):
259 """
260 Get the version of the mojo spec
261 which was most recently run on this environment
262 from their Swift HTTP object store
263 """
264
265 return self.get('deployed-spec-revno').strip()
0266
=== added file 'mojo-spec-helpers/utils/mojo_os_utils.py'
--- mojo-spec-helpers/utils/mojo_os_utils.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/mojo_os_utils.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,554 @@
1#!/usr/bin/python
2
3import swiftclient
4import glanceclient
5from keystoneclient.v2_0 import client as keystoneclient
6import mojo_utils
7from novaclient.v1_1 import client as novaclient
8from neutronclient.v2_0 import client as neutronclient
9import logging
10import re
11import sys
12import tempfile
13import urllib
14import os
15import time
16import subprocess
17import paramiko
18import StringIO
19
20
21# Openstack Client helpers
22def get_nova_creds(cloud_creds):
23 auth = {
24 'username': cloud_creds['OS_USERNAME'],
25 'api_key': cloud_creds['OS_PASSWORD'],
26 'auth_url': cloud_creds['OS_AUTH_URL'],
27 'project_id': cloud_creds['OS_TENANT_NAME'],
28 'region_name': cloud_creds['OS_REGION_NAME'],
29 }
30 return auth
31
32
33def get_ks_creds(cloud_creds):
34 auth = {
35 'username': cloud_creds['OS_USERNAME'],
36 'password': cloud_creds['OS_PASSWORD'],
37 'auth_url': cloud_creds['OS_AUTH_URL'],
38 'tenant_name': cloud_creds['OS_TENANT_NAME'],
39 'region_name': cloud_creds['OS_REGION_NAME'],
40 }
41 return auth
42
43
44def get_swift_creds(cloud_creds):
45 auth = {
46 'user': cloud_creds['OS_USERNAME'],
47 'key': cloud_creds['OS_PASSWORD'],
48 'authurl': cloud_creds['OS_AUTH_URL'],
49 'tenant_name': cloud_creds['OS_TENANT_NAME'],
50 'auth_version': '2.0',
51 }
52 return auth
53
54
55def get_nova_client(novarc_creds):
56 nova_creds = get_nova_creds(novarc_creds)
57 return novaclient.Client(**nova_creds)
58
59
60def get_neutron_client(novarc_creds):
61 neutron_creds = get_ks_creds(novarc_creds)
62 return neutronclient.Client(**neutron_creds)
63
64
65def get_keystone_client(novarc_creds):
66 keystone_creds = get_ks_creds(novarc_creds)
67 keystone_creds['insecure'] = True
68 return keystoneclient.Client(**keystone_creds)
69
70
71def get_swift_client(novarc_creds, insecure=True):
72 swift_creds = get_swift_creds(novarc_creds)
73 swift_creds['insecure'] = insecure
74 return swiftclient.client.Connection(**swift_creds)
75
76
77def get_glance_client(novarc_creds):
78 kc = get_keystone_client(novarc_creds)
79 glance_endpoint = kc.service_catalog.url_for(service_type='image',
80 endpoint_type='publicURL')
81 return glanceclient.Client('1', glance_endpoint, token=kc.auth_token)
82
83
84# Glance Helpers
85def download_image(image, image_glance_name=None):
86 logging.info('Downloading ' + image)
87 tmp_dir = tempfile.mkdtemp(dir='/tmp')
88 if not image_glance_name:
89 image_glance_name = image.split('/')[-1]
90 local_file = os.path.join(tmp_dir, image_glance_name)
91 urllib.urlretrieve(image, local_file)
92 return local_file
93
94
95def upload_image(gclient, ifile, image_name, public, disk_format,
96 container_format):
97 logging.info('Uploading %s to glance ' % (image_name))
98 with open(ifile) as fimage:
99 gclient.images.create(
100 name=image_name,
101 is_public=public,
102 disk_format=disk_format,
103 container_format=container_format,
104 data=fimage)
105
106
107def get_images_list(gclient):
108 return [image.name for image in gclient.images.list()]
109
110
111# Keystone helpers
112def tenant_create(kclient, tenants):
113 current_tenants = [tenant.name for tenant in kclient.tenants.list()]
114 for tenant in tenants:
115 if tenant in current_tenants:
116 logging.warning('Not creating tenant %s, it already '
117 'exists' % (tenant))
118 else:
119 logging.info('Creating tenant %s' % (tenant))
120 kclient.tenants.create(tenant_name=tenant)
121
122
123def user_create(kclient, users):
124 current_users = [user.name for user in kclient.users.list()]
125 for user in users:
126 if user['username'] in current_users:
127 logging.warning('Not creating user %s, it already '
128 'exists' % (user['username']))
129 else:
130 logging.info('Creating user %s' % (user['username']))
131 tenant_id = get_tenant_id(kclient, user['tenant'])
132 kclient.users.create(name=user['username'],
133 password=user['password'],
134 email=user['email'],
135 tenant_id=tenant_id)
136
137
138def get_roles_for_user(kclient, user_id, tenant_id):
139 roles = []
140 ksuser_roles = kclient.roles.roles_for_user(user_id, tenant_id)
141 for role in ksuser_roles:
142 roles.append(role.id)
143 return roles
144
145
146def add_users_to_roles(kclient, users):
147 for user_details in users:
148 tenant_id = get_tenant_id(kclient, user_details['tenant'])
149 for role_name in user_details['roles']:
150 role = kclient.roles.find(name=role_name)
151 user = kclient.users.find(name=user_details['username'])
152 users_roles = get_roles_for_user(kclient, user, tenant_id)
153 if role.id in users_roles:
154 logging.warning('Not adding role %s to %s, it already has '
155 'it' % (role_name, user_details['username']))
156 else:
157 logging.info('Adding %s to role %s for tenant '
158 '%s' % (user_details['username'], role_name,
159 tenant_id))
160 kclient.roles.add_user_role(user_details['username'], role,
161 tenant_id)
162
163
164def get_tenant_id(ks_client, tenant_name):
165 for t in ks_client.tenants.list():
166 if t._info['name'] == tenant_name:
167 return t._info['id']
168 return None
169
170
171# Neutron Helpers
172def get_gateway_uuids():
173 gateway_config = mojo_utils.get_juju_status('neutron-gateway')
174 uuids = []
175 for machine in gateway_config['machines']:
176 uuids.append(gateway_config['machines'][machine]['instance-id'])
177 return uuids
178
179
180def get_net_uuid(neutron_client, net_name):
181 network = neutron_client.list_networks(name=net_name)['networks'][0]
182 return network['id']
183
184
185def configure_gateway_ext_port(novaclient):
186 uuids = get_gateway_uuids()
187 for uuid in uuids:
188 server = novaclient.servers.get(uuid)
189 mac_addrs = [a.mac_addr for a in server.interface_list()]
190 if len(mac_addrs) < 2:
191 logging.info('Adding additional port to Neutron Gateway')
192 server.interface_attach(port_id=None, net_id=None, fixed_ip=None)
193 else:
194 logging.warning('Neutron Gateway already has additional port')
195 if uuids:
196 logging.info('Setting Neutron Gateway external port to eth1')
197 mojo_utils.juju_set('neutron-gateway', 'ext-port=eth1')
198
199
200def create_tenant_network(neutron_client, tenant_id, net_name='private',
201 shared=False, network_type='gre'):
202 networks = neutron_client.list_networks(name=net_name)
203 if len(networks['networks']) == 0:
204 logging.info('Creating network: %s',
205 net_name)
206 network_msg = {
207 'network': {
208 'name': net_name,
209 'shared': shared,
210 'tenant_id': tenant_id,
211 }
212 }
213 if network_type == 'vxlan':
214 network_msg['network']['provider:segmentation_id'] = 1233
215 network_msg['network']['provider:network_type'] = network_type
216 network = neutron_client.create_network(network_msg)['network']
217 else:
218 logging.warning('Network %s already exists.', net_name)
219 network = networks['networks'][0]
220 return network
221
222
223def create_external_network(neutron_client, tenant_id, net_name='ext_net',
224 network_type='gre'):
225 networks = neutron_client.list_networks(name=net_name)
226 if len(networks['networks']) == 0:
227 logging.info('Configuring external bridge')
228 network_msg = {
229 'name': net_name,
230 'router:external': True,
231 'tenant_id': tenant_id,
232 }
233 if network_type == 'vxlan':
234 network_msg['provider:segmentation_id'] = 1234
235 network_msg['provider:network_type'] = network_type
236
237 logging.info('Creating new external network definition: %s',
238 net_name)
239 network = neutron_client.create_network(
240 {'network': network_msg})['network']
241 logging.info('New external network created: %s', network['id'])
242 else:
243 logging.warning('Network %s already exists.', net_name)
244 network = networks['networks'][0]
245 return network
246
247
248def create_tenant_subnet(neutron_client, tenant_id, network, cidr, dhcp=True,
249 subnet_name='private_subnet'):
250 # Create subnet
251 subnets = neutron_client.list_subnets(name=subnet_name)
252 if len(subnets['subnets']) == 0:
253 logging.info('Creating subnet')
254 subnet_msg = {
255 'subnet': {
256 'name': subnet_name,
257 'network_id': network['id'],
258 'enable_dhcp': dhcp,
259 'cidr': cidr,
260 'ip_version': 4,
261 'tenant_id': tenant_id
262 }
263 }
264 subnet = neutron_client.create_subnet(subnet_msg)['subnet']
265 else:
266 logging.warning('Subnet %s already exists.', subnet_name)
267 subnet = subnets['subnets'][0]
268 return subnet
269
270
271def create_external_subnet(neutron_client, tenant_id, network,
272 default_gateway=None, cidr=None,
273 start_floating_ip=None, end_floating_ip=None,
274 subnet_name='ext_net_subnet'):
275 subnets = neutron_client.list_subnets(name=subnet_name)
276 if len(subnets['subnets']) == 0:
277 subnet_msg = {
278 'name': subnet_name,
279 'network_id': network['id'],
280 'enable_dhcp': False,
281 'ip_version': 4,
282 'tenant_id': tenant_id
283 }
284
285 if default_gateway:
286 subnet_msg['gateway_ip'] = default_gateway
287 if cidr:
288 subnet_msg['cidr'] = cidr
289 if (start_floating_ip and end_floating_ip):
290 allocation_pool = {
291 'start': start_floating_ip,
292 'end': end_floating_ip,
293 }
294 subnet_msg['allocation_pools'] = [allocation_pool]
295
296 logging.info('Creating new subnet')
297 subnet = neutron_client.create_subnet({'subnet': subnet_msg})['subnet']
298 logging.info('New subnet created: %s', subnet['id'])
299 else:
300 logging.warning('Subnet %s already exists.', subnet_name)
301 subnet = subnets['subnets'][0]
302 return subnet
303
304
305def update_subnet_dns(neutron_client, subnet, dns_servers):
306 msg = {
307 'subnet': {
308 'dns_nameservers': dns_servers.split(',')
309 }
310 }
311 logging.info('Updating dns_nameservers (%s) for subnet',
312 dns_servers)
313 neutron_client.update_subnet(subnet['id'], msg)
314
315
316def create_provider_router(neutron_client, tenant_id):
317 routers = neutron_client.list_routers(name='provider-router')
318 if len(routers['routers']) == 0:
319 logging.info('Creating provider router for external network access')
320 router_info = {
321 'router': {
322 'name': 'provider-router',
323 'tenant_id': tenant_id
324 }
325 }
326 router = neutron_client.create_router(router_info)['router']
327 logging.info('New router created: %s', (router['id']))
328 else:
329 logging.warning('Router provider-router already exists.')
330 router = routers['routers'][0]
331 return router
332
333
334def plug_extnet_into_router(neutron_client, router, network):
335 ports = neutron_client.list_ports(device_owner='network:router_gateway',
336 network_id=network['id'])
337 if len(ports['ports']) == 0:
338 logging.info('Plugging router into ext_net')
339 router = neutron_client.add_gateway_router(
340 router=router['id'],
341 body={'network_id': network['id']})
342 logging.info('Router connected')
343 else:
344 logging.warning('Router already connected')
345
346
347def plug_subnet_into_router(neutron_client, router, network, subnet):
348 routers = neutron_client.list_routers(name=router)
349 if len(routers['routers']) == 0:
350 logging.error('Unable to locate provider router %s', router)
351 sys.exit(1)
352 else:
353 # Check to see if subnet already plugged into router
354 ports = neutron_client.list_ports(
355 device_owner='network:router_interface',
356 network_id=network['id'])
357 if len(ports['ports']) == 0:
358 logging.info('Adding interface from subnet to %s' % (router))
359 router = routers['routers'][0]
360 neutron_client.add_interface_router(router['id'],
361 {'subnet_id': subnet['id']})
362 else:
363 logging.warning('Router already connected to subnet')
364
365
366# Nova Helpers
367def create_keypair(nova_client, keypair_name):
368 if nova_client.keypairs.findall(name=keypair_name):
369 _oldkey = nova_client.keypairs.find(name=keypair_name)
370 logging.info('Deleting key %s' % (keypair_name))
371 nova_client.keypairs.delete(_oldkey)
372 logging.info('Creating key %s' % (keypair_name))
373 new_key = nova_client.keypairs.create(name=keypair_name)
374 return new_key.private_key
375
376
377def boot_instance(nova_client, image_name, flavor_name, key_name):
378 image = nova_client.images.find(name=image_name)
379 flavor = nova_client.flavors.find(name=flavor_name)
380 net = nova_client.networks.find(label="private")
381 nics = [{'net-id': net.id}]
382 # Obviously time may not produce a unique name
383 vm_name = time.strftime("%Y%m%d%H%M%S")
384 logging.info('Creating %s %s '
385 'instance %s' % (flavor_name, image_name, vm_name))
386 instance = nova_client.servers.create(name=vm_name,
387 image=image,
388 flavor=flavor,
389 key_name=key_name,
390 nics=nics)
391 return instance
392
393
394def wait_for_active(nova_client, vm_name, wait_time):
395 logging.info('Waiting %is for %s to reach ACTIVE '
396 'state' % (wait_time, vm_name))
397 for counter in range(wait_time):
398 instance = nova_client.servers.find(name=vm_name)
399 if instance.status == 'ACTIVE':
400 logging.info('%s is ACTIVE' % (vm_name))
401 return True
402 elif instance.status != 'BUILD':
403 logging.error('instance %s in unknown '
404 'state %s' % (instance.name, instance.status))
405 return False
406 time.sleep(1)
407 logging.error('instance %s failed to reach '
408 'active state in %is' % (instance.name, wait_time))
409 return False
410
411
412def wait_for_cloudinit(nova_client, vm_name, bootstring, wait_time):
413 logging.info('Waiting %is for cloudinit on %s to '
414 'complete' % (wait_time, vm_name))
415 instance = nova_client.servers.find(name=vm_name)
416 for counter in range(wait_time):
417 instance = nova_client.servers.find(name=vm_name)
418 console_log = instance.get_console_output()
419 if bootstring in console_log:
420 logging.info('Cloudinit for %s is complete' % (vm_name))
421 return True
422 time.sleep(1)
423 logging.error('cloudinit for instance %s failed '
424 'to complete in %is' % (instance.name, wait_time))
425 return False
426
427
428def wait_for_boot(nova_client, vm_name, bootstring, active_wait,
429 cloudinit_wait):
430 if not wait_for_active(nova_client, vm_name, active_wait):
431 raise Exception('Error initialising %s' % vm_name)
432 if not wait_for_cloudinit(nova_client, vm_name, bootstring,
433 cloudinit_wait):
434 raise Exception('Cloudinit error %s' % vm_name)
435
436
437def wait_for_ping(ip, wait_time):
438 logging.info('Waiting for ping to %s' % (ip))
439 for counter in range(wait_time):
440 if ping(ip):
441 logging.info('Ping %s success' % (ip))
442 return True
443 time.sleep(1)
444 logging.error('Ping failed for %s' % (ip))
445 return False
446
447
448def assign_floating_ip(nova_client, vm_name):
449 floating_ip = nova_client.floating_ips.create()
450 logging.info('Assigning floating IP %s to %s' % (floating_ip.ip, vm_name))
451 instance = nova_client.servers.find(name=vm_name)
452 instance.add_floating_ip(floating_ip)
453 return floating_ip.ip
454
455
456def add_secgroup_rules(nova_client):
457 secgroup = nova_client.security_groups.find(name="default")
458 # Using presence of a 22 rule to indicate whether secgroup rules
459 # have been added
460 port_rules = [rule['to_port'] for rule in secgroup.rules]
461 if 22 in port_rules:
462 logging.warn('Security group rules for ssh already added')
463 else:
464 logging.info('Adding ssh security group rule')
465 nova_client.security_group_rules.create(secgroup.id,
466 ip_protocol="tcp",
467 from_port=22,
468 to_port=22)
469 if -1 in port_rules:
470 logging.warn('Security group rules for ping already added')
471 else:
472 logging.info('Adding ping security group rule')
473 nova_client.security_group_rules.create(secgroup.id,
474 ip_protocol="icmp",
475 from_port=-1,
476 to_port=-1)
477
478
479def ping(ip):
480 # Use the system ping command with count of 1 and wait time of 1.
481 ret = subprocess.call(['ping', '-c', '1', '-W', '1', ip],
482 stdout=open('/dev/null', 'w'),
483 stderr=open('/dev/null', 'w'))
484 return ret == 0
485
486
487def ssh_test(username, ip, vm_name, password=None, privkey=None):
488 logging.info('Attempting to ssh to %s(%s)' % (vm_name, ip))
489 ssh = paramiko.SSHClient()
490 ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
491 if privkey:
492 key = paramiko.RSAKey.from_private_key(StringIO.StringIO(privkey))
493 ssh.connect(ip, username=username, password='', pkey=key)
494 else:
495 ssh.connect(ip, username=username, password=password)
496 stdin, stdout, stderr = ssh.exec_command('uname -n')
497 return_string = stdout.readlines()[0].strip()
498 ssh.close()
499 if return_string == vm_name:
500 logging.info('SSH to %s(%s) successful' % (vm_name, ip))
501 return True
502 else:
503 logging.info('SSH to %s(%s) failed' % (vm_name, ip))
504 return False
505
506
507def boot_and_test(nova_client, image_name, flavor_name, number, privkey,
508 active_wait=180, cloudinit_wait=180, ping_wait=180):
509 image_config = mojo_utils.get_mojo_config('images.yaml')
510 for counter in range(number):
511 instance = boot_instance(nova_client,
512 image_name=image_name,
513 flavor_name=flavor_name,
514 key_name='mojo')
515 wait_for_boot(nova_client, instance.name,
516 image_config[image_name]['bootstring'], active_wait,
517 cloudinit_wait)
518 ip = assign_floating_ip(nova_client, instance.name)
519 wait_for_ping(ip, ping_wait)
520 if not wait_for_ping(ip, ping_wait):
521 raise Exception('Ping of %s failed' % (ip))
522 ssh_test_args = {
523 'username': image_config[image_name]['username'],
524 'ip': ip,
525 'vm_name': instance.name,
526 }
527 if image_config[image_name]['auth_type'] == 'password':
528 ssh_test_args['password'] = image_config[image_name]['password']
529 elif image_config[image_name]['auth_type'] == 'privkey':
530 ssh_test_args['privkey'] = privkey
531 if not ssh_test(**ssh_test_args):
532 raise Exception('SSH to %s failed' % (ip))
533
534
535# Hacluster helper
536def get_crm_leader(service, resource=None):
537 if not resource:
538 resource = 'res_.*_vip'
539 leader = set()
540 for unit in mojo_utils.get_juju_units(service=service):
541 crm_out = mojo_utils.remote_run(unit, 'sudo crm status')[0]
542 for line in crm_out.splitlines():
543 line = line.lstrip()
544 if re.match(resource, line):
545 leader.add(line.split()[-1])
546 if len(leader) != 1:
547 raise Exception('Unexpected leader count: ' + str(len(leader)))
548 return leader.pop().split('-')[-1]
549
550
551def delete_crm_leader(service, resource=None):
552 mach_no = get_crm_leader(service, resource)
553 unit = mojo_utils.convert_machineno_to_unit(mach_no)
554 mojo_utils.delete_unit(unit)
0555
=== added file 'mojo-spec-helpers/utils/mojo_utils.py'
--- mojo-spec-helpers/utils/mojo_utils.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/mojo_utils.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,356 @@
1#!/usr/bin/python
2
3import subprocess
4import yaml
5import os
6import mojo
7import logging
8import time
9from collections import Counter
10from swiftclient.client import Connection
11
12JUJU_STATUSES = {
13 'good': ['ACTIVE', 'started'],
14 'bad': ['error'],
15 'transitional': ['pending', 'down', 'installed', 'stopped'],
16}
17
18
19def get_juju_status(service=None):
20 cmd = ['juju', 'status']
21 if service:
22 cmd.append(service)
23 status_file = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
24 return yaml.load(status_file)
25
26
27def get_juju_units(juju_status=None, service=None):
28 if not juju_status:
29 juju_status = get_juju_status()
30 units = []
31 if service:
32 services = [service]
33 else:
34 services = [juju_service for juju_service in juju_status['services']]
35 for svc in services:
36 if 'units' in juju_status['services'][svc]:
37 for unit in juju_status['services'][svc]['units']:
38 units.append(unit)
39 return units
40
41
42def convert_machineno_to_unit(machineno, juju_status=None):
43 if not juju_status:
44 juju_status = get_juju_status()
45 services = [service for service in juju_status['services']]
46 for svc in services:
47 if 'units' in juju_status['services'][svc]:
48 for unit in juju_status['services'][svc]['units']:
49 unit_info = juju_status['services'][svc]['units'][unit]
50 if unit_info['machine'] == machineno:
51 return unit
52
53
54def remote_shell_check(unit):
55 cmd = ['juju', 'run', '--unit', unit, 'uname -a']
56 FNULL = open(os.devnull, 'w')
57 return not subprocess.call(cmd, stdout=FNULL, stderr=subprocess.STDOUT)
58
59
60def remote_run(unit, remote_cmd=None):
61 cmd = ['juju', 'run', '--unit', unit]
62 if remote_cmd:
63 cmd.append(remote_cmd)
64 else:
65 cmd.append('uname -a')
66 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
67 output = p.communicate()
68 if p.returncode != 0:
69 raise Exception('Error running nagios checks')
70 return output
71
72
73def remote_upload(unit, script, remote_dir=None):
74 if remote_dir:
75 dst = unit + ':' + remote_dir
76 else:
77 dst = unit + ':/tmp/'
78 cmd = ['juju', 'scp', script, dst]
79 return subprocess.check_call(cmd)
80
81
82def delete_unit(unit):
83 service = unit.split('/')[0]
84 unit_count = len(get_juju_units(service=service))
85 logging.info('Removing unit ' + unit)
86 cmd = ['juju', 'destroy-unit', unit]
87 subprocess.check_call(cmd)
88 target_num = unit_count - 1
89 # Wait for the unit to disappear from juju status
90 while len(get_juju_units(service=service)) > target_num:
91 time.sleep(5)
92 juju_wait_finished()
93
94
95def add_unit(service, unit_num=None):
96 unit_count = len(get_juju_units(service=service))
97 if unit_num:
98 additional_units = int(unit_num)
99 else:
100 additional_units = 1
101 logging.info('Adding %i unit(s) to %s' % (additional_units, service))
102 cmd = ['juju', 'add-unit', service, '-n', str(additional_units)]
103 subprocess.check_call(cmd)
104 target_num = unit_count + additional_units
105 # Wait for the new unit to appear in juju status
106 while len(get_juju_units(service=service)) < target_num:
107 time.sleep(5)
108 juju_wait_finished()
109
110
111def juju_set(service, option):
112 subprocess.check_call(['juju', 'set', service, option])
113 juju_wait_finished()
114
115
116def juju_set_config_option(service, option_name, value):
117 option = "{}={}".format(option_name, value)
118 juju_set(service, option)
119
120
121def juju_get(service, option):
122 cmd = ['juju', 'get', service]
123 juju_get_output = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
124 service_config = yaml.load(juju_get_output)
125 if 'value' in service_config['settings'][option]:
126 return service_config['settings'][option]['value']
127
128
129def get_undercload_auth():
130 juju_env = subprocess.check_output(['juju', 'switch']).strip('\n')
131 juju_env_file = open(os.environ['HOME'] + "/.juju/environments.yaml", 'r')
132 juju_env_contents = yaml.load(juju_env_file)
133 novarc_settings = juju_env_contents['environments'][juju_env]
134 auth_settings = {
135 'OS_AUTH_URL': novarc_settings['auth-url'],
136 'OS_TENANT_NAME': novarc_settings['tenant-name'],
137 'OS_USERNAME': novarc_settings['username'],
138 'OS_PASSWORD': novarc_settings['password'],
139 'OS_REGION_NAME': novarc_settings['region'],
140 }
141 return auth_settings
142
143
144# Openstack Client helpers
145def get_auth_url(juju_status=None):
146 if juju_get('keystone', 'vip'):
147 return juju_get('keystone', 'vip')
148 if not juju_status:
149 juju_status = get_juju_status()
150 unit = juju_status['services']['keystone']['units'].itervalues().next()
151 return unit['public-address']
152
153
154def get_overcloud_auth(juju_status=None):
155 if not juju_status:
156 juju_status = get_juju_status()
157 if juju_get('keystone', 'use-https').lower() == 'yes':
158 transport = 'https'
159 port = 35357
160 else:
161 transport = 'http'
162 port = 5000
163 address = get_auth_url()
164 auth_settings = {
165 'OS_AUTH_URL': '%s://%s:%i/v2.0' % (transport, address, port),
166 'OS_TENANT_NAME': 'admin',
167 'OS_USERNAME': 'admin',
168 'OS_PASSWORD': 'openstack',
169 'OS_REGION_NAME': 'RegionOne',
170 }
171 return auth_settings
172
173
174def get_mojo_file(filename):
175 spec = mojo.Spec(os.environ['MOJO_SPEC_DIR'])
176 return spec.get_config(filename, stage=os.environ['MOJO_STAGE'])
177
178
179def get_mojo_spec_revno():
180 """
181 Get the current revision number of the mojo spec
182 """
183
184 revno_command = 'bzr revno {}'.format(os.environ['MOJO_SPEC_DIR'])
185 return subprocess.check_output(revno_command.split()).strip()
186
187
188def get_mojo_config(filename):
189 config_file = get_mojo_file(filename)
190 logging.info('Using config %s' % (config_file))
191 return yaml.load(file(config_file, 'r'))
192
193
194def get_charm_dir():
195 return os.path.join(os.environ['MOJO_REPO_DIR'],
196 os.environ['MOJO_SERIES'])
197
198
199def sync_charmhelpers(charmdir):
200 p = subprocess.Popen(['make', 'sync'], cwd=charmdir)
201 p.communicate()
202
203
204def sync_all_charmhelpers():
205 charm_base_dir = get_charm_dir()
206 for direc in os.listdir(charm_base_dir):
207 charm_dir = os.path.join(charm_base_dir, direc)
208 if os.path.isdir(charm_dir):
209 sync_charmhelpers(charm_dir)
210
211
212def parse_mojo_arg(options, mojoarg, multiargs=False):
213 if mojoarg.upper() in os.environ:
214 if multiargs:
215 return os.environ[mojoarg.upper()].split()
216 else:
217 return os.environ[mojoarg.upper()]
218 else:
219 return getattr(options, mojoarg)
220
221
222def get_machine_state(juju_status, state_type):
223 states = Counter()
224 for machine_no in juju_status['machines']:
225 if state_type in juju_status['machines'][machine_no]:
226 state = juju_status['machines'][machine_no][state_type]
227 else:
228 state = 'unknown'
229 states[state] += 1
230 return states
231
232
233def get_machine_agent_states(juju_status):
234 return get_machine_state(juju_status, 'agent-state')
235
236
237def get_machine_instance_states(juju_status):
238 return get_machine_state(juju_status, 'instance-state')
239
240
241def get_service_agent_states(juju_status):
242 service_state = Counter()
243 for service in juju_status['services']:
244 if 'units' in juju_status['services'][service]:
245 for unit in juju_status['services'][service]['units']:
246 unit_info = juju_status['services'][service]['units'][unit]
247 service_state[unit_info['agent-state']] += 1
248 if 'subordinates' in unit_info:
249 for sub_unit in unit_info['subordinates']:
250 sub_sstate = \
251 unit_info['subordinates'][sub_unit]['agent-state']
252 service_state[sub_sstate] += 1
253 return service_state
254
255
256def juju_status_summary(heading, statetype, states):
257 print heading
258 print " " + statetype
259 for state in states:
260 print " %s: %i" % (state, states[state])
261
262
263def juju_status_error_check(states):
264 for state in states:
265 if state in JUJU_STATUSES['bad']:
266 logging.error('Some statuses are in a bad state')
267 return True
268 logging.info('No statuses are in a bad state')
269 return False
270
271
272def juju_status_all_stable(states):
273 for state in states:
274 if state in JUJU_STATUSES['transitional']:
275 logging.info('Some statuses are in a transitional state')
276 return False
277 logging.info('Statuses are in a stable state')
278 return True
279
280
281def juju_status_check_and_wait():
282 checks = {
283 'Machines': [{
284 'Heading': 'Instance State',
285 'check_func': get_machine_instance_states,
286 },
287 {
288 'Heading': 'Agent State',
289 'check_func': get_machine_agent_states,
290 }],
291 'Services': [{
292 'Heading': 'Agent State',
293 'check_func': get_service_agent_states,
294 }]
295 }
296 stable_state = [False]
297 while False in stable_state:
298 juju_status = get_juju_status()
299 stable_state = []
300 for juju_objtype, check_info in checks.iteritems():
301 for check in check_info:
302 check_function = check['check_func']
303 states = check_function(juju_status)
304 if juju_status_error_check(states):
305 raise Exception("Error in juju status")
306 stable_state.append(juju_status_all_stable(states))
307 time.sleep(5)
308 for juju_objtype, check_info in checks.iteritems():
309 for check in check_info:
310 check_function = check['check_func']
311 states = check_function(juju_status)
312 juju_status_summary(juju_objtype, check['Heading'], states)
313
314
315def remote_runs(units):
316 for unit in units:
317 if not remote_shell_check(unit):
318 raise Exception('Juju run failed on ' + unit)
319
320
321def juju_check_hooks_complete():
322 juju_units = get_juju_units()
323 remote_runs(juju_units)
324 remote_runs(juju_units)
325
326
327def juju_wait_finished():
328 # Wait till all statuses are green
329 juju_status_check_and_wait()
330 # juju status may report all has finished but hooks are still firing.
331 # So check..
332 juju_check_hooks_complete()
333 # Check nothing has subsequently gone bad
334 juju_status_check_and_wait()
335
336
337def build_swift_connection():
338 """
339 Create a Swift Connection object from the environment variables
340 OS_TENANT_NAME, OS_STORAGE_URL, OS_AUTH_URL, OS_USERNAME
341 OS_PASSWORD
342 """
343
344 # Get extra Swift options like tenant name and storage URL
345 os_options = {'tenant_name': os.environ.get('OS_TENANT_NAME')}
346 storage_url = os.environ.get('OS_STORAGE_URL')
347 if storage_url:
348 os_options['object_storage_url'] = storage_url
349
350 return Connection(
351 os.environ.get('OS_AUTH_URL'),
352 os.environ.get('OS_USERNAME'),
353 os.environ.get('OS_PASSWORD'),
354 auth_version='2.0',
355 os_options=os_options
356 )
0357
=== added file 'mojo-spec-helpers/utils/shyaml.py'
--- mojo-spec-helpers/utils/shyaml.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/shyaml.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,219 @@
1#!/usr/bin/env python
2
3# Note: to launch test, you can use:
4# python -m doctest -d shyaml.py
5# or
6# nosetests
7
8from __future__ import print_function
9
10import sys
11import yaml
12import os.path
13import re
14
15EXNAME = os.path.basename(sys.argv[0])
16
17
18def tokenize(s):
19 r"""Returns an iterable in all subpart of a '.' separated string
20 So:
21 >>> list(tokenize('foo.bar.wiz'))
22 ['foo', 'bar', 'wiz']
23 this function has to deal with any type of data in the string. So it
24 actually interprets the string. Characters with meaning are '.' and '\'.
25 Both of these can be included in a token by quoting them with '\'.
26 So dot of slashes can be contained in token:
27 >>> print('\n'.join(tokenize(r'foo.dot<\.>.slash<\\>')))
28 foo
29 dot<.>
30 slash<\>
31 Notice that empty keys are also supported:
32 >>> list(tokenize(r'foo..bar'))
33 ['foo', '', 'bar']
34 Given an empty string:
35 >>> list(tokenize(r''))
36 ['']
37 And a None value:
38 >>> list(tokenize(None))
39 []
40 """
41 if s is None:
42 raise StopIteration
43 tokens = (re.sub(r'\\(\\|\.)', r'\1', m.group(0))
44 for m in re.finditer(r'((\\.|[^.\\])*)', s))
45 # an empty string superfluous token is added
46 # after all non-empty string token:
47 for token in tokens:
48 if len(token) != 0:
49 next(tokens)
50 yield token
51
52
53def mget(dct, key, default=None):
54 r"""Allow to get values deep in a dict with doted keys
55 Accessing leaf values is quite straightforward:
56 >>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
57 >>> mget(dct, 'a.x')
58 1
59 >>> mget(dct, 'a.b.c')
60 2
61 But you can also get subdict if your key is not targeting a
62 leaf value:
63 >>> mget(dct, 'a.b')
64 {'c': 2}
65 As a special feature, list access is also supported by providing a
66 (possibily signed) integer, it'll be interpreted as usual python
67 sequence access using bracket notation:
68 >>> mget({'a': {'x': [1, 5], 'b': {'c': 2}}}, 'a.x.-1')
69 5
70 >>> mget({'a': {'x': 1, 'b': [{'c': 2}]}}, 'a.b.0.c')
71 2
72 Keys that contains '.' can be accessed by escaping them:
73 >>> dct = {'a': {'x': 1}, 'a.x': 3, 'a.y': 4}
74 >>> mget(dct, 'a.x')
75 1
76 >>> mget(dct, r'a\.x')
77 3
78 >>> mget(dct, r'a.y')
79 >>> mget(dct, r'a\.y')
80 4
81 As a consequence, if your key contains a '\', you should also escape it:
82 >>> dct = {r'a\x': 3, r'a\.x': 4, 'a.x': 5, 'a\\': {'x': 6}}
83 >>> mget(dct, r'a\\x')
84 3
85 >>> mget(dct, r'a\\\.x')
86 4
87 >>> mget(dct, r'a\\.x')
88 6
89 >>> mget({'a\\': {'b': 1}}, r'a\\.b')
90 1
91 >>> mget({r'a.b\.c': 1}, r'a\.b\\\.c')
92 1
93 And even empty strings key are supported:
94 >>> dct = {r'a': {'': {'y': 3}, 'y': 4}, 'b': {'': {'': 1}}, '': 2}
95 >>> mget(dct, r'a..y')
96 3
97 >>> mget(dct, r'a.y')
98 4
99 >>> mget(dct, r'')
100 2
101 >>> mget(dct, r'b..')
102 1
103 mget support also default value if the key is not found:
104 >>> mget({'a': 1}, 'b.y', default='N/A')
105 'N/A'
106 but will complain if you are trying to get into a leaf:
107 >>> mget({'a': 1}, 'a.y', default='N/A') # doctest: +ELLIPSIS
108 Traceback (most recent call last):
109 ...
110 TypeError: 'int' object ...
111 if the key is None, the whole dct should be sent back:
112 >>> mget({'a': 1}, None)
113 {'a': 1}
114 """
115 return aget(dct, tokenize(key), default)
116
117
118def aget(dct, key, default=None):
119 r"""Allow to get values deep in a dict with iterable keys
120 Accessing leaf values is quite straightforward:
121 >>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
122 >>> aget(dct, ('a', 'x'))
123 1
124 >>> aget(dct, ('a', 'b', 'c'))
125 2
126 If key is empty, it returns unchanged the ``dct`` value.
127 >>> aget({'x': 1}, ())
128 {'x': 1}
129 """
130 key = iter(key)
131 try:
132 head = next(key)
133 except StopIteration:
134 return dct
135 try:
136 value = dct[int(head)] if isinstance(dct, list) else dct[head]
137 except KeyError:
138 return default
139 return aget(value, key, default)
140
141
142def stderr(msg):
143 sys.stderr.write(msg + "\n")
144
145
146def die(msg, errlvl=1, prefix="Error: "):
147 stderr("%s%s" % (prefix, msg))
148 sys.exit(errlvl)
149
150SIMPLE_TYPES = (str, int, float)
151COMPLEX_TYPES = (list, dict)
152
153
154def dump(value):
155 return value if isinstance(value, SIMPLE_TYPES) \
156 else yaml.dump(value, default_flow_style=False)
157
158
159def type_name(value):
160 """Returns pseudo-YAML type name of given value."""
161 return "struct" if isinstance(value, dict) else \
162 "sequence" if isinstance(value, (tuple, list)) else \
163 type(value).__name__
164
165
166def stdout(value):
167 sys.stdout.write(value)
168
169
170def main(args):
171 usage = """usage:
172 %(exname)s {get-value{,-0},get-type,keys{,-0},values{,-0}} KEY DEFAULT
173 """ % {"exname": EXNAME}
174 if len(args) == 0:
175 die(usage, errlvl=0, prefix="")
176 action = args[0]
177 key_value = None if len(args) == 1 else args[1]
178 default = args[2] if len(args) > 2 else ""
179 contents = yaml.load(sys.stdin)
180 try:
181 value = mget(contents, key_value, default)
182 except IndexError:
183 die("list index error in path %r." % key_value)
184 except (KeyError, TypeError):
185 die("invalid path %r." % key_value)
186
187 tvalue = type_name(value)
188 termination = "\0" if action.endswith("-0") else "\n"
189
190 if action == "get-value":
191 print(dump(value), end='')
192 elif action in ("get-values", "get-values-0"):
193 if isinstance(value, dict):
194 for k, v in value.iteritems():
195 stdout("%s%s%s%s" % (dump(k), termination,
196 dump(v), termination))
197 elif isinstance(value, list):
198 for l in value:
199 stdout("%s%s" % (dump(l), termination))
200 else:
201 die("%s does not support %r type. "
202 "Please provide or select a sequence or struct."
203 % (action, tvalue))
204 elif action == "get-type":
205 print(tvalue)
206 elif action in ("keys", "keys-0", "values", "values-0"):
207 if isinstance(value, dict):
208 method = value.keys if action.startswith("keys") else value.values
209 for k in method():
210 stdout("%s%s" % (dump(k), termination))
211 else:
212 die("%s does not support %r type. "
213 "Please provide or select a struct." % (action, tvalue))
214 else:
215 die("Invalid argument.\n%s" % usage)
216
217
218if __name__ == "__main__":
219 sys.exit(main(sys.argv[1:]))
0220
=== added directory 'mojo-spec-helpers/utils/tests'
=== added file 'mojo-spec-helpers/utils/tests/README.md'
--- mojo-spec-helpers/utils/tests/README.md 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/tests/README.md 2015-09-08 08:23:55 +0000
@@ -0,0 +1,67 @@
1Tests for python managers
2===
3
4These are tests for `container_managers.py`, `cache_managers.py`
5and some functions in `mojo_utils.py`.
6
7Setup
8---
9
10### A test container
11
12Tests for `container_managers.py` require a test container to be set up with objects named as follows:
13
14- latest-build-label
15- deployed-build-label
16- deployed-spec-revno
17- code-upgrade-test-build-label-01-test-build-label-02-succeeded
18- mojo-run-155-succeeded
19
20This container should be openly readable:
21
22``` bash
23swift post --read-acl .r:* ${TEST_CONTAINER_NAME}
24```
25
26You should then set the URL for this container in the environment variable `TEST_CONTAINER_URL`:
27
28``` bash
29export TEST_CONTAINER_URL=$(swift stat -v ${TEST_CONTAINER_NAME} | egrep -o 'http.*$')
30```
31
32### A local swift account
33
34You also need to make the credentials for connecting to a swift account available with `OS_*` environment variables:
35
36``` bash
37export OS_USERNAME=${USERNAME}
38export OS_TENANT_NAME=${TENANT_NAME}
39export OS_PASSWORD=${PASSWORD}
40export OS_STORAGE_URL=${OPTIONAL_STORAGE_URL}
41export OS_AUTH_URL=${AUTH_URL}
42export OS_REGION_NAME=${REGION_NAME}
43```
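
A quick way to confirm these credentials work before running the tests (a sketch, using the standard swift client) is to stat the account:

``` bash
# Prints account headers (containers, objects, bytes) if the OS_* credentials are valid
swift stat
```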
44
45### PYTHONPATH
46
47You also need to add the parent directory to your `PYTHONPATH` to run the tests:
48
49``` bash
50export PYTHONPATH=..
51```
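
The relative path only works while your shell stays in this `tests/` directory; an equivalent absolute path (a sketch, run from this directory) is:

``` bash
# Point PYTHONPATH at the parent utils/ directory, independent of later directory changes
export PYTHONPATH="$(cd .. && pwd)"
```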
52
53Running the tests
54---
55
56You can either run the tests directly with Python:
57
58``` bash
59./run_tests.py
60```
61
62Or with `pytest`:
63
64``` bash
65py.test
66```
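
Each test module also has an `if __name__ == "__main__"` block, so a single group of tests can be run on its own; for example, the cache manager tests, which only touch a local temp file:

``` bash
# Run just the JsonCache tests
python test_cache_managers.py
```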
67
068
=== added file 'mojo-spec-helpers/utils/tests/run_tests.py'
--- mojo-spec-helpers/utils/tests/run_tests.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/tests/run_tests.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,18 @@
1#!/usr/bin/env python
2
3"""
4These are tests for container_managers.py, cache_managers.py
5and some functions in mojo_utils.py
6
7To run the container_tests, you'll need to set several environment
8variables - see test_container_managers.py
9"""
10
11from test_container_managers import container_tests
12from test_cache_managers import test_cache_managers
13from test_mojo_utils import test_get_mojo_spec_revno
14
15
16container_tests()
17test_cache_managers()
18test_get_mojo_spec_revno()
019
=== added file 'mojo-spec-helpers/utils/tests/test_cache_managers.py'
--- mojo-spec-helpers/utils/tests/test_cache_managers.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/tests/test_cache_managers.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,80 @@
1# System imports
2import os
3import subprocess
4import json
5
6# Local imports
7from cache_managers import JsonCache
8
9
10def _check_cache_file(cache, cache_path):
11 # Check file exists
12 assert os.path.isfile(cache_path), "Cache file not created"
13 print u"\u2713 Cache file exists: {}".format(cache_path)
14
15 # Check the data
16 with open(cache_path) as cache_file:
17 assert json.load(cache_file) == cache.get_cache(), "Bad data"
18 print u"\u2713 Cache file data is correct"
19
20
21def test_cache_managers():
22 print (
23 "\n===\n"
24 "Test JsonCache"
25 "\n===\n"
26 )
27
28 cache_path = subprocess.check_output(
29 'mktemp -u /tmp/cache-XXXX.json'.split()
30 ).strip()
31
32 cache = JsonCache(cache_path=cache_path)
33
34 fake_data = {
35 'sentence': 'hello world',
36 'number': 12,
37 'array': [1, 2, 3, "fish"]
38 }
39
40 # Insert items using "set"
41 for key, value in fake_data.iteritems():
42 cache.set(key, value)
43
44 # Check data against inserted data
45 assert fake_data == cache.get_cache(), "Data not set correctly"
46 print u"\u2713 Data correctly inserted"
47
48 # Verify cache file
49 _check_cache_file(cache, cache_path)
50
51 # Check retrieving each key with "get"
52 for key, value in fake_data.iteritems():
53 assert cache.get(key) == value
54
55 print u"\u2713 Successfully retrieved items with 'get'"
56
57 # Check wiping the cache
58 cache.wipe()
59
60 assert not os.path.isfile(cache_path), "Cache file shouldn't exist!"
61 assert not cache.get(fake_data.keys()[0]), "Cache still returning data"
62 print u"\u2713 Cache successfully wiped"
63
64 # Recreate cache with "put_cache"
65 cache.put_cache(fake_data)
66
67 # Check data against inserted data
68 assert fake_data == cache.get_cache()
69 print u"\u2713 Data correctly inserted"
70
71 # Check file integrity
72 _check_cache_file(cache, cache_path)
73
74 # Clean up
75 cache.wipe()
76 assert not os.path.isfile(cache_path), "Cache file shouldn't exist!"
77 print u"\u2713 Deleted {}".format(cache_path)
78
79if __name__ == "__main__":
80 test_cache_managers()
081
=== added file 'mojo-spec-helpers/utils/tests/test_container_managers.py'
--- mojo-spec-helpers/utils/tests/test_container_managers.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/tests/test_container_managers.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,188 @@
1# System
2import os
3
4# Local imports
5from container_managers import (
6 BuildContainer,
7 CIContainer,
8 DeployedEnvironmentContainer,
9 LocalEnvironmentSwiftContainer
10)
11from mojo_utils import build_swift_connection
12
13"""
14Tests for container_managers.
15
16To test these you'll need to set a TEST_CONTAINER_URL,
17which should be the HTTP URL to a swift container which contains
18the following publicly readable objects:
19- latest-build-label
20- deployed-build-label
21- deployed-spec-revno
22- code-upgrade-test-build-label-01-test-build-label-02-succeeded
23- mojo-run-155-succeeded
24
25You'll also need to have the following environment variables setup
26with credentials to connect to a valid (testing) Swift account:
27
28OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_REGION_NAME
29"""
30
31test_container_url = os.environ.get('TEST_CONTAINER_URL')
32
33
34def test_deployed_environment_container():
35 print (
36 "\n===\n"
37 "Test DeployedEnvironmentContainer"
38 "\n===\n"
39 )
40
41 deployed_env_container = DeployedEnvironmentContainer(
42 container_url=test_container_url,
43 )
44
45 build_label = deployed_env_container.deployed_build_label()
46 assert build_label, "Build label missing"
47 print u"\u2713 Found build label: {}".format(build_label)
48
49 spec_revno = deployed_env_container.deployed_spec_revno()
50 assert spec_revno, "Spec revno missing"
51 print u"\u2713 Found spec revno (from web): {}".format(spec_revno)
52
53
54def test_local_environment_swift_container():
55 print (
56 "\n===\n"
57 "Test LocalEnvironmentSwiftContainer"
58 "\n===\n"
59 )
60
61 swift_connection = build_swift_connection()
62
63 container_name = 'test-container'
64
65 swift_connection.put_container(container_name)
66
67 container = LocalEnvironmentSwiftContainer(
68 swift_connection=swift_connection,
69 container_name=container_name
70 )
71
72 deployed_build_label = "fiddlesticksandfishes"
73 previous_build_label = "fishesandfiddlesticks"
74 deployed_spec_revno = '111'
75
76 container.save_deployed_build_label(deployed_build_label)
77 container.save_previous_build_label(previous_build_label)
78 container.save_mojo_spec_revno(deployed_spec_revno)
79
80 assert container.deployed_build_label() == deployed_build_label
81 assert container.previous_build_label() == previous_build_label
82 assert container.deployed_spec_revno() == deployed_spec_revno
83 print u"\u2713 Saved and retrieved build_labels and spec_revno"
84
85 # Get names for objects
86 previous_obj = container.previous_build_obj
87 deployed_obj = container.deployed_build_obj
88 revno_obj = container.deployed_revno_obj
89 upgrade_object = container.code_upgrade_succeeded_template.format(
90 previous_build_label, deployed_build_label
91 )
92 run_object = container.mojo_run_succeeded_template.format(
93 deployed_spec_revno
94 )
95
96 # Remove the objects if they exist
97 for object_name in [previous_obj, deployed_obj, revno_obj, upgrade_object]:
98 try:
99 swift_connection.delete_object(container_name, object_name)
100        except Exception:
101            pass  # the object may not exist yet
102
103 container.save_code_upgrade_succeeded(
104 previous_build_label, deployed_build_label
105 )
106
107 container.save_mojo_run_succeeded(deployed_spec_revno)
108
109 upgrade_head = swift_connection.head_object(container_name, upgrade_object)
110
111 run_head = swift_connection.head_object(container_name, run_object)
112
113 # Make sure the objects were saved to swift
114 assert int(upgrade_head['content-length']) > 0
115 assert int(run_head['content-length']) > 0
116
117 print u"\u2713 Code upgrade and mojo run successfully saved"
118
119
120def test_latest_build():
121 print (
122 "\n===\n"
123 "Test BuildContainer"
124 "\n===\n"
125 )
126
127 build_container = BuildContainer(
128 container_url=test_container_url
129 )
130
131 build_label = build_container.latest_build_label()
132 assert build_label, "Build label missing"
133 print u"\u2713 Found latest build label: {}".format(build_label)
134
135
136def test_ci_container():
137 print (
138 "\n===\n"
139 "Test CIContainer"
140 "\n===\n"
141 )
142
143 ci_container = CIContainer(
144 container_url=test_container_url
145 )
146
147 build_one = "test-build-label-01"
148 build_two = "test-build-label-02"
149
150 good_upgrade = ci_container.has_code_upgrade_been_tested(
151 from_build_label=build_one,
152 to_build_label=build_two
153 )
154 bad_upgrade = ci_container.has_code_upgrade_been_tested(
155 from_build_label=build_two,
156 to_build_label=build_one
157 )
158
168 assert good_upgrade, "Good upgrade test missing"
169 assert bad_upgrade is False, "Bad upgrade isn't false"
170 print u"\u2713 Upgrades checked successfully"
171
172 good_mojo_test = ci_container.has_mojo_run_been_tested(spec_revno="155")
173 bad_mojo_test = ci_container.has_mojo_run_been_tested(spec_revno="99999")
174
175 assert good_mojo_test, "Good mojo test missing"
176 assert bad_mojo_test is False, "Bad mojo test isn't false"
177    print u"\u2713 Mojo runs checked successfully"
178
179
180def container_tests():
181 test_deployed_environment_container()
182 test_local_environment_swift_container()
183 test_latest_build()
184 test_ci_container()
185
186
187if __name__ == "__main__":
188 container_tests()
0189
=== added file 'mojo-spec-helpers/utils/tests/test_mojo_utils.py'
--- mojo-spec-helpers/utils/tests/test_mojo_utils.py 1970-01-01 00:00:00 +0000
+++ mojo-spec-helpers/utils/tests/test_mojo_utils.py 2015-09-08 08:23:55 +0000
@@ -0,0 +1,23 @@
1# System imports
2import os
3
4# Local imports
5from mojo_utils import get_mojo_spec_revno
6
7
8def test_get_mojo_spec_revno():
9 print (
10 "\n===\n"
11 "Test get_mojo_spec_revno"
12 "\n===\n"
13 )
14
15 os.environ['MOJO_SPEC_DIR'] = os.path.abspath(
16 __file__ + '/../../../..'
17 )
18
19 assert get_mojo_spec_revno().isdigit()
20 print u"\u2713 Successfully retrieved revno"
21
22if __name__ == "__main__":
23 test_get_mojo_spec_revno()
