Merge lp:~bigdata-dev/charms/bundles/apache-analytics-sql/trunk into lp:~charmers/charms/bundles/apache-analytics-sql/bundle

Proposed by Cory Johns
Status: Merged
Merged at revision: 16
Proposed branch: lp:~bigdata-dev/charms/bundles/apache-analytics-sql/trunk
Merge into: lp:~charmers/charms/bundles/apache-analytics-sql/bundle
Diff against target: 203 lines (+136/-7)
4 files modified
bundle-dev.yaml (+1/-1)
bundle.yaml (+6/-6)
tests/01-bundle.py (+126/-0)
tests/tests.yaml (+3/-0)
To merge this branch: bzr merge lp:~bigdata-dev/charms/bundles/apache-analytics-sql/trunk
Reviewer Review Type Date Requested Status
Kevin W Monroe Approve
Review via email: mp+273282@code.launchpad.net

Description of the change

Tests for CWR

To post a comment you must log in.
18. By Cory Johns

Added tests.yaml and removed no longer needed 00-setup

19. By Cory Johns

Fixed reference to mysql in dev bundle

Revision history for this message
Kevin W Monroe (kwmonroe) wrote :

LGTM, +1

review: Approve
20. By Cory Johns

Updated to latest charm revisions

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'bundle-dev.yaml'
2--- bundle-dev.yaml 2015-09-17 14:37:10 +0000
3+++ bundle-dev.yaml 2015-10-07 21:29:42 +0000
4@@ -21,7 +21,7 @@
5 gui-y: "200"
6 constraints: mem=3G
7 mysql:
8- charm: cs:~bigdata-dev/trusty/mysql
9+ charm: cs:trusty/mysql
10 num_units: 1
11 options:
12 binlog-format: ROW
13
14=== modified file 'bundle.yaml'
15--- bundle.yaml 2015-09-28 14:07:17 +0000
16+++ bundle.yaml 2015-10-07 21:29:42 +0000
17@@ -1,20 +1,20 @@
18 services:
19 compute-slave:
20- charm: cs:trusty/apache-hadoop-compute-slave-8
21+ charm: cs:trusty/apache-hadoop-compute-slave-9
22 num_units: 3
23 annotations:
24 gui-x: "300"
25 gui-y: "200"
26 constraints: mem=3G
27 hdfs-master:
28- charm: cs:trusty/apache-hadoop-hdfs-master-8
29+ charm: cs:trusty/apache-hadoop-hdfs-master-9
30 num_units: 1
31 annotations:
32 gui-x: "600"
33 gui-y: "350"
34 constraints: mem=7G
35 hive:
36- charm: cs:trusty/apache-hive-9
37+ charm: cs:trusty/apache-hive-10
38 num_units: 1
39 annotations:
40 gui-x: "1200"
41@@ -29,19 +29,19 @@
42 gui-x: "1200"
43 gui-y: "450"
44 plugin:
45- charm: cs:trusty/apache-hadoop-plugin-7
46+ charm: cs:trusty/apache-hadoop-plugin-8
47 annotations:
48 gui-x: "900"
49 gui-y: "200"
50 secondary-namenode:
51- charm: cs:trusty/apache-hadoop-hdfs-secondary-6
52+ charm: cs:trusty/apache-hadoop-hdfs-secondary-7
53 num_units: 1
54 annotations:
55 gui-x: "600"
56 gui-y: "600"
57 constraints: mem=7G
58 yarn-master:
59- charm: cs:trusty/apache-hadoop-yarn-master-6
60+ charm: cs:trusty/apache-hadoop-yarn-master-7
61 num_units: 1
62 annotations:
63 gui-x: "600"
64
65=== added directory 'tests'
66=== added file 'tests/01-bundle.py'
67--- tests/01-bundle.py 1970-01-01 00:00:00 +0000
68+++ tests/01-bundle.py 2015-10-07 21:29:42 +0000
69@@ -0,0 +1,126 @@
70+#!/usr/bin/env python3
71+
72+import os
73+import unittest
74+
75+import yaml
76+import amulet
77+
78+
79+class TestBundle(unittest.TestCase):
80+ bundle_file = os.path.join(os.path.dirname(__file__), '..', 'bundle.yaml')
81+
82+ @classmethod
83+ def setUpClass(cls):
84+ cls.d = amulet.Deployment(series='trusty')
85+ with open(cls.bundle_file) as f:
86+ bun = f.read()
87+ bundle = yaml.safe_load(bun)
88+ cls.d.load(bundle)
89+ cls.d.setup(timeout=1800)
90+ cls.d.sentry.wait_for_messages({'hive': 'Ready'}, timeout=1800)
91+ cls.hdfs = cls.d.sentry['hdfs-master'][0]
92+ cls.yarn = cls.d.sentry['yarn-master'][0]
93+ cls.slave = cls.d.sentry['compute-slave'][0]
94+ cls.secondary = cls.d.sentry['secondary-namenode'][0]
95+ cls.hive = cls.d.sentry['hive'][0]
96+
97+ def test_components(self):
98+ """
99+ Confirm that all of the required components are up and running.
100+ """
101+ hdfs, retcode = self.hdfs.run("pgrep -a java")
102+ yarn, retcode = self.yarn.run("pgrep -a java")
103+ slave, retcode = self.slave.run("pgrep -a java")
104+ secondary, retcode = self.secondary.run("pgrep -a java")
105+ hive, retcode = self.hive.run("pgrep -a java")
106+
107+ # .NameNode needs the . to differentiate it from SecondaryNameNode
108+ assert '.NameNode' in hdfs, "NameNode not started"
109+ assert '.NameNode' not in yarn, "NameNode should not be running on yarn-master"
110+ assert '.NameNode' not in slave, "NameNode should not be running on compute-slave"
111+ assert '.NameNode' not in secondary, "NameNode should not be running on secondary-namenode"
112+ assert '.NameNode' not in hive, "NameNode should not be running on hive"
113+
114+ assert 'ResourceManager' in yarn, "ResourceManager not started"
115+ assert 'ResourceManager' not in hdfs, "ResourceManager should not be running on hdfs-master"
116+ assert 'ResourceManager' not in slave, "ResourceManager should not be running on compute-slave"
117+ assert 'ResourceManager' not in secondary, "ResourceManager should not be running on secondary-namenode"
118+ assert 'ResourceManager' not in hive, "ResourceManager should not be running on hive"
119+
120+ assert 'JobHistoryServer' in yarn, "JobHistoryServer not started"
121+ assert 'JobHistoryServer' not in hdfs, "JobHistoryServer should not be running on hdfs-master"
122+ assert 'JobHistoryServer' not in slave, "JobHistoryServer should not be running on compute-slave"
123+ assert 'JobHistoryServer' not in secondary, "JobHistoryServer should not be running on secondary-namenode"
124+ assert 'JobHistoryServer' not in hive, "JobHistoryServer should not be running on hive"
125+
126+ assert 'NodeManager' in slave, "NodeManager not started"
127+ assert 'NodeManager' not in yarn, "NodeManager should not be running on yarn-master"
128+ assert 'NodeManager' not in hdfs, "NodeManager should not be running on hdfs-master"
129+ assert 'NodeManager' not in secondary, "NodeManager should not be running on secondary-namenode"
130+ assert 'NodeManager' not in hive, "NodeManager should not be running on hive"
131+
132+ assert 'DataNode' in slave, "DataNode not started"
133+ assert 'DataNode' not in yarn, "DataNode should not be running on yarn-master"
134+ assert 'DataNode' not in hdfs, "DataNode should not be running on hdfs-master"
135+ assert 'DataNode' not in secondary, "DataNode should not be running on secondary-namenode"
136+ assert 'DataNode' not in hive, "DataNode should not be running on hive"
137+
138+ assert 'SecondaryNameNode' in secondary, "SecondaryNameNode not started"
139+ assert 'SecondaryNameNode' not in yarn, "SecondaryNameNode should not be running on yarn-master"
140+ assert 'SecondaryNameNode' not in hdfs, "SecondaryNameNode should not be running on hdfs-master"
141+ assert 'SecondaryNameNode' not in slave, "SecondaryNameNode should not be running on compute-slave"
142+ assert 'SecondaryNameNode' not in hive, "SecondaryNameNode should not be running on hive"
143+
144+ assert 'HiveServer2' in hive, 'Hive should be running on hive'
145+
146+ def test_hdfs_dir(self):
147+ """
148+ Validate a few admin hadoop activities on the HDFS cluster.
149+ 1) This test validates mkdir on hdfs cluster
150+ 2) This test validates change hdfs dir owner on the cluster
151+ 3) This test validates setting hdfs directory access permission on the cluster
152+
153+ NB: These are order-dependent, so must be done as part of a single test case.
154+ """
155+ output, retcode = self.hive.run("su hdfs -c 'hdfs dfs -mkdir -p /user/ubuntu'")
156+ assert retcode == 0, "Created a user directory on hdfs FAILED:\n{}".format(output)
157+ output, retcode = self.hive.run("su hdfs -c 'hdfs dfs -chown ubuntu:ubuntu /user/ubuntu'")
158+ assert retcode == 0, "Assigning an owner to hdfs directory FAILED:\n{}".format(output)
159+ output, retcode = self.hive.run("su hdfs -c 'hdfs dfs -chmod -R 755 /user/ubuntu'")
160+ assert retcode == 0, "setting directory permission on hdfs FAILED:\n{}".format(output)
161+
162+ def test_yarn_mapreduce_exe(self):
163+ """
164+ Validate yarn mapreduce operations:
165+ 1) validate mapreduce execution - writing to hdfs
166+ 2) validate successful mapreduce operation after the execution
167+ 3) validate mapreduce execution - reading and writing to hdfs
168+ 4) validate successful mapreduce operation after the execution
169+ 5) validate successful deletion of mapreduce operation result from hdfs
170+
171+ NB: These are order-dependent, so must be done as part of a single test case.
172+ """
173+ jar_file = '/usr/lib/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar'
174+ test_steps = [
175+ ('teragen', "su ubuntu -c 'hadoop jar {} teragen 10000 /user/ubuntu/teragenout'".format(jar_file)),
176+ ('mapreduce #1', "su hdfs -c 'hdfs dfs -ls /user/ubuntu/teragenout/_SUCCESS'"),
177+ ('terasort', "su ubuntu -c 'hadoop jar {} terasort /user/ubuntu/teragenout /user/ubuntu/terasortout'".
178+ format(jar_file)),
179+ ('mapreduce #2', "su hdfs -c 'hdfs dfs -ls /user/ubuntu/terasortout/_SUCCESS'"),
180+ ('cleanup', "su hdfs -c 'hdfs dfs -rm -r /user/ubuntu/teragenout'"),
181+ ]
182+ for name, step in test_steps:
183+ output, retcode = self.hive.run(step)
184+ assert retcode == 0, "{} FAILED:\n{}".format(name, output)
185+
186+ def test_hive(self):
187+ self.hive.run("echo 'create table test(col1 int, col2 string); show tables;' > test.sql")
188+ output, retcode = self.hive.run("sudo su hive -c 'hive -f test.sql'")
189+
190+ assert retcode == 0, 'Hive command failed (%s): %s' % (retcode, output)
191+ self.assertEqual('test', output)
192+
193+
194+if __name__ == '__main__':
195+ unittest.main()
196
197=== added file 'tests/tests.yaml'
198--- tests/tests.yaml 1970-01-01 00:00:00 +0000
199+++ tests/tests.yaml 2015-10-07 21:29:42 +0000
200@@ -0,0 +1,3 @@
201+reset: false
202+packages:
203+ - amulet

Subscribers

People subscribed via source and target branches