Merge lp:~bigdata-dev/charms/trusty/hdp-pig/trunk into lp:charms/trusty/hdp-pig

Proposed by Charles Butler
Status: Merged
Merged at revision: 19
Proposed branch: lp:~bigdata-dev/charms/trusty/hdp-pig/trunk
Merge into: lp:charms/trusty/hdp-pig
Diff against target: 188 lines (+57/-17)
4 files modified
hooks/hooks.py (+25/-6)
metadata.yaml (+2/-0)
tests/10-deploy-hadoop-pig (+21/-7)
tests/hadoop-pig-cluster.yaml (+9/-4)
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/hdp-pig/trunk
Reviewer                 Review Type    Date Requested    Status
amir sanjar              community                        Approve
charmers                                                  Pending
Review via email: mp+244184@code.launchpad.net

Description of the change

Adds Amir's OpenStack workarounds: the hooks now record each related unit's private address and hostname in /etc/hosts (including via a new hadoop-nodes relation) and add the ubuntu user to the hadoop group. Also updates the test case for a two-unit compute cluster.
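
For context, the /etc/hosts writes in this branch work around providers such as OpenStack where units cannot resolve one another's hostnames over DNS. fileSetKV itself lives in the charm's bdutils library and is not part of this diff; the following is only a minimal sketch, assuming fileSetKV(path, key, value) inserts or replaces the line in the file that starts with the given key (here the IP plus a trailing space):

# Hypothetical stand-in for bdutils.fileSetKV, shown only to illustrate
# the workaround; the real helper ships in the charm's lib/ directory.
def file_set_kv(path, key, value):
    """Insert or replace the line in `path` that starts with `key`."""
    with open(path) as f:
        lines = f.readlines()
    entry = key + value + '\n'
    for i, line in enumerate(lines):
        if line.startswith(key):
            lines[i] = entry      # refresh an existing IP->hostname mapping
            break
    else:
        lines.append(entry)       # first time this IP is seen: append it
    with open(path, 'w') as f:
        f.writelines(lines)

# The hooks call it with the IP (plus a trailing space) as the key, e.g.:
# file_set_kv('/etc/hosts', '10.5.0.12 ', 'compute-node-0')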

Revision history for this message
amir sanjar (asanjar):
review: Approve

Preview Diff

=== added symlink 'hooks/hadoop-nodes-relation-changed'
=== target is u'hooks.py'
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py 2014-08-07 16:47:38 +0000
+++ hooks/hooks.py 2014-12-09 17:14:57 +0000
@@ -3,10 +3,11 @@
 import os
 import sys
 from charmhelpers.core import hookenv, host
-from bdutils import chmodRecursive
+from bdutils import chmodRecursive, fileSetKV
 from hdputils import install_base_pkg, config_all_nodes, home, hdpScript, setHadoopConfigXML, configureYarnClient, setHadoopEnvVar
 from shutil import copyfile, copy
-from charmhelpers.core.hookenv import log, relation_get
+from charmhelpers.core.hookenv import log, relation_get, unit_get
+from charmhelpers.lib.utils import get_unit_hostname
 
 sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib'))
 
@@ -36,6 +37,7 @@
 
 pigHDPConfPath = os.path.join(os.path.sep,home, hdpScript, "configuration_files", "pig")
 hadoopHDPConfPath = os.path.join(os.path.sep,home, hdpScript, "configuration_files", "core_hadoop","hadoop-env.sh")
+hosts_path = os.path.join(os.path.sep, 'etc', 'hosts')
 
 hooks = hookenv.Hooks()
 log = hookenv.log
@@ -49,6 +51,9 @@
     packages =['ntp', 'openjdk-7-jdk',"pig"]
     install_base_pkg(packages)
     config_pig_node()
+    fileSetKV(hosts_path, unit_get('private-address')+' ', get_unit_hostname())
+    host.add_user_to_group("ubuntu", "hadoop")
+
 
 
 @hooks.hook('config-changed')
@@ -85,9 +90,12 @@
     if not resourceManagerReady:
         log ("PIG ==> resourcemanager not ready","INFO")
         sys.exit(0)
-    log("PIG ==> resourcemanager IP={}".format(relation_get('private-address')),"INFO")
+    rm_ip = relation_get('private-address')
+    log("PIG ==> resourcemanager IP={}".format(rm_ip),"INFO")
     setHadoopEnvVar()
-    configureYarnClient(relation_get('private-address'))
+    configureYarnClient(rm_ip)
+    rm_hostname = relation_get('resourceManager_hostname')
+    fileSetKV(hosts_path, rm_ip+' ', rm_hostname)
 
 @hooks.hook('namenode-relation-changed')
 def namenode_relation_changed():
@@ -97,10 +105,21 @@
     if not nameNodeReady:
         log("PIG ==> namenode not ready- Changed phase","INFO")
         sys.exit(0)
-    log("PIG ==> namenode_IP={}".format(relation_get('private-address')),"INFO")
+    nn_ip = relation_get('private-address')
+    log("PIG ==> namenode_IP={}".format(nn_ip),"INFO")
     setHadoopEnvVar()
-    setHadoopConfigXML("core-site.xml", "fs.defaultFS", "hdfs://"+relation_get('private-address')+":8020")
+    setHadoopConfigXML("core-site.xml", "fs.defaultFS", "hdfs://"+nn_ip+":8020")
+    nn_hostname = relation_get("namenode_hostname")
+    fileSetKV(hosts_path, nn_ip+' ', nn_hostname)
 
+@hooks.hook('hadoop-nodes-relation-changed')
+def hadoop_nodes_relation_changed():
+    compute_ip = relation_get('private-address')
+    hostname = relation_get('hostname')
+    log("==> Connect to hadoop compute node {}={}".format(compute_ip, hostname),"INFO")
+    if hostname != None:
+        fileSetKV(hosts_path, compute_ip+' ', hostname)
+
 if __name__ == "__main__":
     # execute a hook based on the name the program is called by
     hooks.execute(sys.argv)

=== modified file 'metadata.yaml'
--- metadata.yaml 2014-09-10 18:24:08 +0000
+++ metadata.yaml 2014-12-09 17:14:57 +0000
@@ -18,3 +18,5 @@
     interface: mapred
   namenode:
     interface: dfs
+  hadoop-nodes:
+    interface: mapred

=== modified file 'tests/10-deploy-hadoop-pig'
--- tests/10-deploy-hadoop-pig 2014-09-19 15:13:12 +0000
+++ tests/10-deploy-hadoop-pig 2014-12-09 17:14:57 +0000
@@ -12,6 +12,17 @@
         f = open(os.path.join(os.path.dirname(__file__), 'hadoop-pig-cluster.yaml'))
         bun = f.read()
         self.d.load(yaml.safe_load(bun))
+# self.d.add('yarn-hdfs-master', charm='/home/sanjar/development/charms/trusty/hdp-hadoop',constraints={'cpu-cores':4, 'mem':8192})
+# self.d.add('compute-node', charm='/home/sanjar/development/charms/trusty/hdp-hadoop', constraints={'cpu-cores':4, 'mem':8192})
+        # self.d.relate("yarn-hdfs-master:namenode","compute-node:datanode" )
+        # self.d.relate("yarn-hdfs-master:resourcemanager","compute-node:nodemanager" )
+        # self.d.add_unit("compute-node", units=1)
+        # self.d.add('hdp-pig', charm='/home/sanjar/development/charms/trusty/hdp-pig', constraints={'cpu-cores':4, 'mem':8192})
+        # self.d.relate("hdp-pig:namenode","yarn-hdfs-master:namenode" )
+        # self.d.relate("hdp-pig:resourcemanager","yarn-hdfs-master:resourcemanager" )
+        # self.d.relate("compute-node:hadoop-nodes","hdp-pig:hadoop-nodes")
+
+
         try:
             self.d.setup(timeout=9000000)
             self.d.sentry.wait()
@@ -31,7 +42,7 @@
 
     def test_remote_hdfs_status(self):
         o,c= self.pig_unit.run("sudo su hdfs -c ' hdfs dfsadmin -report'")
-        if o.find('Datanodes available: 1') == -1:
+        if o.find('Datanodes available: 2') == -1:
             amulet.raise_status(amulet.FAIL, msg="Remote HDFS not available")
 
     def test_remote_yarn_status(self):
@@ -40,22 +51,25 @@
             amulet.raise_status(amulet.FAIL, msg="Remote YARN not available")
     ##
 
-    def test_hdfs_mkdir(self):
-        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/hduser'")
+    def test_hdfs(self):
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /tmp'")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/ubuntu'")
         if c != 0:
             amulet.raise_status(amulet.FAIL, msg=" Created a user directory on hdfs FAILED")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -chown -R ubuntu:hdfs /tmp'")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -chown -R ubuntu:hdfs /user'")
+        if c != 0:
+            amulet.raise_status(amulet.FAIL, msg=" Assign hdfs owner FAILED")
 
    # def test_hdfs_copyfile(self):
    #     if c != 0:
    #         amulet.raise_status(amulet.FAIL, msg=" Created a user directory on hdfs FAILED")
 
     def test_run_pig_mapreduce(self):
-        o,c= self.pig_unit.run("sudo su hdfs -c 'hdfs dfs -copyFromLocal /etc/passwd /user/hduser/passwd'")
-        o,c= self.pig_unit.run("echo \"A = load \'/user/hduser/passwd\' using PigStorage(\':\');\">/tmp/id.pig ")
+        o,c= self.pig_unit.run("sudo su hdfs -c 'hdfs dfs -copyFromLocal /etc/passwd /user/ubuntu/passwd'")
+        o,c= self.pig_unit.run("echo \"A = load \'/user/ubuntu/passwd\' using PigStorage(\':\');\">/tmp/id.pig ")
         o,c= self.pig_unit.run("echo \"B = foreach A generate \\$0 as id; store B into \'/tmp/id.out\';\">>/tmp/id.pig ")
         o,c= self.pig_unit.run("sudo su hdfs -c 'pig -l /tmp/pig.log /tmp/id.pig'")
-        if c != 0:
-            amulet.raise_status(amulet.FAIL, msg=" Pig script-> mapreduce FAILED")
         o,c= self.pig_unit.run("hadoop fs -ls /tmp/id.out/_SUCCESS")
         if c != 0:
             amulet.raise_status(amulet.FAIL, msg=" Pig script-> mapreduce passes but mapreduce failed during execution")

=== modified file 'tests/hadoop-pig-cluster.yaml'
--- tests/hadoop-pig-cluster.yaml 2014-09-17 19:36:42 +0000
+++ tests/hadoop-pig-cluster.yaml 2014-12-09 17:14:57 +0000
@@ -1,20 +1,23 @@
 hortonworks-pig-solution:
   services:
     "compute-node":
-      charm: "cs:~asanjar/trusty/hdp-hadoop"
-      num_units: 1
+      charm: "cs:trusty/hdp-hadoop"
+      num_units: 2
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "656.6386478556842"
         "gui-y": "997.2758247589375"
     "yarn-hdfs-master":
-      charm: "cs:~asanjar/trusty/hdp-hadoop"
+      charm: "cs:trusty/hdp-hadoop"
       num_units: 1
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "657.3613521443158"
         "gui-y": "654.7241752410625"
     "hdp-pig":
-      charm: "cs:~asanjar/trusty/hdp-pig"
+      charm: "hdp-pig"
       num_units: 1
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "675.9159435670526"
         "gui-y": "279.7241752410624"
@@ -27,4 +30,6 @@
       - "compute-node:nodemanager"
     - - "hdp-pig:resourcemanager"
       - "yarn-hdfs-master:resourcemanager"
+    - - "compute-node:hadoop-nodes"
+      - "hdp-pig:hadoop-nodes"
   series: trusty
