Merge lp:~bigdata-dev/charms/trusty/hdp-pig/trunk into lp:charms/trusty/hdp-pig

Proposed by Charles Butler
Status: Merged
Merged at revision: 19
Proposed branch: lp:~bigdata-dev/charms/trusty/hdp-pig/trunk
Merge into: lp:charms/trusty/hdp-pig
Diff against target: 188 lines (+57/-17)
4 files modified
hooks/hooks.py (+25/-6)
metadata.yaml (+2/-0)
tests/10-deploy-hadoop-pig (+21/-7)
tests/hadoop-pig-cluster.yaml (+9/-4)
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/hdp-pig/trunk
Reviewer Review Type Date Requested Status
amir sanjar (community) Approve
charmers Pending
Review via email: mp+244184@code.launchpad.net

Description of the change

Adds Amir's OpenStack workarounds and updates the test case.
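The workarounds boil down to writing "<ip> <hostname>" entries into /etc/hosts on the pig unit (via bdutils.fileSetKV), since instances on some OpenStack clouds cannot resolve each other's hostnames through DNS. As a rough illustration only -- the real helper lives in the charm's bdutils lib and its exact behaviour is assumed here -- the update amounts to something like:

    # Hypothetical sketch of the /etc/hosts update done via fileSetKV in the
    # hooks below; the actual implementation is in the charm's bdutils lib.
    def set_hosts_entry(hosts_path, ip, hostname):
        with open(hosts_path, 'r') as f:
            lines = f.readlines()
        entry = '{} {}\n'.format(ip, hostname)
        for i, line in enumerate(lines):
            if line.startswith(ip + ' '):
                lines[i] = entry   # replace a stale entry for this IP
                break
        else:
            lines.append(entry)    # or append a new one
        with open(hosts_path, 'w') as f:
            f.writelines(lines)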

amir sanjar (asanjar) :
review: Approve

Preview Diff

=== added symlink 'hooks/hadoop-nodes-relation-changed'
=== target is u'hooks.py'
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py 2014-08-07 16:47:38 +0000
+++ hooks/hooks.py 2014-12-09 17:14:57 +0000
@@ -3,10 +3,11 @@
 import os
 import sys
 from charmhelpers.core import hookenv, host
-from bdutils import chmodRecursive
+from bdutils import chmodRecursive, fileSetKV
 from hdputils import install_base_pkg, config_all_nodes, home, hdpScript, setHadoopConfigXML, configureYarnClient, setHadoopEnvVar
 from shutil import copyfile, copy
-from charmhelpers.core.hookenv import log, relation_get
+from charmhelpers.core.hookenv import log, relation_get, unit_get
+from charmhelpers.lib.utils import get_unit_hostname
 
 sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib'))
 
@@ -36,6 +37,7 @@
 
 pigHDPConfPath = os.path.join(os.path.sep,home, hdpScript, "configuration_files", "pig")
 hadoopHDPConfPath = os.path.join(os.path.sep,home, hdpScript, "configuration_files", "core_hadoop","hadoop-env.sh")
+hosts_path = os.path.join(os.path.sep, 'etc', 'hosts')
 
 hooks = hookenv.Hooks()
 log = hookenv.log
@@ -49,6 +51,9 @@
     packages =['ntp', 'openjdk-7-jdk',"pig"]
     install_base_pkg(packages)
     config_pig_node()
+    fileSetKV(hosts_path, unit_get('private-address')+' ', get_unit_hostname())
+    host.add_user_to_group("ubuntu", "hadoop")
+
 
 
 @hooks.hook('config-changed')
@@ -85,9 +90,12 @@
     if not resourceManagerReady:
         log ("PIG ==> resourcemanager not ready","INFO")
         sys.exit(0)
-    log("PIG ==> resourcemanager IP={}".format(relation_get('private-address')),"INFO")
+    rm_ip = relation_get('private-address')
+    log("PIG ==> resourcemanager IP={}".format(rm_ip),"INFO")
     setHadoopEnvVar()
-    configureYarnClient(relation_get('private-address'))
+    configureYarnClient(rm_ip)
+    rm_hostname = relation_get('resourceManager_hostname')
+    fileSetKV(hosts_path, rm_ip+' ', rm_hostname)
 
 @hooks.hook('namenode-relation-changed')
 def namenode_relation_changed():
@@ -97,10 +105,21 @@
     if not nameNodeReady:
         log("PIG ==> namenode not ready- Changed phase","INFO")
         sys.exit(0)
-    log("PIG ==> namenode_IP={}".format(relation_get('private-address')),"INFO")
+    nn_ip = relation_get('private-address')
+    log("PIG ==> namenode_IP={}".format(nn_ip),"INFO")
     setHadoopEnvVar()
-    setHadoopConfigXML("core-site.xml", "fs.defaultFS", "hdfs://"+relation_get('private-address')+":8020")
+    setHadoopConfigXML("core-site.xml", "fs.defaultFS", "hdfs://"+nn_ip+":8020")
+    nn_hostname = relation_get("namenode_hostname")
+    fileSetKV(hosts_path, nn_ip+' ', nn_hostname)
 
+@hooks.hook('hadoop-nodes-relation-changed')
+def hadoop_nodes_relation_changed():
+    compute_ip = relation_get('private-address')
+    hostname = relation_get('hostname')
+    log("==> Connect to hadoop compute node {}={}".format(compute_ip, hostname),"INFO")
+    if hostname != None:
+        fileSetKV(hosts_path, compute_ip+' ', hostname)
+
 if __name__ == "__main__":
     # execute a hook based on the name the program is called by
     hooks.execute(sys.argv)
 
=== modified file 'metadata.yaml'
--- metadata.yaml 2014-09-10 18:24:08 +0000
+++ metadata.yaml 2014-12-09 17:14:57 +0000
@@ -18,3 +18,5 @@
     interface: mapred
   namenode:
     interface: dfs
+  hadoop-nodes:
+    interface: mapred
 
=== modified file 'tests/10-deploy-hadoop-pig'
--- tests/10-deploy-hadoop-pig 2014-09-19 15:13:12 +0000
+++ tests/10-deploy-hadoop-pig 2014-12-09 17:14:57 +0000
@@ -12,6 +12,17 @@
         f = open(os.path.join(os.path.dirname(__file__), 'hadoop-pig-cluster.yaml'))
         bun = f.read()
         self.d.load(yaml.safe_load(bun))
+#        self.d.add('yarn-hdfs-master', charm='/home/sanjar/development/charms/trusty/hdp-hadoop',constraints={'cpu-cores':4, 'mem':8192})
+#        self.d.add('compute-node', charm='/home/sanjar/development/charms/trusty/hdp-hadoop', constraints={'cpu-cores':4, 'mem':8192})
+        # self.d.relate("yarn-hdfs-master:namenode","compute-node:datanode" )
+        # self.d.relate("yarn-hdfs-master:resourcemanager","compute-node:nodemanager" )
+        # self.d.add_unit("compute-node", units=1)
+        # self.d.add('hdp-pig', charm='/home/sanjar/development/charms/trusty/hdp-pig', constraints={'cpu-cores':4, 'mem':8192})
+        # self.d.relate("hdp-pig:namenode","yarn-hdfs-master:namenode" )
+        # self.d.relate("hdp-pig:resourcemanager","yarn-hdfs-master:resourcemanager" )
+        # self.d.relate("compute-node:hadoop-nodes","hdp-pig:hadoop-nodes")
+
+
         try:
             self.d.setup(timeout=9000000)
             self.d.sentry.wait()
@@ -31,7 +42,7 @@
 
     def test_remote_hdfs_status(self):
         o,c= self.pig_unit.run("sudo su hdfs -c ' hdfs dfsadmin -report'")
-        if o.find('Datanodes available: 1') == -1:
+        if o.find('Datanodes available: 2') == -1:
             amulet.raise_status(amulet.FAIL, msg="Remote HDFS not available")
 
     def test_remote_yarn_status(self):
@@ -40,22 +51,25 @@
             amulet.raise_status(amulet.FAIL, msg="Remote YARN not available")
 ##
 
-    def test_hdfs_mkdir(self):
-        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/hduser'")
+    def test_hdfs(self):
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /tmp'")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -mkdir -p /user/ubuntu'")
         if c != 0:
             amulet.raise_status(amulet.FAIL, msg=" Created a user directory on hdfs FAILED")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -chown -R ubuntu:hdfs /tmp'")
+        o,c= self.pig_unit.run("su hdfs -c 'hdfs dfs -chown -R ubuntu:hdfs /user'")
+        if c != 0:
+            amulet.raise_status(amulet.FAIL, msg=" Assign hdfs owner FAILED")
 
     # def test_hdfs_copyfile(self):
     #     if c != 0:
     #         amulet.raise_status(amulet.FAIL, msg=" Created a user directory on hdfs FAILED")
 
     def test_run_pig_mapreduce(self):
-        o,c= self.pig_unit.run("sudo su hdfs -c 'hdfs dfs -copyFromLocal /etc/passwd /user/hduser/passwd'")
-        o,c= self.pig_unit.run("echo \"A = load \'/user/hduser/passwd\' using PigStorage(\':\');\">/tmp/id.pig ")
+        o,c= self.pig_unit.run("sudo su hdfs -c 'hdfs dfs -copyFromLocal /etc/passwd /user/ubuntu/passwd'")
+        o,c= self.pig_unit.run("echo \"A = load \'/user/ubuntu/passwd\' using PigStorage(\':\');\">/tmp/id.pig ")
         o,c= self.pig_unit.run("echo \"B = foreach A generate \\$0 as id; store B into \'/tmp/id.out\';\">>/tmp/id.pig ")
         o,c= self.pig_unit.run("sudo su hdfs -c 'pig -l /tmp/pig.log /tmp/id.pig'")
-        if c != 0:
-            amulet.raise_status(amulet.FAIL, msg=" Pig script-> mapreduce FAILED")
         o,c= self.pig_unit.run("hadoop fs -ls /tmp/id.out/_SUCCESS")
         if c != 0:
             amulet.raise_status(amulet.FAIL, msg=" Pig script-> mapreduce passes but mapreduce failed during execution")
 
=== modified file 'tests/hadoop-pig-cluster.yaml'
--- tests/hadoop-pig-cluster.yaml 2014-09-17 19:36:42 +0000
+++ tests/hadoop-pig-cluster.yaml 2014-12-09 17:14:57 +0000
@@ -1,20 +1,23 @@
 hortonworks-pig-solution:
   services:
     "compute-node":
-      charm: "cs:~asanjar/trusty/hdp-hadoop"
-      num_units: 1
+      charm: "cs:trusty/hdp-hadoop"
+      num_units: 2
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "656.6386478556842"
         "gui-y": "997.2758247589375"
     "yarn-hdfs-master":
-      charm: "cs:~asanjar/trusty/hdp-hadoop"
+      charm: "cs:trusty/hdp-hadoop"
       num_units: 1
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "657.3613521443158"
         "gui-y": "654.7241752410625"
     "hdp-pig":
-      charm: "cs:~asanjar/trusty/hdp-pig"
+      charm: "hdp-pig"
       num_units: 1
+      constraints: "cpu-cores=4 mem=8192"
       annotations:
         "gui-x": "675.9159435670526"
         "gui-y": "279.7241752410624"
@@ -27,4 +30,6 @@
       - "compute-node:nodemanager"
     - - "hdp-pig:resourcemanager"
       - "yarn-hdfs-master:resourcemanager"
+    - - "compute-node:hadoop-nodes"
+      - "hdp-pig:hadoop-nodes"
   series: trusty
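
For reference, the bundle above is consumed by the amulet test roughly as follows. This is a condensed sketch of the deployment setup: file and service names match the test file, while the pig_unit lookup is an assumption about how the test resolves its unit.

    import os
    import yaml
    import amulet

    # Load the bundle into an amulet Deployment and wait for it to settle,
    # mirroring the setup code in tests/10-deploy-hadoop-pig.
    d = amulet.Deployment(series='trusty')
    with open(os.path.join(os.path.dirname(__file__), 'hadoop-pig-cluster.yaml')) as f:
        d.load(yaml.safe_load(f.read()))
    d.setup(timeout=9000000)
    d.sentry.wait()
    pig_unit = d.sentry.unit['hdp-pig/0']  # assumed lookup; not shown in the diff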
