Merge lp:~asanjar/charms/trusty/hdp-hadoop/trunk into lp:charms/trusty/hdp-hadoop
- Trusty Tahr (14.04)
- trunk
- Merge into trunk
Proposed by
amir sanjar
Status: | Merged | ||||
---|---|---|---|---|---|
Merged at revision: | 24 | ||||
Proposed branch: | lp:~asanjar/charms/trusty/hdp-hadoop/trunk | ||||
Merge into: | lp:charms/trusty/hdp-hadoop | ||||
Diff against target: |
655 lines (+296/-54) (has conflicts) 6 files modified
hadoop_cluster.yaml (+0/-20) hooks/hdp-hadoop-common.py (+266/-23) hooks/hdputils.py (+2/-0) metadata.yaml (+6/-9) tests/01-hadoop-cluster-deployment-1.py (+2/-2) tests/hadoop_cluster.yaml (+20/-0) Text conflict in hooks/hdp-hadoop-common.py |
||||
To merge this branch: | bzr merge lp:~asanjar/charms/trusty/hdp-hadoop/trunk | ||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Charles Butler (community) | Needs Fixing | ||
Review via email: mp+241021@code.launchpad.net |
Commit message
Description of the change
adding following features:
1) Openstack enablement for multi-node compute nodes
2) Enable external big data applications to query the Hadoop compute nodes' IP and hostname
for task discovery.
3) clean-up
4) update amulet testcase
To post a comment you must log in.
- 24. By amir sanjar
-
updating hortonworks ubuntu repository
- 25. By amir sanjar
-
update hortonworks helper files
- 26. By amir sanjar
-
undo upgrade to the latest hortonworks branch
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === removed file 'hadoop_cluster.yaml' |
2 | --- hadoop_cluster.yaml 2014-08-25 16:33:37 +0000 |
3 | +++ hadoop_cluster.yaml 1970-01-01 00:00:00 +0000 |
4 | @@ -1,20 +0,0 @@ |
5 | -hdp-hadoop-cluster: |
6 | - services: |
7 | - "compute-node": |
8 | - charm: "cs:~asanjar/trusty/hdp-hadoop" |
9 | - num_units: 1 |
10 | - annotations: |
11 | - "gui-x": "525.7681500094167" |
12 | - "gui-y": "608.4847070634866" |
13 | - "yarn-hdfs-master": |
14 | - charm: "cs:~asanjar/trusty/hdp-hadoop" |
15 | - num_units: 1 |
16 | - annotations: |
17 | - "gui-x": "532" |
18 | - "gui-y": "236.51529293651342" |
19 | - relations: |
20 | - - - "yarn-hdfs-master:namenode" |
21 | - - "compute-node:datanode" |
22 | - - - "yarn-hdfs-master:resourcemanager" |
23 | - - "compute-node:nodemanager" |
24 | - series: trusty |
25 | |
26 | === added symlink 'hooks/compute-nodes-relation-changed' |
27 | === target is u'hdp-hadoop-common.py' |
28 | === added symlink 'hooks/compute-nodes-relation-joined' |
29 | === target is u'hdp-hadoop-common.py' |
30 | === added symlink 'hooks/compute-relation-joined' |
31 | === target is u'hdp-hadoop-common.py' |
32 | === added symlink 'hooks/hadoop-nodes-relation-joined' |
33 | === target is u'hdp-hadoop-common.py' |
34 | === modified file 'hooks/hdp-hadoop-common.py' |
35 | --- hooks/hdp-hadoop-common.py 2014-11-03 17:56:47 +0000 |
36 | +++ hooks/hdp-hadoop-common.py 2014-11-08 05:32:58 +0000 |
37 | @@ -1,4 +1,4 @@ |
38 | -#!/usr/bin/env python |
39 | +#!/usr/bin/python |
40 | import os |
41 | import subprocess |
42 | import sys |
43 | @@ -6,6 +6,7 @@ |
44 | import shlex |
45 | import shutil |
46 | import inspect |
47 | +import time |
48 | |
49 | from hdputils import install_base_pkg, updateHDPDirectoryScript, config_all_nodes, \ |
50 | setHadoopEnvVar, home, hdpScript, configureJAVA, config_all_nodes |
51 | @@ -13,7 +14,12 @@ |
52 | |
53 | from charmhelpers.lib.utils import config_get, get_unit_hostname |
54 | from shutil import rmtree, copyfile |
55 | +<<<<<<< TREE |
56 | from charmhelpers.core.hookenv import log, Hooks, relation_get, relation_set, unit_get, open_port |
57 | +======= |
58 | +from socket import gethostname |
59 | +from charmhelpers.core.hookenv import log, Hooks, relation_get, relation_set, unit_get, open_port, local_unit, related_units |
60 | +>>>>>>> MERGE-SOURCE |
61 | from charmhelpers.core.host import service_start, service_stop, add_user_to_group |
62 | from time import sleep |
63 | |
64 | @@ -144,9 +150,17 @@ |
65 | def callHDFS_fs(command): |
66 | cmd = shlex.split("su hdfs -c '/usr/lib/hadoop/bin/hadoop fs {}'".format(command)) |
67 | subprocess.call(cmd) |
68 | +<<<<<<< TREE |
69 | |
70 | def startJobHistory(): |
71 | log("==> startJobHistory") |
72 | +======= |
73 | +########################### |
74 | +# Start Job History server |
75 | +########################### |
76 | +def start_jh(): |
77 | + log("==> start_jh") |
78 | +>>>>>>> MERGE-SOURCE |
79 | path = os.path.join(os.path.sep, 'usr', 'lib', 'hadoop-yarn', 'bin', 'container-executor') |
80 | chownRecursive(path, 'root', 'hadoop') |
81 | os.chmod(path, 650) |
82 | @@ -163,45 +177,96 @@ |
83 | cmd = shlex.split("su {} -c '/usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config {} start historyserver'".\ |
84 | format(os.environ['MAPRED_USER'], hadoopConfDir)) |
85 | subprocess.call(cmd) |
86 | +<<<<<<< TREE |
87 | |
88 | def stopJobHistory(): |
89 | +======= |
90 | + |
91 | +########################### |
92 | +# Stop Job History server |
93 | +########################### |
94 | +def stop_jh(): |
95 | +>>>>>>> MERGE-SOURCE |
96 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
97 | os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec" |
98 | - cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} stop resourcemanager'".\ |
99 | + cmd = shlex.split("su {} -c '/usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config {} stop historyserver'".\ |
100 | format(os.environ['MAPRED_USER'], hadoopConfDir)) |
101 | subprocess.call(cmd) |
102 | +<<<<<<< TREE |
103 | |
104 | # candidate for BD charm helper |
105 | |
106 | def start_namenode(hdfsUser): |
107 | +======= |
108 | + |
109 | +############################################################################################ |
110 | +# restart Job History server - Must be done everytime yarn-site.xml and |
111 | +# mpared-site.xml are modfied |
112 | +############################################################################################ |
113 | +def restart_jh(): |
114 | + if is_jvm_service_active("JobHistoryServer"): |
115 | + stop_jh() |
116 | + start_jh() |
117 | + |
118 | +########################## |
119 | +# Start NameNode server |
120 | +########################### |
121 | +def start_nn(hdfsUser): |
122 | +>>>>>>> MERGE-SOURCE |
123 | log("==> start namenode for user={}".format(hdfsUser), "INFO") |
124 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
125 | cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} start namenode'".\ |
126 | format(hdfsUser, hadoopConfDir)) |
127 | +<<<<<<< TREE |
128 | subprocess.check_call(cmd) |
129 | |
130 | def stop_namenode(hdfsUser): |
131 | log("==> start namenode for user={}".format(hdfsUser), "INFO") |
132 | +======= |
133 | + subprocess.check_call(cmd) |
134 | + |
135 | +########################### |
136 | +# Stop Name Node server |
137 | +########################### |
138 | +def stop_nn(hdfsUser): |
139 | + log("==> stop namenode for user={}".format(hdfsUser), "INFO") |
140 | +>>>>>>> MERGE-SOURCE |
141 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
142 | cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} stop namenode'".\ |
143 | format(hdfsUser, hadoopConfDir)) |
144 | subprocess.call(cmd) |
145 | -# candidate for BD charm helper |
146 | |
147 | -def start_datanode(hdfsUser): |
148 | - log("==> start namenode for user={}".format(hdfsUser), "INFO") |
149 | +########################### |
150 | +# Start Data Node |
151 | +########################### |
152 | +def start_dn(hdfsUser): |
153 | + log("==> start datanode for user={}".format(hdfsUser), "INFO") |
154 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
155 | cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} start datanode'".\ |
156 | format(hdfsUser, hadoopConfDir)) |
157 | +<<<<<<< TREE |
158 | subprocess.check_call(cmd) |
159 | |
160 | def stop_datanode(hdfsUser): |
161 | log("==> start namenode for user={}".format(hdfsUser), "INFO") |
162 | +======= |
163 | + subprocess.check_call(cmd) |
164 | + |
165 | +########################### |
166 | +# Stop Data Node |
167 | +########################### |
168 | +def stop_dn(hdfsUser): |
169 | + log("==> stop datanode for user={}".format(hdfsUser), "INFO") |
170 | +>>>>>>> MERGE-SOURCE |
171 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
172 | cmd = shlex.split("su {} -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config {} stop datanode'".\ |
173 | format(hdfsUser, hadoopConfDir)) |
174 | subprocess.call(cmd) |
175 | # candidate for BD charm helper |
176 | + |
177 | +############################################# |
178 | +# Configure YARN-SITE.XML and MAPRED-SITE.XML |
179 | +############################################# |
180 | def configureYarn(RMhostname): |
181 | yarnConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],"yarn-site.xml") |
182 | mapConfDir = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],"mapred-site.xml") |
183 | @@ -219,64 +284,136 @@ |
184 | #jobhistory server |
185 | setHadoopConfigXML(mapConfDir, "mapreduce.jobhistory.webapp.address", RMhostname+":19888") |
186 | setHadoopConfigXML(mapConfDir, "mapreduce.jobhistory.address", RMhostname+":10020") |
187 | +<<<<<<< TREE |
188 | |
189 | # candidate for BD charm helper |
190 | def start_RM(yarnUser): |
191 | +======= |
192 | + |
193 | +################################ |
194 | +# Start Resource Manager server |
195 | +################################ |
196 | +def start_rm(yarnUser): |
197 | +>>>>>>> MERGE-SOURCE |
198 | log("==> start resourcemanager", "INFO") |
199 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
200 | os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec" |
201 | cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} start resourcemanager'".\ |
202 | format(yarnUser, hadoopConfDir)) |
203 | subprocess.call(cmd) |
204 | +<<<<<<< TREE |
205 | |
206 | def stop_RM(yarnUser): |
207 | +======= |
208 | + |
209 | +################################ |
210 | +# Stop Resource Manager server |
211 | +################################ |
212 | +def stop_rm(yarnUser): |
213 | +>>>>>>> MERGE-SOURCE |
214 | log("==> stop resourcemanager", "INFO") |
215 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
216 | os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec" |
217 | cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} stop resourcemanager'".\ |
218 | format(yarnUser, hadoopConfDir)) |
219 | +<<<<<<< TREE |
220 | subprocess.check_call(cmd) |
221 | # candidate for BD charm helper |
222 | def start_NM(yarnUser): |
223 | +======= |
224 | + subprocess.check_call(cmd) |
225 | + |
226 | +################################################ |
227 | +# Start Node Manager daemon on each compute node |
228 | +################################################ |
229 | +def start_nm(yarnUser): |
230 | +>>>>>>> MERGE-SOURCE |
231 | log("==> start nodemanager", "INFO") |
232 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
233 | os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec" |
234 | cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} start nodemanager'".\ |
235 | format(yarnUser, hadoopConfDir)) |
236 | +<<<<<<< TREE |
237 | subprocess.call(cmd) |
238 | |
239 | def stop_NM(yarnUser): |
240 | +======= |
241 | + subprocess.check_call(cmd) |
242 | + |
243 | +################################################ |
244 | +# Stop Node Manager daemon on each compute node |
245 | +################################################ |
246 | +def stop_nm(yarnUser): |
247 | +>>>>>>> MERGE-SOURCE |
248 | log("==> stop nodemanager", "INFO") |
249 | hadoopConfDir = os.environ["HADOOP_CONF_DIR"] |
250 | os.environ["HADOOP_LIBEXEC_DIR"]="/usr/lib/hadoop/libexec" |
251 | cmd = shlex.split("su {} -c '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config {} stop nodemanager'".\ |
252 | format(yarnUser, hadoopConfDir)) |
253 | subprocess.call(cmd) |
254 | - |
255 | +<<<<<<< TREE |
256 | + |
257 | +======= |
258 | + |
259 | +################################################ |
260 | +# Stop all running hadoop services |
261 | +# NOTE: Order is important - DO NOT CHENAGE |
262 | +################################################ |
263 | +>>>>>>> MERGE-SOURCE |
264 | def stop_hadoop_services(): |
265 | if is_jvm_service_active("ResourceManager"): |
266 | - stop_RM(os.environ['YARN_USER']) |
267 | + stop_rm(os.environ['YARN_USER']) |
268 | if is_jvm_service_active("NodeManager"): |
269 | - stop_NM(os.environ['YARN_USER']) |
270 | + stop_nm(os.environ['YARN_USER']) |
271 | if is_jvm_service_active("NameNode"): |
272 | - stop_namenode(os.environ['HDFS_USER']) |
273 | + stop_nn(os.environ['HDFS_USER']) |
274 | if is_jvm_service_active("DataNode"): |
275 | +<<<<<<< TREE |
276 | stop_datanode(os.environ['HDFS_USER']) |
277 | |
278 | +======= |
279 | + stop_dn(os.environ['HDFS_USER']) |
280 | + if is_jvm_service_active("JobHistoryServer"): |
281 | + stop_jh() |
282 | + |
283 | +################################################ |
284 | +# restart all running hadoop services |
285 | +# NOTE: Order is important - DO NOT CHENAGE |
286 | +################################################ |
287 | +>>>>>>> MERGE-SOURCE |
288 | def restart_hadoop_services(): |
289 | + if is_jvm_service_active("NameNode"): |
290 | + stop_nn(os.environ['HDFS_USER']) |
291 | + start_nn(os.environ['HDFS_USER']) |
292 | if is_jvm_service_active("ResourceManager"): |
293 | - stop_RM(os.environ['YARN_USER']) |
294 | - start_RM(os.environ['YARN_USER']) |
295 | + stop_rm(os.environ['YARN_USER']) |
296 | + start_rm(os.environ['YARN_USER']) |
297 | if is_jvm_service_active("NodeManager"): |
298 | - stop_NM(os.environ['YARN_USER']) |
299 | - start_NM(os.environ['YARN_USER']) |
300 | - if is_jvm_service_active("NameNode"): |
301 | - stop_namenode(os.environ['HDFS_USER']) |
302 | - start_namenode(os.environ['HDFS_USER']) |
303 | + stop_nm(os.environ['YARN_USER']) |
304 | + start_nm(os.environ['YARN_USER']) |
305 | if is_jvm_service_active("DataNode"): |
306 | +<<<<<<< TREE |
307 | stop_datanode(os.environ['HDFS_USER']) |
308 | start_datanode(os.environ['HDFS_USER']) |
309 | |
310 | +======= |
311 | + stop_dn(os.environ['HDFS_USER']) |
312 | + start_dn(os.environ['HDFS_USER']) |
313 | + restart_jh() |
314 | + |
315 | +def wait_for_hadoop_service(service): |
316 | + ticks = time.time() |
317 | + while True: |
318 | + if (time.time() - ticks) > 200: |
319 | + log("Error ==> Reached timeout value for hadoop service {}..".format(service), "ERROR") |
320 | + sys.exit(1) |
321 | + if not is_jvm_service_active(service): |
322 | + time.sleep(2) |
323 | + log("Waiting.. ==> {} not ready..".format(service),"INFO") |
324 | + continue |
325 | + break |
326 | + |
327 | +>>>>>>> MERGE-SOURCE |
328 | def configureHDFS(hostname): |
329 | hdfsConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],'hdfs-site.xml') |
330 | coreConfPath = os.path.join(os.path.sep, os.environ['HADOOP_CONF_DIR'],'core-site.xml') |
331 | @@ -300,8 +437,12 @@ |
332 | tarfilenamePre="hdp_manual_install_rpm_helper_files-2.1.1.385" |
333 | bashrc = os.path.join(os.path.sep, home, '.bashrc') |
334 | hadoopMemoryOptimizationData = os.path.join(os.path.sep, hdpScriptPath, "hdpMemOpt.txt"); |
335 | +<<<<<<< TREE |
336 | hosts_path = os.path.join(os.path.sep, 'etc', 'hosts') |
337 | nameNodeReady = False |
338 | +======= |
339 | +hosts_path = os.path.join(os.path.sep, 'etc', 'hosts') |
340 | +>>>>>>> MERGE-SOURCE |
341 | resourceManagerReady = False |
342 | ########################################################################################## |
343 | |
344 | @@ -330,8 +471,14 @@ |
345 | 'hadoop-lzo'] |
346 | install_base_pkg(packages) |
347 | config_hadoop_nodes() |
348 | +<<<<<<< TREE |
349 | fileSetKV(hosts_path, unit_get('private-address')+' ', get_unit_hostname()) |
350 | |
351 | +======= |
352 | + fileSetKV(hosts_path, unit_get('private-address')+' ', get_unit_hostname()) |
353 | + |
354 | + |
355 | +>>>>>>> MERGE-SOURCE |
356 | @hooks.hook('resourcemanager-relation-joined') |
357 | def resourcemanager_relation_joined(): |
358 | log ("==> resourcemanager-relation-joined","INFO") |
359 | @@ -339,12 +486,22 @@ |
360 | relation_set(resourceManagerReady=True) |
361 | relation_set(resourceManager_hostname=get_unit_hostname()) |
362 | return |
363 | + if not is_jvm_service_active("NameNode"): |
364 | + sys.exit(0) |
365 | setHadoopEnvVar() |
366 | +<<<<<<< TREE |
367 | relation_set(resourceManager_ip=unit_get('private-address')) |
368 | relation_set(resourceManager_hostname=get_unit_hostname()) |
369 | configureYarn(unit_get('private-address')) |
370 | start_RM(os.environ["YARN_USER"]) |
371 | startJobHistory() |
372 | +======= |
373 | + relation_set(resourceManager_ip=unit_get('private-address')) |
374 | + relation_set(resourceManager_hostname=get_unit_hostname()) |
375 | + configureYarn(unit_get('private-address')) |
376 | + start_rm(os.environ["YARN_USER"]) |
377 | + start_jh() |
378 | +>>>>>>> MERGE-SOURCE |
379 | open_port(8025) |
380 | open_port(8030) |
381 | open_port(8050) |
382 | @@ -362,11 +519,21 @@ |
383 | if not resourceManagerReady: |
384 | sys.exit(0) |
385 | setHadoopEnvVar() |
386 | +<<<<<<< TREE |
387 | rm_ip = relation_get('private-address') |
388 | configureYarn(rm_ip) |
389 | fileSetKV(hosts_path, rm_ip+' ', relation_get('resourceManager_hostname')) |
390 | start_NM(os.environ["YARN_USER"]) |
391 | |
392 | +======= |
393 | + rm_ip = relation_get('private-address') |
394 | + configureYarn(rm_ip) |
395 | + fileSetKV(hosts_path, rm_ip+' ', relation_get('resourceManager_hostname')) |
396 | + # nodemanager requires data node daemon |
397 | + if not is_jvm_service_active("DataNode"): |
398 | + start_dn(os.environ['HDFS_USER']) |
399 | + start_nm(os.environ["YARN_USER"]) |
400 | +>>>>>>> MERGE-SOURCE |
401 | open_port(8025) |
402 | open_port(8030) |
403 | open_port(8050) |
404 | @@ -374,8 +541,14 @@ |
405 | open_port(19888) |
406 | open_port(8088) |
407 | open_port(10020) |
408 | +<<<<<<< TREE |
409 | relation_set(nodemanager_hostname=get_unit_hostname()) |
410 | |
411 | +======= |
412 | + relation_set(nodemanager_hostname=get_unit_hostname()) |
413 | + |
414 | + |
415 | +>>>>>>> MERGE-SOURCE |
416 | @hooks.hook('resourcemanager-relation-broken') |
417 | def resourcemanager_relation_broken(): |
418 | log ("Configuring resourcemanager - broken phase","INFO") |
419 | @@ -384,11 +557,19 @@ |
420 | @hooks.hook('resourcemanager-relation-changed') |
421 | def resourcemanager_relation_changed(): |
422 | log ("Configuring resourcemanager - changed phase","INFO") |
423 | +<<<<<<< TREE |
424 | nm_host = relation_get('nodemanager_hostname') |
425 | if nm_host == None: |
426 | sys.exit(0) |
427 | setHadoopEnvVar() |
428 | fileSetKV(hosts_path, relation_get('private-address')+' ', nm_host) |
429 | +======= |
430 | + nm_host = relation_get('nodemanager_hostname') |
431 | + if nm_host == None: |
432 | + sys.exit(0) |
433 | + setHadoopEnvVar() |
434 | + fileSetKV(hosts_path, relation_get('private-address')+' ', nm_host) |
435 | +>>>>>>> MERGE-SOURCE |
436 | |
437 | @hooks.hook('namenode-relation-joined') |
438 | def namenode_relation_joined(): |
439 | @@ -403,14 +584,22 @@ |
440 | relation_set(namenode_hostname=get_unit_hostname()) |
441 | configureHDFS(unit_get('private-address')) |
442 | format_namenode(os.environ["HDFS_USER"]) |
443 | +<<<<<<< TREE |
444 | start_namenode(os.environ["HDFS_USER"]) |
445 | |
446 | +======= |
447 | + start_nn(os.environ["HDFS_USER"]) |
448 | + start_jh() |
449 | +>>>>>>> MERGE-SOURCE |
450 | open_port(8020) |
451 | open_port(8010) |
452 | open_port(50070) |
453 | open_port(50075) |
454 | open_port(8480) |
455 | open_port(50470) |
456 | + if not is_jvm_service_active("NameNode"): |
457 | + log("error ==> NameNode failed to start") |
458 | + sys.exit(1) |
459 | sleep(5) |
460 | relation_set(nameNodeReady=True) |
461 | |
462 | @@ -423,20 +612,36 @@ |
463 | if not nameNodeReady: |
464 | sys.exit(0) |
465 | setHadoopEnvVar() |
466 | +<<<<<<< TREE |
467 | nodeType="namenode" |
468 | namenode_hostname = relation_get("namenode_hostname") |
469 | namenode_ip = relation_get('private-address') |
470 | fileSetKV(hosts_path, namenode_ip+' ', namenode_hostname) |
471 | configureHDFS(namenode_hostname) |
472 | +======= |
473 | + nn_hostname = relation_get("namenode_hostname") |
474 | + nn_ip = relation_get('private-address') |
475 | + fileSetKV(hosts_path, nn_ip+' ', nn_hostname) |
476 | + configureHDFS(nn_ip) |
477 | +>>>>>>> MERGE-SOURCE |
478 | setDirPermission(os.environ['DFS_DATA_DIR'], os.environ['HDFS_USER'], os.environ['HADOOP_GROUP'], 0750) |
479 | - start_datanode(os.environ["HDFS_USER"]) |
480 | + start_dn(os.environ["HDFS_USER"]) |
481 | + if not is_jvm_service_active("DataNode"): |
482 | + log("error ==> DataNode failed to start") |
483 | + sys.exit(1) |
484 | open_port(8010) |
485 | open_port(8480) |
486 | open_port(50010) |
487 | open_port(50075) |
488 | - relation_set(dn_hostname = get_unit_hostname()) |
489 | - |
490 | - |
491 | +<<<<<<< TREE |
492 | + relation_set(dn_hostname = get_unit_hostname()) |
493 | + |
494 | + |
495 | +======= |
496 | + relation_set(dn_hostname = get_unit_hostname()) |
497 | + |
498 | + |
499 | +>>>>>>> MERGE-SOURCE |
500 | @hooks.hook('config-changed') |
501 | def config_changed(): |
502 | log( "config-changed called", "INFO") |
503 | @@ -448,11 +653,19 @@ |
504 | |
505 | @hooks.hook('namenode-relation-changed') |
506 | def namenode_relation_changed(): |
507 | +<<<<<<< TREE |
508 | dn_host = relation_get('dn-hostname') |
509 | if dn_host == None: |
510 | sys.exit(0) |
511 | log("Configuring namenode - changed phase", "INFO") |
512 | fileSetKV(hosts_path, relation_get('private-address')+' ', dn_host) |
513 | +======= |
514 | + dn_host = relation_get('dn_hostname') |
515 | + if dn_host == None: |
516 | + sys.exit(0) |
517 | + log("Configuring namenode - changed phase", "INFO") |
518 | + fileSetKV(hosts_path, relation_get('private-address')+' ', dn_host) |
519 | +>>>>>>> MERGE-SOURCE |
520 | |
521 | @hooks.hook('start') |
522 | def start(): |
523 | @@ -463,9 +676,39 @@ |
524 | log( "stop called", "INFO") |
525 | setHadoopEnvVar() |
526 | stop_hadoop_services() |
527 | - |
528 | - |
529 | - |
530 | +<<<<<<< TREE |
531 | + |
532 | + |
533 | + |
534 | +======= |
535 | + |
536 | +@hooks.hook('compute-nodes-relation-changed') |
537 | +def compute_nodes_relation_changed(): |
538 | + log("==> Configuring compute-nodes, local ID = {} - Changed phase".format(local_unit()),"INFO") |
539 | + nodeList = related_units() |
540 | + for n in nodeList: |
541 | + dn_ip = relation_get('private-address', unit=n) |
542 | + hostname = relation_get('hostname', unit=n) |
543 | + log("==> Configuring compute-nodes {}={}".format(dn_ip, hostname),"INFO") |
544 | + if hostname != None: |
545 | + fileSetKV(hosts_path, dn_ip+' ', hostname) |
546 | + else: |
547 | + sys.exit(0) |
548 | + |
549 | +@hooks.hook('compute-nodes-relation-joined') |
550 | +def compute_nodes_relation_joined(): |
551 | + log("==> compute_nodes_relation_joined {}".format(get_unit_hostname()),"INFO") |
552 | + relation_set(hostname=get_unit_hostname()) |
553 | + |
554 | + |
555 | +@hooks.hook('hadoop-nodes-relation-joined') |
556 | +def hadoop_nodes_relation_joined(): |
557 | + log("==> hadoop_nodes_relation_joined {}".format(get_unit_hostname()),"INFO") |
558 | + relation_set(hostname=get_unit_hostname()) |
559 | + |
560 | + |
561 | + |
562 | +>>>>>>> MERGE-SOURCE |
563 | |
564 | if __name__ == "__main__": |
565 | hooks.execute(sys.argv) |
566 | |
567 | === modified file 'hooks/hdputils.py' |
568 | --- hooks/hdputils.py 2014-08-11 03:35:08 +0000 |
569 | +++ hooks/hdputils.py 2014-11-08 05:32:58 +0000 |
570 | @@ -22,6 +22,8 @@ |
571 | subprocess.call(cmd) |
572 | apt_update() |
573 | apt_install(packages) |
574 | + if not os.path.isdir(os.path.join(os.path.sep,'usr','lib', 'hadoop')): |
575 | + log("Error, apt-get install Hadoop failed", "ERROR") |
576 | os.chdir(home); |
577 | wgetPkg("http://public-repo-1.hortonworks.com/HDP/tools/2.1.1.0/hdp_manual_install_rpm_helper_files-2.1.1.385.tar.gz","") |
578 | if tarfile.is_tarfile(tarfilename): |
579 | |
580 | === modified file 'metadata.yaml' |
581 | --- metadata.yaml 2014-07-23 12:16:51 +0000 |
582 | +++ metadata.yaml 2014-11-08 05:32:58 +0000 |
583 | @@ -4,22 +4,19 @@ |
584 | description: | |
585 | Hadoop is a software platform that lets one easily write and |
586 | run applications that process vast amounts of data. |
587 | -categories: ["applications"] |
588 | +tags: ["applications"] |
589 | provides: |
590 | namenode: |
591 | interface: dfs |
592 | resourcemanager: |
593 | interface: mapred |
594 | - ganglia: |
595 | - interface: monitor |
596 | + hadoop-nodes: |
597 | + interface: mapred |
598 | requires: |
599 | datanode: |
600 | interface: dfs |
601 | - secondarynamenode: |
602 | - interface: dfs |
603 | nodemanager: |
604 | interface: mapred |
605 | - mapred-namenode: |
606 | - interface: dfs |
607 | - elasticsearch: |
608 | - interface: elasticsearch |
609 | +peers: |
610 | + compute-nodes: |
611 | + interface: mapred |
612 | |
613 | === modified file 'tests/01-hadoop-cluster-deployment-1.py' |
614 | --- tests/01-hadoop-cluster-deployment-1.py 2014-09-17 15:27:28 +0000 |
615 | +++ tests/01-hadoop-cluster-deployment-1.py 2014-11-08 05:32:58 +0000 |
616 | @@ -6,12 +6,12 @@ |
617 | class TestDeployment(object): |
618 | def __init__(self): |
619 | self.d = amulet.Deployment(series='trusty') |
620 | - bpath = os.path.join(os.path.dirname( __file__), "../hadoop_cluster.yaml") |
621 | + bpath = os.path.join(os.path.dirname( __file__), "hadoop_cluster.yaml") |
622 | f = open(bpath) |
623 | bun = f.read() |
624 | self.d.load(yaml.safe_load(bun)) |
625 | try: |
626 | - self.d.setup(timeout=9000) |
627 | + self.d.setup(timeout=900000) |
628 | self.d.sentry.wait() |
629 | except amulet.helpers.TimeoutError: |
630 | amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") |
631 | |
632 | === added file 'tests/hadoop_cluster.yaml' |
633 | --- tests/hadoop_cluster.yaml 1970-01-01 00:00:00 +0000 |
634 | +++ tests/hadoop_cluster.yaml 2014-11-08 05:32:58 +0000 |
635 | @@ -0,0 +1,20 @@ |
636 | +hdp-hadoop-cluster: |
637 | + services: |
638 | + "compute-node": |
639 | + charm: "cs:~asanjar/trusty/hdp-hadoop" |
640 | + num_units: 4 |
641 | + annotations: |
642 | + "gui-x": "525.7681500094167" |
643 | + "gui-y": "608.4847070634866" |
644 | + "yarn-hdfs-master": |
645 | + charm: "cs:~asanjar/trusty/hdp-hadoop" |
646 | + num_units: 1 |
647 | + annotations: |
648 | + "gui-x": "532" |
649 | + "gui-y": "236.51529293651342" |
650 | + relations: |
651 | + - - "yarn-hdfs-master:namenode" |
652 | + - "compute-node:datanode" |
653 | + - - "yarn-hdfs-master:resourcemanager" |
654 | + - "compute-node:nodemanager" |
655 | + series: trusty |
Amir - this merge has some great content; however, it's not going to cleanly apply over the existing charm store branch.
I've got a fixup that took your changes and applied them over the charm store series - minus the whitespace changes and pushed it to bigdata-dev and will propose for bigdata-charmers.
https://code.launchpad.net/~bigdata-dev/charms/trusty/hdp-hadoop/trunk
Make sure you backport these changes into your personal namespace when doing future dev, otherwise you will have this problem again.
Thanks for the contribution!