Merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/ganglia_metrics into lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk

Proposed by Andrew McLeod
Status: Merged
Merged at revision: 101
Proposed branch: lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/ganglia_metrics
Merge into: lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk
Diff against target: 266 lines (+188/-1) (has conflicts)
8 files modified
DEV-README.md (+5/-0)
config.yaml (+4/-0)
hooks/callbacks.py (+42/-1)
hooks/common.py (+14/-0)
hooks/ganglia-relation-broken (+26/-0)
hooks/ganglia-relation-changed (+26/-0)
metadata.yaml (+2/-0)
templates/hadoop-metrics2.properties.j2 (+69/-0)
Text conflict in DEV-README.md
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/ganglia_metrics
Reviewer: Juju Big Data Development (status: Pending)
Review via email: mp+268539@code.launchpad.net

Description of the change

Added capability to send ganglia metrics when a ganglia relationship exists and the ganglia_metrics config option is enabled.
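For example, with a ganglia service deployed alongside this charm, metrics can be enabled in two steps (service names here are illustrative, assuming the charm is deployed as 'compute-slave'):

    juju add-relation compute-slave ganglia
    juju set compute-slave ganglia_metrics=true

Metrics are only sent when both the relation exists and ganglia_metrics (default: false) is set to true; breaking the relation or unsetting the option re-renders /etc/hadoop/conf/hadoop-metrics2.properties with the ganglia sink commented out.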

89. By Andrew McLeod

added ganglia_metrics config value, default false

90. By Andrew McLeod

There is no restart-computeslave action; changed this to a log action with a comment

Revision history for this message
Cory Johns (johnsca) wrote :

See inline comment.

Preview Diff

=== modified file 'DEV-README.md'
--- DEV-README.md 2015-08-20 21:52:26 +0000
+++ DEV-README.md 2015-08-24 15:15:23 +0000
@@ -49,8 +49,13 @@
 
 ## Manual Deployment
 
+<<<<<<< TREE
 The easiest way to deploy an Apache Hadoop platform is to use one of
 the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
+=======
+The easiest way to deploy the core Apache Hadoop platform is to use one of
+the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
+>>>>>>> MERGE-SOURCE
 However, to manually deploy the base Apache Hadoop platform without using one
 of the bundles, you can use the following:
 

=== modified file 'config.yaml'
--- config.yaml 2015-04-03 16:49:16 +0000
+++ config.yaml 2015-08-24 15:15:23 +0000
@@ -4,3 +4,7 @@
     default: ''
     description: |
       URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad.
+  ganglia_metrics:
+    type: boolean
+    default: false
+    description: if relationship exists to ganglia:master, enable ganglia metrics

=== modified file 'hooks/callbacks.py'
--- hooks/callbacks.py 2015-08-13 21:19:50 +0000
+++ hooks/callbacks.py 2015-08-24 15:15:23 +0000
@@ -18,7 +18,10 @@
 
 from charmhelpers.core import hookenv
 from charmhelpers.core import unitdata
-from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster
+from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster, Ganglia
+from charmhelpers.core.templating import render
+from functools import partial
+from subprocess import check_call
 
 
 def update_blocked_status():
@@ -58,6 +61,16 @@
     ))
 
 
+def update_working_status():
+    if unitdata.kv().get('charm.active', False):
+        hookenv.status_set('maintenance', 'Updating configuration')
+        return
+    yarn_connected = ResourceManagerMaster().connected_units()
+    hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
+        ' and NodeManager' if yarn_connected else '',
+    ))
+
+
 def update_active_status():
     hdfs_ready = NameNodeMaster().is_ready()
     yarn_connected = ResourceManagerMaster().connected_units()
@@ -74,3 +87,31 @@
 
 def clear_active_flag():
     unitdata.kv().set('charm.active', False)
+
+
+def conf_ganglia_metrics(purgeConf=False):
+    """
+    Send hadoop specific metrics to a ganglia server
+    """
+    config = hookenv.config()
+    ganglia_metrics = config['ganglia_metrics'] and not purgeConf
+    ganglia_metrics_changed = ganglia_metrics != unitdata.kv().get('ganglia_metrics', False)
+    unitdata.kv().set('ganglia_metrics', ganglia_metrics)
+    comment = '#' if not ganglia_metrics else ''
+    ganglia_host = 'UNSET_BY_JUJU' if not ganglia_metrics else Ganglia().host()
+    ganglia_sink_str = comment + '*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31'
+    hookenv.log("Configuring ganglia sink in /etc/hadoop/conf/hadoop-metrics2.properties", level=None)
+    render(
+        source='hadoop-metrics2.properties.j2',
+        target='/etc/hadoop/conf/hadoop-metrics2.properties',
+        context={
+            'ganglia_host': ganglia_host,
+            'ganglia_sink_str': ganglia_sink_str,
+        },
+    )
+    if ganglia_metrics_changed:
+        # check_call(['actions/restart-hdfs'])
+        # IMPLEMENT RESTART COMPUTE SLAVE?
+        hookenv.log("please manually restart compute slave hadoop components", level=None)
+
+purge_ganglia_metrics = partial(conf_ganglia_metrics, purgeConf=True)

=== modified file 'hooks/common.py'
--- hooks/common.py 2015-08-18 15:18:07 +0000
+++ hooks/common.py 2015-08-24 15:15:23 +0000
@@ -128,6 +128,20 @@
                 callbacks.update_active_status, # might still be active if HDFS-only
             ],
         },
+        {
+            'name': 'ganglia',
+            'requires': [
+                hadoop.is_installed,
+                jujubigdata.relations.Ganglia,
+            ],
+            'callbacks': [
+                callbacks.conf_ganglia_metrics,
+            ],
+            'cleanup': [
+                callbacks.purge_ganglia_metrics
+            ],
+        },
+
     ])
     manager.manage()
 

=== added file 'hooks/ganglia-relation-broken'
--- hooks/ganglia-relation-broken 1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-broken 2015-08-24 15:15:23 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+All hooks in this charm are managed by the Charm Framework.
+The framework helps manage dependencies and preconditions to ensure that
+steps are only executed when they can be successful. As such, no additional
+code should be added to this hook; instead, please integrate new functionality
+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
+in hooks/callbacks.py, if necessary.
+
+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
+for more information.
+"""
+import common
+common.manage()

=== added file 'hooks/ganglia-relation-changed'
--- hooks/ganglia-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-changed 2015-08-24 15:15:23 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+All hooks in this charm are managed by the Charm Framework.
+The framework helps manage dependencies and preconditions to ensure that
+steps are only executed when they can be successful. As such, no additional
+code should be added to this hook; instead, please integrate new functionality
+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
+in hooks/callbacks.py, if necessary.
+
+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
+for more information.
+"""
+import common
+common.manage()

=== modified file 'metadata.yaml'
--- metadata.yaml 2015-03-06 22:28:48 +0000
+++ metadata.yaml 2015-08-24 15:15:23 +0000
@@ -12,3 +12,5 @@
     interface: dfs-slave
   nodemanager:
     interface: mapred-slave
+  ganglia:
+    interface: monitor

=== added directory 'templates'
=== added file 'templates/hadoop-metrics2.properties.j2'
--- templates/hadoop-metrics2.properties.j2 1970-01-01 00:00:00 +0000
+++ templates/hadoop-metrics2.properties.j2 2015-08-24 15:15:23 +0000
@@ -0,0 +1,69 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# Defining sink for Ganglia 3.1
+{{ ganglia_sink_str }}
+
+# Default polling period for GangliaSink
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+# Directing output to ganglia servers
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+namenode.sink.ganglia.servers={{ ganglia_host }}:8649
+datanode.sink.ganglia.servers={{ ganglia_host }}:8649
+jobtracker.sink.ganglia.servers={{ ganglia_host }}:8649
+tasktracker.sink.ganglia.servers={{ ganglia_host }}:8649
+maptask.sink.ganglia.servers={{ ganglia_host }}:8649
+reducetask.sink.ganglia.servers={{ ganglia_host }}:8649
+resourcemanager.sink.ganglia.servers={{ ganglia_host }}:8649
+nodemanager.sink.ganglia.servers={{ ganglia_host }}:8649
+historyserver.sink.ganglia.servers={{ ganglia_host }}:8649
+journalnode.sink.ganglia.servers={{ ganglia_host }}:8649
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#jobtracker.sink.file_jvm.context=jvm
+#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
+#jobtracker.sink.file_mapred.context=mapred
+#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
+
+#tasktracker.sink.file.filename=tasktracker-metrics.out
+
+#maptask.sink.file.filename=maptask-metrics.out
+
+#reducetask.sink.file.filename=reducetask-metrics.out
