Merge lp:~bigdata-dev/charms/trusty/apache-hadoop-hdfs-secondary/ganglia_metrics into lp:~bigdata-dev/charms/trusty/apache-hadoop-hdfs-secondary/trunk

Proposed by Andrew McLeod on 2015-08-19
Status: Merged
Merged at revision: 79
Proposed branch: lp:~bigdata-dev/charms/trusty/apache-hadoop-hdfs-secondary/ganglia_metrics
Merge into: lp:~bigdata-dev/charms/trusty/apache-hadoop-hdfs-secondary/trunk
Diff against target: 253 lines (+179/-2) (has conflicts)
8 files modified
DEV-README.md (+5/-0)
config.yaml (+4/-0)
hooks/callbacks.py (+32/-2)
hooks/common.py (+15/-0)
hooks/ganglia-relation-broken (+26/-0)
hooks/ganglia-relation-changed (+26/-0)
metadata.yaml (+2/-0)
templates/hadoop-metrics2.properties.j2 (+69/-0)
Text conflict in DEV-README.md
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/apache-hadoop-hdfs-secondary/ganglia_metrics
Reviewer Review Type Date Requested Status
Juju Big Data Development 2015-08-19 Pending
Review via email: mp+268537@code.launchpad.net

Description of the change

Added capability to send ganglia metrics if relationship exists

To post a comment you must log in.
72. By Andrew McLeod on 2015-08-24

added ganglia_metrics config value, default false

73. By Andrew McLeod on 2015-08-24

there is no restart-hdfs action, changed this to a log action with a comment

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'DEV-README.md'
2--- DEV-README.md 2015-08-20 21:55:56 +0000
3+++ DEV-README.md 2015-08-24 15:14:14 +0000
4@@ -31,8 +31,13 @@
5
6 ## Manual Deployment
7
8+<<<<<<< TREE
9 The easiest way to deploy an Apache Hadoop platform is to use one of
10 the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
11+=======
12+The easiest way to deploy the core Apache Hadoop platform is to use one of
13+the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
14+>>>>>>> MERGE-SOURCE
15 However, to manually deploy the base Apache Hadoop platform without using one
16 of the bundles, you can use the following:
17
18
19=== modified file 'config.yaml'
20--- config.yaml 2015-04-03 16:49:16 +0000
21+++ config.yaml 2015-08-24 15:14:14 +0000
22@@ -10,3 +10,7 @@
23 default: ''
24 description: |
25 URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad.
26+ ganglia_metrics:
27+ type: boolean
28+ default: false
29+ description: if relationship exists to ganglia:master, enable ganglia metrics
30
31=== modified file 'hooks/callbacks.py'
32--- hooks/callbacks.py 2015-08-10 22:58:48 +0000
33+++ hooks/callbacks.py 2015-08-24 15:14:14 +0000
34@@ -18,8 +18,10 @@
35
36 from charmhelpers.core import hookenv
37 from charmhelpers.core import unitdata
38-from jujubigdata.relations import NameNodeMaster
39-
40+from jujubigdata.relations import NameNodeMaster, Ganglia
41+from charmhelpers.core.templating import render
42+from functools import partial
43+from subprocess import check_call
44
45 def update_blocked_status():
46 if unitdata.kv().get('charm.active', False):
47@@ -42,5 +44,33 @@
48 hookenv.status_set('active', 'Ready')
49
50
51+def conf_ganglia_metrics(purgeConf=False):
52+ """
53+ Send hadoop specific metrics to a ganglia server
54+ """
55+ config = hookenv.config()
56+ ganglia_metrics = config['ganglia_metrics'] and not purgeConf
57+ ganglia_metrics_changed = ganglia_metrics != unitdata.kv().get('ganglia_metrics', False)
58+ unitdata.kv().set('ganglia_metrics', ganglia_metrics)
59+ comment = '#' if not ganglia_metrics else ''
60+ ganglia_host = 'UNSET_BY_JUJU' if not ganglia_metrics else Ganglia().host()
61+ ganglia_sink_str = comment + '*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31'
62+ hookenv.log("Configuring ganglia sink in /etc/hadoop/conf/hadoop-metrics2.properties", level=None)
63+ render(
64+ source='hadoop-metrics2.properties.j2',
65+ target='/etc/hadoop/conf/hadoop-metrics2.properties',
66+ context={
67+ 'ganglia_host': ganglia_host,
68+ 'ganglia_sink_str': ganglia_sink_str,
69+ },
70+ ),
71+ if ganglia_metrics_changed:
72+ #check_call(['actions/restart-hdfs'])
73+ # RESTART-HDFS TO BE IMPLEMENTED
74+ hookenv.log("please restart hdfs manually", level=None)
75+
76+purge_ganglia_metrics = partial(conf_ganglia_metrics, purgeConf=True)
77+
78+
79 def clear_active_flag():
80 unitdata.kv().set('charm.active', False)
81
82=== modified file 'hooks/common.py'
83--- hooks/common.py 2015-08-10 22:58:48 +0000
84+++ hooks/common.py 2015-08-24 15:14:14 +0000
85@@ -94,6 +94,21 @@
86 callbacks.update_blocked_status,
87 ],
88 },
89+ {
90+ 'name': 'ganglia',
91+ 'requires': [
92+ hadoop.is_installed,
93+ jujubigdata.relations.Ganglia,
94+ ],
95+ 'callbacks': [
96+ callbacks.conf_ganglia_metrics,
97+ ],
98+ 'cleanup': [
99+ callbacks.purge_ganglia_metrics
100+ ],
101+
102+ },
103+
104 ])
105 manager.manage()
106
107
108=== added file 'hooks/ganglia-relation-broken'
109--- hooks/ganglia-relation-broken 1970-01-01 00:00:00 +0000
110+++ hooks/ganglia-relation-broken 2015-08-24 15:14:14 +0000
111@@ -0,0 +1,26 @@
112+#!/usr/bin/env python
113+# Licensed under the Apache License, Version 2.0 (the "License");
114+# you may not use this file except in compliance with the License.
115+# You may obtain a copy of the License at
116+#
117+# http://www.apache.org/licenses/LICENSE-2.0
118+#
119+# Unless required by applicable law or agreed to in writing, software
120+# distributed under the License is distributed on an "AS IS" BASIS,
121+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
122+# See the License for the specific language governing permissions and
123+# limitations under the License.
124+
125+"""
126+All hooks in this charm are managed by the Charm Framework.
127+The framework helps manage dependencies and preconditions to ensure that
128+steps are only executed when they can be successful. As such, no additional
129+code should be added to this hook; instead, please integrate new functionality
130+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
131+in hooks/callbacks.py, if necessary.
132+
133+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
134+for more information.
135+"""
136+import common
137+common.manage()
138
139=== added file 'hooks/ganglia-relation-changed'
140--- hooks/ganglia-relation-changed 1970-01-01 00:00:00 +0000
141+++ hooks/ganglia-relation-changed 2015-08-24 15:14:14 +0000
142@@ -0,0 +1,26 @@
143+#!/usr/bin/env python
144+# Licensed under the Apache License, Version 2.0 (the "License");
145+# you may not use this file except in compliance with the License.
146+# You may obtain a copy of the License at
147+#
148+# http://www.apache.org/licenses/LICENSE-2.0
149+#
150+# Unless required by applicable law or agreed to in writing, software
151+# distributed under the License is distributed on an "AS IS" BASIS,
152+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
153+# See the License for the specific language governing permissions and
154+# limitations under the License.
155+
156+"""
157+All hooks in this charm are managed by the Charm Framework.
158+The framework helps manage dependencies and preconditions to ensure that
159+steps are only executed when they can be successful. As such, no additional
160+code should be added to this hook; instead, please integrate new functionality
161+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
162+in hooks/callbacks.py, if necessary.
163+
164+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
165+for more information.
166+"""
167+import common
168+common.manage()
169
170=== modified file 'metadata.yaml'
171--- metadata.yaml 2015-05-12 21:52:31 +0000
172+++ metadata.yaml 2015-08-24 15:14:14 +0000
173@@ -11,3 +11,5 @@
174 provides:
175 secondary:
176 interface: dfs-secondary
177+ ganglia:
178+ interface: monitor
179
180=== added directory 'templates'
181=== added file 'templates/hadoop-metrics2.properties.j2'
182--- templates/hadoop-metrics2.properties.j2 1970-01-01 00:00:00 +0000
183+++ templates/hadoop-metrics2.properties.j2 2015-08-24 15:14:14 +0000
184@@ -0,0 +1,69 @@
185+#
186+# Licensed to the Apache Software Foundation (ASF) under one or more
187+# contributor license agreements. See the NOTICE file distributed with
188+# this work for additional information regarding copyright ownership.
189+# The ASF licenses this file to You under the Apache License, Version 2.0
190+# (the "License"); you may not use this file except in compliance with
191+# the License. You may obtain a copy of the License at
192+#
193+# http://www.apache.org/licenses/LICENSE-2.0
194+#
195+# Unless required by applicable law or agreed to in writing, software
196+# distributed under the License is distributed on an "AS IS" BASIS,
197+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
198+# See the License for the specific language governing permissions and
199+# limitations under the License.
200+#
201+
202+# syntax: [prefix].[source|sink].[instance].[options]
203+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
204+
205+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
206+# default sampling period, in seconds
207+*.period=10
208+
209+# Defining sink for Ganglia 3.1
210+{{ ganglia_sink_str }}
211+
212+# Default polling period for GangliaSink
213+*.sink.ganglia.period=10
214+
215+# default for supportsparse is false
216+*.sink.ganglia.supportsparse=true
217+
218+# Directing output to ganglia servers
219+
220+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
221+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
222+
223+namenode.sink.ganglia.servers={{ ganglia_host }}:8649
224+datanode.sink.ganglia.servers={{ ganglia_host }}:8649
225+jobtracker.sink.ganglia.servers={{ ganglia_host }}:8649
226+tasktracker.sink.ganglia.servers={{ ganglia_host }}:8649
227+maptask.sink.ganglia.servers={{ ganglia_host }}:8649
228+reducetask.sink.ganglia.servers={{ ganglia_host }}:8649
229+resourcemanager.sink.ganglia.servers={{ ganglia_host }}:8649
230+nodemanager.sink.ganglia.servers={{ ganglia_host }}:8649
231+historyserver.sink.ganglia.servers={{ ganglia_host }}:8649
232+journalnode.sink.ganglia.servers={{ ganglia_host }}:8649
233+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
234+
235+# The namenode-metrics.out will contain metrics from all context
236+#namenode.sink.file.filename=namenode-metrics.out
237+# Specifying a special sampling period for namenode:
238+#namenode.sink.*.period=8
239+
240+#datanode.sink.file.filename=datanode-metrics.out
241+
242+# the following example split metrics of different
243+# context to different sinks (in this case files)
244+#jobtracker.sink.file_jvm.context=jvm
245+#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
246+#jobtracker.sink.file_mapred.context=mapred
247+#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
248+
249+#tasktracker.sink.file.filename=tasktracker-metrics.out
250+
251+#maptask.sink.file.filename=maptask-metrics.out
252+
253+#reducetask.sink.file.filename=reducetask-metrics.out

Subscribers

People subscribed via source and target branches

to all changes: