Merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk into lp:charms/trusty/apache-hadoop-compute-slave

Proposed by Kevin W Monroe
Status: Merged
Merged at revision: 88
Proposed branch: lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk
Merge into: lp:charms/trusty/apache-hadoop-compute-slave
Diff against target: 412 lines (+289/-35) (has conflicts)
9 files modified
README.md (+25/-0)
config.yaml (+10/-0)
hooks/callbacks.py (+106/-30)
hooks/common.py (+23/-3)
hooks/ganglia-relation-broken (+26/-0)
hooks/ganglia-relation-changed (+26/-0)
metadata.yaml (+2/-0)
templates/hadoop-metrics2.properties.j2 (+69/-0)
tests/01-basic-deployment.py (+2/-2)
Text conflict in hooks/callbacks.py
Text conflict in hooks/common.py
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk
Reviewer Review Type Date Requested Status
Kevin W Monroe Approve
Review via email: mp+271162@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Kevin W Monroe (kwmonroe) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'README.md'
--- README.md 2015-08-24 23:09:31 +0000
+++ README.md 2015-09-15 17:34:38 +0000
@@ -37,6 +37,26 @@
37 juju add-unit compute-slave -n 337 juju add-unit compute-slave -n 3
3838
3939
40## Monitoring
41
42This charm supports monitoring via Ganglia. To enable monitoring, you must
43do **both** of the following (the order does not matter):
44
45 * Add a relation to the [Ganglia charm][] via the `:master` relation
46 * Enable the `ganglia_metrics` config option
47
48You must **also** enable metrics on [yarn-master][] and / or [hdfs-master][]
49to initiate the restart of the NodeManager and / or DataNode components for
50them to begin collecting metrics.
51
52For example:
53
54 juju add-relation compute-slave ganglia:master
55 juju add-relation yarn-master ganglia:master
56 juju set compute-slave ganglia_metrics=true
57 juju set yarn-master ganglia_metrics=true
58
59
40## Deploying in Network-Restricted Environments60## Deploying in Network-Restricted Environments
4161
42The Apache Hadoop charms can be deployed in environments with limited network62The Apache Hadoop charms can be deployed in environments with limited network
@@ -85,3 +105,8 @@
85- [Apache Hadoop bug trackers](http://hadoop.apache.org/issue_tracking.html)105- [Apache Hadoop bug trackers](http://hadoop.apache.org/issue_tracking.html)
86- [Apache Hadoop mailing lists](http://hadoop.apache.org/mailing_lists.html)106- [Apache Hadoop mailing lists](http://hadoop.apache.org/mailing_lists.html)
87- [Apache Hadoop Juju Charm](http://jujucharms.com/?text=hadoop)107- [Apache Hadoop Juju Charm](http://jujucharms.com/?text=hadoop)
108
109
110[Ganglia charm]: http://jujucharms.com/ganglia/
111[yarn-master]: http://jujucharms.com/apache-hadoop-yarn-master/
112[hdfs-master]: http://jujucharms.com/apache-hadoop-hdfs-master/
88113
=== modified file 'config.yaml'
--- config.yaml 2015-04-03 16:49:16 +0000
+++ config.yaml 2015-09-15 17:34:38 +0000
@@ -4,3 +4,13 @@
4 default: ''4 default: ''
5 description: |5 description: |
6 URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad.6 URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad.
7 ganglia_metrics:
8 type: boolean
9 default: false
10 description: |
11 Enable metrics using Ganglia. Note that enabling this option will
12 have no effect if the service is not related to a ganglia service
13 via the ganglia:master relation. Enabling this option also will
14 *not* restart the DataNode or NodeManager components, so it will
15 also be necessary to enable metrics on one or more of the hdfs-master
16 or yarn-master services. See the README for more information.
717
=== modified file 'hooks/callbacks.py'
--- hooks/callbacks.py 2015-08-13 21:19:50 +0000
+++ hooks/callbacks.py 2015-09-15 17:34:38 +0000
@@ -18,7 +18,10 @@
1818
19from charmhelpers.core import hookenv19from charmhelpers.core import hookenv
20from charmhelpers.core import unitdata20from charmhelpers.core import unitdata
21from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster21from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster, Ganglia
22from charmhelpers.core.templating import render
23from functools import partial
24from subprocess import check_call
2225
2326
24def update_blocked_status():27def update_blocked_status():
@@ -42,35 +45,108 @@
42 ' and '.join(unready_rels),45 ' and '.join(unready_rels),
43 ' and '.join(unready_ress),46 ' and '.join(unready_ress),
44 ))47 ))
45 elif missing_hosts:48<<<<<<< TREE
46 hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (49 elif missing_hosts:
47 ' and '.join(missing_hosts),50 hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
48 ))51 ' and '.join(missing_hosts),
4952 ))
5053
51def update_working_status():54
52 if unitdata.kv().get('charm.active', False):55def update_working_status():
53 hookenv.status_set('maintenance', 'Updating configuration')56 if unitdata.kv().get('charm.active', False):
54 return57 hookenv.status_set('maintenance', 'Updating configuration')
55 yarn_connected = ResourceManagerMaster().connected_units()58 return
56 hookenv.status_set('maintenance', 'Setting up DataNode%s' % (59 yarn_connected = ResourceManagerMaster().connected_units()
57 ' and NodeManager' if yarn_connected else '',60 hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
58 ))61 ' and NodeManager' if yarn_connected else '',
62 ))
63=======
64 elif missing_hosts:
65 hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
66 ' and '.join(missing_hosts),
67 ))
68
69
70def update_working_status():
71 if unitdata.kv().get('charm.active', False):
72 hookenv.status_set('maintenance', 'Updating configuration')
73 return
74 yarn_connected = ResourceManagerMaster().connected_units()
75 hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
76 ' and NodeManager' if yarn_connected else '',
77 ))
78
79
80def update_working_status():
81 if unitdata.kv().get('charm.active', False):
82 hookenv.status_set('maintenance', 'Updating configuration')
83 return
84 yarn_connected = ResourceManagerMaster().connected_units()
85 hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
86 ' and NodeManager' if yarn_connected else '',
87 ))
88>>>>>>> MERGE-SOURCE
5989
6090
61def update_active_status():91def update_active_status():
62 hdfs_ready = NameNodeMaster().is_ready()92<<<<<<< TREE
63 yarn_connected = ResourceManagerMaster().connected_units()93 hdfs_ready = NameNodeMaster().is_ready()
64 yarn_ready = ResourceManagerMaster().is_ready()94 yarn_connected = ResourceManagerMaster().connected_units()
65 if hdfs_ready and (not yarn_connected or yarn_ready):95 yarn_ready = ResourceManagerMaster().is_ready()
66 unitdata.kv().set('charm.active', True)96 if hdfs_ready and (not yarn_connected or yarn_ready):
67 hookenv.status_set('active', 'Ready%s' % (97 unitdata.kv().set('charm.active', True)
68 '' if yarn_ready else ' (HDFS only)'98 hookenv.status_set('active', 'Ready%s' % (
69 ))99 '' if yarn_ready else ' (HDFS only)'
70 else:100 ))
71 clear_active_flag()101 else:
72 update_blocked_status()102 clear_active_flag()
73103 update_blocked_status()
74104
75def clear_active_flag():105
76 unitdata.kv().set('charm.active', False)106def clear_active_flag():
107 unitdata.kv().set('charm.active', False)
108=======
109 hdfs_ready = NameNodeMaster().is_ready()
110 yarn_connected = ResourceManagerMaster().connected_units()
111 yarn_ready = ResourceManagerMaster().is_ready()
112 if hdfs_ready and (not yarn_connected or yarn_ready):
113 unitdata.kv().set('charm.active', True)
114 hookenv.status_set('active', 'Ready%s' % (
115 '' if yarn_ready else ' (HDFS only)'
116 ))
117 else:
118 clear_active_flag()
119 update_blocked_status()
120
121
122def clear_active_flag():
123 unitdata.kv().set('charm.active', False)
124
125
126def conf_ganglia_metrics(purgeConf=False):
127 """
128 Send hadoop specific metrics to a ganglia server
129 """
130 config = hookenv.config()
131 ganglia_metrics = config['ganglia_metrics'] and not purgeConf
132 ganglia_metrics_changed = ganglia_metrics != unitdata.kv().get('ganglia_metrics', False)
133 unitdata.kv().set('ganglia_metrics', ganglia_metrics)
134 comment = '#' if not ganglia_metrics else ''
135 ganglia_host = 'UNSET_BY_JUJU' if not ganglia_metrics else Ganglia().host()
136 ganglia_sink_str = comment + '*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31'
137 hookenv.log("Configuring ganglia sink in /etc/hadoop/conf/hadoop-metrics2.properties", level=None)
138 render(
139 source='hadoop-metrics2.properties.j2',
140 target='/etc/hadoop/conf/hadoop-metrics2.properties',
141 context={
142 'ganglia_host': ganglia_host,
143 'ganglia_sink_str': ganglia_sink_str,
144 },
145 ),
146 if ganglia_metrics_changed:
147 #check_call(['actions/restart-hdfs'])
148 # IMPLEMENT RESTART COMPUTE SLAVE?
149 hookenv.log("please manually restart compute slave hadoop components", level=None)
150
151purge_ganglia_metrics = partial(conf_ganglia_metrics, purgeConf=True)
152>>>>>>> MERGE-SOURCE
77153
=== modified file 'hooks/common.py' (properties changed: +x to -x)
--- hooks/common.py 2015-08-18 15:18:07 +0000
+++ hooks/common.py 2015-09-15 17:34:38 +0000
@@ -125,9 +125,29 @@
125 charmframework.helpers.close_ports(125 charmframework.helpers.close_ports(
126 dist_config.exposed_ports('compute-slave-yarn')),126 dist_config.exposed_ports('compute-slave-yarn')),
127 yarn.stop_nodemanager,127 yarn.stop_nodemanager,
128 callbacks.update_active_status, # might still be active if HDFS-only128<<<<<<< TREE
129 ],129 callbacks.update_active_status, # might still be active if HDFS-only
130 },130 ],
131 },
132=======
133 callbacks.update_active_status, # might still be active if HDFS-only
134 ],
135 },
136 {
137 'name': 'ganglia',
138 'requires': [
139 hadoop.is_installed,
140 jujubigdata.relations.Ganglia,
141 ],
142 'callbacks': [
143 callbacks.conf_ganglia_metrics,
144 ],
145 'cleanup': [
146 callbacks.purge_ganglia_metrics
147 ],
148 },
149
150>>>>>>> MERGE-SOURCE
131 ])151 ])
132 manager.manage()152 manager.manage()
133153
134154
=== added file 'hooks/ganglia-relation-broken'
--- hooks/ganglia-relation-broken 1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-broken 2015-09-15 17:34:38 +0000
@@ -0,0 +1,26 @@
1#!/usr/bin/env python
2# Licensed under the Apache License, Version 2.0 (the "License");
3# you may not use this file except in compliance with the License.
4# You may obtain a copy of the License at
5#
6# http://www.apache.org/licenses/LICENSE-2.0
7#
8# Unless required by applicable law or agreed to in writing, software
9# distributed under the License is distributed on an "AS IS" BASIS,
10# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11# See the License for the specific language governing permissions and
12# limitations under the License.
13
14"""
15All hooks in this charm are managed by the Charm Framework.
16The framework helps manage dependencies and preconditions to ensure that
17steps are only executed when they can be successful. As such, no additional
18code should be added to this hook; instead, please integrate new functionality
19into the 'callbacks' list in hooks/common.py. New callbacks can be placed
20in hooks/callbacks.py, if necessary.
21
22See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
23for more information.
24"""
25import common
26common.manage()
027
=== added file 'hooks/ganglia-relation-changed'
--- hooks/ganglia-relation-changed 1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-changed 2015-09-15 17:34:38 +0000
@@ -0,0 +1,26 @@
1#!/usr/bin/env python
2# Licensed under the Apache License, Version 2.0 (the "License");
3# you may not use this file except in compliance with the License.
4# You may obtain a copy of the License at
5#
6# http://www.apache.org/licenses/LICENSE-2.0
7#
8# Unless required by applicable law or agreed to in writing, software
9# distributed under the License is distributed on an "AS IS" BASIS,
10# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11# See the License for the specific language governing permissions and
12# limitations under the License.
13
14"""
15All hooks in this charm are managed by the Charm Framework.
16The framework helps manage dependencies and preconditions to ensure that
17steps are only executed when they can be successful. As such, no additional
18code should be added to this hook; instead, please integrate new functionality
19into the 'callbacks' list in hooks/common.py. New callbacks can be placed
20in hooks/callbacks.py, if necessary.
21
22See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
23for more information.
24"""
25import common
26common.manage()
027
=== modified file 'metadata.yaml'
--- metadata.yaml 2015-03-06 22:28:48 +0000
+++ metadata.yaml 2015-09-15 17:34:38 +0000
@@ -12,3 +12,5 @@
12 interface: dfs-slave12 interface: dfs-slave
13 nodemanager:13 nodemanager:
14 interface: mapred-slave14 interface: mapred-slave
15 ganglia:
16 interface: monitor
1517
=== added file 'resources/python/jujuresources-0.2.11.tar.gz'
16Binary files resources/python/jujuresources-0.2.11.tar.gz 1970-01-01 00:00:00 +0000 and resources/python/jujuresources-0.2.11.tar.gz 2015-09-15 17:34:38 +0000 differ18Binary files resources/python/jujuresources-0.2.11.tar.gz 1970-01-01 00:00:00 +0000 and resources/python/jujuresources-0.2.11.tar.gz 2015-09-15 17:34:38 +0000 differ
=== added directory 'templates'
=== added file 'templates/hadoop-metrics2.properties.j2'
--- templates/hadoop-metrics2.properties.j2 1970-01-01 00:00:00 +0000
+++ templates/hadoop-metrics2.properties.j2 2015-09-15 17:34:38 +0000
@@ -0,0 +1,69 @@
1#
2# Licensed to the Apache Software Foundation (ASF) under one or more
3# contributor license agreements. See the NOTICE file distributed with
4# this work for additional information regarding copyright ownership.
5# The ASF licenses this file to You under the Apache License, Version 2.0
6# (the "License"); you may not use this file except in compliance with
7# the License. You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17
18# syntax: [prefix].[source|sink].[instance].[options]
19# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
20
21*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
22# default sampling period, in seconds
23*.period=10
24
25# Defining sink for Ganglia 3.1
26{{ ganglia_sink_str }}
27
28# Default polling period for GangliaSink
29*.sink.ganglia.period=10
30
31# default for supportsparse is false
32*.sink.ganglia.supportsparse=true
33
34# Directing output to ganglia servers
35
36*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
37*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
38
39namenode.sink.ganglia.servers={{ ganglia_host }}:8649
40datanode.sink.ganglia.servers={{ ganglia_host }}:8649
41jobtracker.sink.ganglia.servers={{ ganglia_host }}:8649
42tasktracker.sink.ganglia.servers={{ ganglia_host }}:8649
43maptask.sink.ganglia.servers={{ ganglia_host }}:8649
44reducetask.sink.ganglia.servers={{ ganglia_host }}:8649
45resourcemanager.sink.ganglia.servers={{ ganglia_host }}:8649
46nodemanager.sink.ganglia.servers={{ ganglia_host }}:8649
47historyserver.sink.ganglia.servers={{ ganglia_host }}:8649
48journalnode.sink.ganglia.servers={{ ganglia_host }}:8649
49resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
50
51# The namenode-metrics.out will contain metrics from all context
52#namenode.sink.file.filename=namenode-metrics.out
53# Specifying a special sampling period for namenode:
54#namenode.sink.*.period=8
55
56#datanode.sink.file.filename=datanode-metrics.out
57
58# the following example split metrics of different
59# context to different sinks (in this case files)
60#jobtracker.sink.file_jvm.context=jvm
61#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
62#jobtracker.sink.file_mapred.context=mapred
63#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
64
65#tasktracker.sink.file.filename=tasktracker-metrics.out
66
67#maptask.sink.file.filename=maptask-metrics.out
68
69#reducetask.sink.file.filename=reducetask-metrics.out
070
=== modified file 'tests/01-basic-deployment.py'
--- tests/01-basic-deployment.py 2015-03-04 00:56:45 +0000
+++ tests/01-basic-deployment.py 2015-09-15 17:34:38 +0000
@@ -16,8 +16,8 @@
16 def setUpClass(cls):16 def setUpClass(cls):
17 cls.d = amulet.Deployment(series='trusty')17 cls.d = amulet.Deployment(series='trusty')
18 cls.d.add('apache-hadoop-compute-slave')18 cls.d.add('apache-hadoop-compute-slave')
19 cls.d.setup(timeout=9000)19 cls.d.setup(timeout=900)
20 cls.d.sentry.wait()20 cls.d.sentry.wait(timeout=1800)
21 cls.unit = cls.d.sentry.unit['apache-hadoop-compute-slave/0']21 cls.unit = cls.d.sentry.unit['apache-hadoop-compute-slave/0']
2222
23 def test_deploy(self):23 def test_deploy(self):

Subscribers

People subscribed via source and target branches