Merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk into lp:charms/trusty/apache-hadoop-compute-slave
- Trusty Tahr (14.04)
- trunk
- Merge into trunk
Proposed by
Kevin W Monroe
Status: | Merged |
---|---|
Merged at revision: | 88 |
Proposed branch: | lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk |
Merge into: | lp:charms/trusty/apache-hadoop-compute-slave |
Diff against target: |
412 lines (+289/-35) (has conflicts) 9 files modified
README.md (+25/-0) config.yaml (+10/-0) hooks/callbacks.py (+106/-30) hooks/common.py (+23/-3) hooks/ganglia-relation-broken (+26/-0) hooks/ganglia-relation-changed (+26/-0) metadata.yaml (+2/-0) templates/hadoop-metrics2.properties.j2 (+69/-0) tests/01-basic-deployment.py (+2/-2) Text conflict in hooks/callbacks.py Text conflict in hooks/common.py |
To merge this branch: | bzr merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Kevin W Monroe | Approve | ||
Review via email: mp+271162@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
Kevin W Monroe (kwmonroe) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'README.md' | |||
2 | --- README.md 2015-08-24 23:09:31 +0000 | |||
3 | +++ README.md 2015-09-15 17:34:38 +0000 | |||
4 | @@ -37,6 +37,26 @@ | |||
5 | 37 | juju add-unit compute-slave -n 3 | 37 | juju add-unit compute-slave -n 3 |
6 | 38 | 38 | ||
7 | 39 | 39 | ||
8 | 40 | ## Monitoring | ||
9 | 41 | |||
10 | 42 | This charm supports monitoring via Ganglia. To enable monitoring, you must | ||
11 | 43 | do **both** of the following (the order does not matter): | ||
12 | 44 | |||
13 | 45 | * Add a relation to the [Ganglia charm][] via the `:master` relation | ||
14 | 46 | * Enable the `ganglia_metrics` config option | ||
15 | 47 | |||
16 | 48 | You must **also** enable metrics on [yarn-master][] and / or [hdfs-master][] | ||
17 | 49 | to initiate the restart of the NodeManager and / or DataNode components for | ||
18 | 50 | them to begin collecting metrics. | ||
19 | 51 | |||
20 | 52 | For example: | ||
21 | 53 | |||
22 | 54 | juju add-relation compute-slave ganglia:master | ||
23 | 55 | juju add-relation yarn-master ganglia:master | ||
24 | 56 | juju set compute-slave ganglia_metrics=true | ||
25 | 57 | juju set yarn-master ganglia_metrics=true | ||
26 | 58 | |||
27 | 59 | |||
28 | 40 | ## Deploying in Network-Restricted Environments | 60 | ## Deploying in Network-Restricted Environments |
29 | 41 | 61 | ||
30 | 42 | The Apache Hadoop charms can be deployed in environments with limited network | 62 | The Apache Hadoop charms can be deployed in environments with limited network |
31 | @@ -85,3 +105,8 @@ | |||
32 | 85 | - [Apache Hadoop bug trackers](http://hadoop.apache.org/issue_tracking.html) | 105 | - [Apache Hadoop bug trackers](http://hadoop.apache.org/issue_tracking.html) |
33 | 86 | - [Apache Hadoop mailing lists](http://hadoop.apache.org/mailing_lists.html) | 106 | - [Apache Hadoop mailing lists](http://hadoop.apache.org/mailing_lists.html) |
34 | 87 | - [Apache Hadoop Juju Charm](http://jujucharms.com/?text=hadoop) | 107 | - [Apache Hadoop Juju Charm](http://jujucharms.com/?text=hadoop) |
35 | 108 | |||
36 | 109 | |||
37 | 110 | [Ganglia charm]: http://jujucharms.com/ganglia/ | ||
38 | 111 | [yarn-master]: http://jujucharms.com/apache-hadoop-yarn-master/ | ||
39 | 112 | [hdfs-master]: http://jujucharms.com/apache-hadoop-hdfs-master/ | ||
40 | 88 | 113 | ||
41 | === modified file 'config.yaml' | |||
42 | --- config.yaml 2015-04-03 16:49:16 +0000 | |||
43 | +++ config.yaml 2015-09-15 17:34:38 +0000 | |||
44 | @@ -4,3 +4,13 @@ | |||
45 | 4 | default: '' | 4 | default: '' |
46 | 5 | description: | | 5 | description: | |
47 | 6 | URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad. | 6 | URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad. |
48 | 7 | ganglia_metrics: | ||
49 | 8 | type: boolean | ||
50 | 9 | default: false | ||
51 | 10 | description: | | ||
52 | 11 | Enable metrics using Ganglia. Note that enabling this option will | ||
53 | 12 | have no effect if the service is not related to a ganglia service | ||
54 | 13 | via the ganglia:master relation. Enabling this option also will | ||
55 | 14 | *not* restart the DataNode or NodeManager components, so it will | ||
56 | 15 | also be necessary to enable metrics on one or more of the hdfs-master | ||
57 | 16 | or yarn-master services. See the README for more information. | ||
58 | 7 | 17 | ||
59 | === modified file 'hooks/callbacks.py' | |||
60 | --- hooks/callbacks.py 2015-08-13 21:19:50 +0000 | |||
61 | +++ hooks/callbacks.py 2015-09-15 17:34:38 +0000 | |||
62 | @@ -18,7 +18,10 @@ | |||
63 | 18 | 18 | ||
64 | 19 | from charmhelpers.core import hookenv | 19 | from charmhelpers.core import hookenv |
65 | 20 | from charmhelpers.core import unitdata | 20 | from charmhelpers.core import unitdata |
67 | 21 | from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster | 21 | from jujubigdata.relations import NameNodeMaster, ResourceManagerMaster, Ganglia |
68 | 22 | from charmhelpers.core.templating import render | ||
69 | 23 | from functools import partial | ||
70 | 24 | from subprocess import check_call | ||
71 | 22 | 25 | ||
72 | 23 | 26 | ||
73 | 24 | def update_blocked_status(): | 27 | def update_blocked_status(): |
74 | @@ -42,35 +45,108 @@ | |||
75 | 42 | ' and '.join(unready_rels), | 45 | ' and '.join(unready_rels), |
76 | 43 | ' and '.join(unready_ress), | 46 | ' and '.join(unready_ress), |
77 | 44 | )) | 47 | )) |
92 | 45 | elif missing_hosts: | 48 | <<<<<<< TREE |
93 | 46 | hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % ( | 49 | elif missing_hosts: |
94 | 47 | ' and '.join(missing_hosts), | 50 | hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % ( |
95 | 48 | )) | 51 | ' and '.join(missing_hosts), |
96 | 49 | 52 | )) | |
97 | 50 | 53 | ||
98 | 51 | def update_working_status(): | 54 | |
99 | 52 | if unitdata.kv().get('charm.active', False): | 55 | def update_working_status(): |
100 | 53 | hookenv.status_set('maintenance', 'Updating configuration') | 56 | if unitdata.kv().get('charm.active', False): |
101 | 54 | return | 57 | hookenv.status_set('maintenance', 'Updating configuration') |
102 | 55 | yarn_connected = ResourceManagerMaster().connected_units() | 58 | return |
103 | 56 | hookenv.status_set('maintenance', 'Setting up DataNode%s' % ( | 59 | yarn_connected = ResourceManagerMaster().connected_units() |
104 | 57 | ' and NodeManager' if yarn_connected else '', | 60 | hookenv.status_set('maintenance', 'Setting up DataNode%s' % ( |
105 | 58 | )) | 61 | ' and NodeManager' if yarn_connected else '', |
106 | 62 | )) | ||
107 | 63 | ======= | ||
108 | 64 | elif missing_hosts: | ||
109 | 65 | hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % ( | ||
110 | 66 | ' and '.join(missing_hosts), | ||
111 | 67 | )) | ||
112 | 68 | |||
113 | 69 | |||
114 | 70 | def update_working_status(): | ||
115 | 71 | if unitdata.kv().get('charm.active', False): | ||
116 | 72 | hookenv.status_set('maintenance', 'Updating configuration') | ||
117 | 73 | return | ||
118 | 74 | yarn_connected = ResourceManagerMaster().connected_units() | ||
119 | 75 | hookenv.status_set('maintenance', 'Setting up DataNode%s' % ( | ||
120 | 76 | ' and NodeManager' if yarn_connected else '', | ||
121 | 77 | )) | ||
122 | 78 | |||
123 | 79 | |||
124 | 80 | def update_working_status(): | ||
125 | 81 | if unitdata.kv().get('charm.active', False): | ||
126 | 82 | hookenv.status_set('maintenance', 'Updating configuration') | ||
127 | 83 | return | ||
128 | 84 | yarn_connected = ResourceManagerMaster().connected_units() | ||
129 | 85 | hookenv.status_set('maintenance', 'Setting up DataNode%s' % ( | ||
130 | 86 | ' and NodeManager' if yarn_connected else '', | ||
131 | 87 | )) | ||
132 | 88 | >>>>>>> MERGE-SOURCE | ||
133 | 59 | 89 | ||
134 | 60 | 90 | ||
135 | 61 | def update_active_status(): | 91 | def update_active_status(): |
151 | 62 | hdfs_ready = NameNodeMaster().is_ready() | 92 | <<<<<<< TREE |
152 | 63 | yarn_connected = ResourceManagerMaster().connected_units() | 93 | hdfs_ready = NameNodeMaster().is_ready() |
153 | 64 | yarn_ready = ResourceManagerMaster().is_ready() | 94 | yarn_connected = ResourceManagerMaster().connected_units() |
154 | 65 | if hdfs_ready and (not yarn_connected or yarn_ready): | 95 | yarn_ready = ResourceManagerMaster().is_ready() |
155 | 66 | unitdata.kv().set('charm.active', True) | 96 | if hdfs_ready and (not yarn_connected or yarn_ready): |
156 | 67 | hookenv.status_set('active', 'Ready%s' % ( | 97 | unitdata.kv().set('charm.active', True) |
157 | 68 | '' if yarn_ready else ' (HDFS only)' | 98 | hookenv.status_set('active', 'Ready%s' % ( |
158 | 69 | )) | 99 | '' if yarn_ready else ' (HDFS only)' |
159 | 70 | else: | 100 | )) |
160 | 71 | clear_active_flag() | 101 | else: |
161 | 72 | update_blocked_status() | 102 | clear_active_flag() |
162 | 73 | 103 | update_blocked_status() | |
163 | 74 | 104 | ||
164 | 75 | def clear_active_flag(): | 105 | |
165 | 76 | unitdata.kv().set('charm.active', False) | 106 | def clear_active_flag(): |
166 | 107 | unitdata.kv().set('charm.active', False) | ||
167 | 108 | ======= | ||
168 | 109 | hdfs_ready = NameNodeMaster().is_ready() | ||
169 | 110 | yarn_connected = ResourceManagerMaster().connected_units() | ||
170 | 111 | yarn_ready = ResourceManagerMaster().is_ready() | ||
171 | 112 | if hdfs_ready and (not yarn_connected or yarn_ready): | ||
172 | 113 | unitdata.kv().set('charm.active', True) | ||
173 | 114 | hookenv.status_set('active', 'Ready%s' % ( | ||
174 | 115 | '' if yarn_ready else ' (HDFS only)' | ||
175 | 116 | )) | ||
176 | 117 | else: | ||
177 | 118 | clear_active_flag() | ||
178 | 119 | update_blocked_status() | ||
179 | 120 | |||
180 | 121 | |||
181 | 122 | def clear_active_flag(): | ||
182 | 123 | unitdata.kv().set('charm.active', False) | ||
183 | 124 | |||
184 | 125 | |||
185 | 126 | def conf_ganglia_metrics(purgeConf=False): | ||
186 | 127 | """ | ||
187 | 128 | Send hadoop specific metrics to a ganglia server | ||
188 | 129 | """ | ||
189 | 130 | config = hookenv.config() | ||
190 | 131 | ganglia_metrics = config['ganglia_metrics'] and not purgeConf | ||
191 | 132 | ganglia_metrics_changed = ganglia_metrics != unitdata.kv().get('ganglia_metrics', False) | ||
192 | 133 | unitdata.kv().set('ganglia_metrics', ganglia_metrics) | ||
193 | 134 | comment = '#' if not ganglia_metrics else '' | ||
194 | 135 | ganglia_host = 'UNSET_BY_JUJU' if not ganglia_metrics else Ganglia().host() | ||
195 | 136 | ganglia_sink_str = comment + '*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31' | ||
196 | 137 | hookenv.log("Configuring ganglia sink in /etc/hadoop/conf/hadoop-metrics2.properties", level=None) | ||
197 | 138 | render( | ||
198 | 139 | source='hadoop-metrics2.properties.j2', | ||
199 | 140 | target='/etc/hadoop/conf/hadoop-metrics2.properties', | ||
200 | 141 | context={ | ||
201 | 142 | 'ganglia_host': ganglia_host, | ||
202 | 143 | 'ganglia_sink_str': ganglia_sink_str, | ||
203 | 144 | }, | ||
204 | 145 | ), | ||
205 | 146 | if ganglia_metrics_changed: | ||
206 | 147 | #check_call(['actions/restart-hdfs']) | ||
207 | 148 | # IMPLEMENT RESTART COMPUTE SLAVE? | ||
208 | 149 | hookenv.log("please manually restart compute slave hadoop components", level=None) | ||
209 | 150 | |||
210 | 151 | purge_ganglia_metrics = partial(conf_ganglia_metrics, purgeConf=True) | ||
211 | 152 | >>>>>>> MERGE-SOURCE | ||
212 | 77 | 153 | ||
213 | === modified file 'hooks/common.py' (properties changed: +x to -x) | |||
214 | --- hooks/common.py 2015-08-18 15:18:07 +0000 | |||
215 | +++ hooks/common.py 2015-09-15 17:34:38 +0000 | |||
216 | @@ -125,9 +125,29 @@ | |||
217 | 125 | charmframework.helpers.close_ports( | 125 | charmframework.helpers.close_ports( |
218 | 126 | dist_config.exposed_ports('compute-slave-yarn')), | 126 | dist_config.exposed_ports('compute-slave-yarn')), |
219 | 127 | yarn.stop_nodemanager, | 127 | yarn.stop_nodemanager, |
223 | 128 | callbacks.update_active_status, # might still be active if HDFS-only | 128 | <<<<<<< TREE |
224 | 129 | ], | 129 | callbacks.update_active_status, # might still be active if HDFS-only |
225 | 130 | }, | 130 | ], |
226 | 131 | }, | ||
227 | 132 | ======= | ||
228 | 133 | callbacks.update_active_status, # might still be active if HDFS-only | ||
229 | 134 | ], | ||
230 | 135 | }, | ||
231 | 136 | { | ||
232 | 137 | 'name': 'ganglia', | ||
233 | 138 | 'requires': [ | ||
234 | 139 | hadoop.is_installed, | ||
235 | 140 | jujubigdata.relations.Ganglia, | ||
236 | 141 | ], | ||
237 | 142 | 'callbacks': [ | ||
238 | 143 | callbacks.conf_ganglia_metrics, | ||
239 | 144 | ], | ||
240 | 145 | 'cleanup': [ | ||
241 | 146 | callbacks.purge_ganglia_metrics | ||
242 | 147 | ], | ||
243 | 148 | }, | ||
244 | 149 | |||
245 | 150 | >>>>>>> MERGE-SOURCE | ||
246 | 131 | ]) | 151 | ]) |
247 | 132 | manager.manage() | 152 | manager.manage() |
248 | 133 | 153 | ||
249 | 134 | 154 | ||
250 | === added file 'hooks/ganglia-relation-broken' | |||
251 | --- hooks/ganglia-relation-broken 1970-01-01 00:00:00 +0000 | |||
252 | +++ hooks/ganglia-relation-broken 2015-09-15 17:34:38 +0000 | |||
253 | @@ -0,0 +1,26 @@ | |||
254 | 1 | #!/usr/bin/env python | ||
255 | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
256 | 3 | # you may not use this file except in compliance with the License. | ||
257 | 4 | # You may obtain a copy of the License at | ||
258 | 5 | # | ||
259 | 6 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
260 | 7 | # | ||
261 | 8 | # Unless required by applicable law or agreed to in writing, software | ||
262 | 9 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
263 | 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
264 | 11 | # See the License for the specific language governing permissions and | ||
265 | 12 | # limitations under the License. | ||
266 | 13 | |||
267 | 14 | """ | ||
268 | 15 | All hooks in this charm are managed by the Charm Framework. | ||
269 | 16 | The framework helps manage dependencies and preconditions to ensure that | ||
270 | 17 | steps are only executed when they can be successful. As such, no additional | ||
271 | 18 | code should be added to this hook; instead, please integrate new functionality | ||
272 | 19 | into the 'callbacks' list in hooks/common.py. New callbacks can be placed | ||
273 | 20 | in hooks/callbacks.py, if necessary. | ||
274 | 21 | |||
275 | 22 | See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html | ||
276 | 23 | for more information. | ||
277 | 24 | """ | ||
278 | 25 | import common | ||
279 | 26 | common.manage() | ||
280 | 0 | 27 | ||
281 | === added file 'hooks/ganglia-relation-changed' | |||
282 | --- hooks/ganglia-relation-changed 1970-01-01 00:00:00 +0000 | |||
283 | +++ hooks/ganglia-relation-changed 2015-09-15 17:34:38 +0000 | |||
284 | @@ -0,0 +1,26 @@ | |||
285 | 1 | #!/usr/bin/env python | ||
286 | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||
287 | 3 | # you may not use this file except in compliance with the License. | ||
288 | 4 | # You may obtain a copy of the License at | ||
289 | 5 | # | ||
290 | 6 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
291 | 7 | # | ||
292 | 8 | # Unless required by applicable law or agreed to in writing, software | ||
293 | 9 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
294 | 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
295 | 11 | # See the License for the specific language governing permissions and | ||
296 | 12 | # limitations under the License. | ||
297 | 13 | |||
298 | 14 | """ | ||
299 | 15 | All hooks in this charm are managed by the Charm Framework. | ||
300 | 16 | The framework helps manage dependencies and preconditions to ensure that | ||
301 | 17 | steps are only executed when they can be successful. As such, no additional | ||
302 | 18 | code should be added to this hook; instead, please integrate new functionality | ||
303 | 19 | into the 'callbacks' list in hooks/common.py. New callbacks can be placed | ||
304 | 20 | in hooks/callbacks.py, if necessary. | ||
305 | 21 | |||
306 | 22 | See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html | ||
307 | 23 | for more information. | ||
308 | 24 | """ | ||
309 | 25 | import common | ||
310 | 26 | common.manage() | ||
311 | 0 | 27 | ||
312 | === modified file 'metadata.yaml' | |||
313 | --- metadata.yaml 2015-03-06 22:28:48 +0000 | |||
314 | +++ metadata.yaml 2015-09-15 17:34:38 +0000 | |||
315 | @@ -12,3 +12,5 @@ | |||
316 | 12 | interface: dfs-slave | 12 | interface: dfs-slave |
317 | 13 | nodemanager: | 13 | nodemanager: |
318 | 14 | interface: mapred-slave | 14 | interface: mapred-slave |
319 | 15 | ganglia: | ||
320 | 16 | interface: monitor | ||
321 | 15 | 17 | ||
322 | === added file 'resources/python/jujuresources-0.2.11.tar.gz' | |||
323 | 16 | Binary files resources/python/jujuresources-0.2.11.tar.gz 1970-01-01 00:00:00 +0000 and resources/python/jujuresources-0.2.11.tar.gz 2015-09-15 17:34:38 +0000 differ | 18 | Binary files resources/python/jujuresources-0.2.11.tar.gz 1970-01-01 00:00:00 +0000 and resources/python/jujuresources-0.2.11.tar.gz 2015-09-15 17:34:38 +0000 differ |
324 | === added directory 'templates' | |||
325 | === added file 'templates/hadoop-metrics2.properties.j2' | |||
326 | --- templates/hadoop-metrics2.properties.j2 1970-01-01 00:00:00 +0000 | |||
327 | +++ templates/hadoop-metrics2.properties.j2 2015-09-15 17:34:38 +0000 | |||
328 | @@ -0,0 +1,69 @@ | |||
329 | 1 | # | ||
330 | 2 | # Licensed to the Apache Software Foundation (ASF) under one or more | ||
331 | 3 | # contributor license agreements. See the NOTICE file distributed with | ||
332 | 4 | # this work for additional information regarding copyright ownership. | ||
333 | 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 | ||
334 | 6 | # (the "License"); you may not use this file except in compliance with | ||
335 | 7 | # the License. You may obtain a copy of the License at | ||
336 | 8 | # | ||
337 | 9 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
338 | 10 | # | ||
339 | 11 | # Unless required by applicable law or agreed to in writing, software | ||
340 | 12 | # distributed under the License is distributed on an "AS IS" BASIS, | ||
341 | 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
342 | 14 | # See the License for the specific language governing permissions and | ||
343 | 15 | # limitations under the License. | ||
344 | 16 | # | ||
345 | 17 | |||
346 | 18 | # syntax: [prefix].[source|sink].[instance].[options] | ||
347 | 19 | # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details | ||
348 | 20 | |||
349 | 21 | *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink | ||
350 | 22 | # default sampling period, in seconds | ||
351 | 23 | *.period=10 | ||
352 | 24 | |||
353 | 25 | # Defining sink for Ganglia 3.1 | ||
354 | 26 | {{ ganglia_sink_str }} | ||
355 | 27 | |||
356 | 28 | # Default polling period for GangliaSink | ||
357 | 29 | *.sink.ganglia.period=10 | ||
358 | 30 | |||
359 | 31 | # default for supportsparse is false | ||
360 | 32 | *.sink.ganglia.supportsparse=true | ||
361 | 33 | |||
362 | 34 | # Directing output to ganglia servers | ||
363 | 35 | |||
364 | 36 | *.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both | ||
365 | 37 | *.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 | ||
366 | 38 | |||
367 | 39 | namenode.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
368 | 40 | datanode.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
369 | 41 | jobtracker.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
370 | 42 | tasktracker.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
371 | 43 | maptask.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
372 | 44 | reducetask.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
373 | 45 | resourcemanager.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
374 | 46 | nodemanager.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
375 | 47 | historyserver.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
376 | 48 | journalnode.sink.ganglia.servers={{ ganglia_host }}:8649 | ||
377 | 49 | resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue | ||
378 | 50 | |||
379 | 51 | # The namenode-metrics.out will contain metrics from all context | ||
380 | 52 | #namenode.sink.file.filename=namenode-metrics.out | ||
381 | 53 | # Specifying a special sampling period for namenode: | ||
382 | 54 | #namenode.sink.*.period=8 | ||
383 | 55 | |||
384 | 56 | #datanode.sink.file.filename=datanode-metrics.out | ||
385 | 57 | |||
386 | 58 | # the following example split metrics of different | ||
387 | 59 | # context to different sinks (in this case files) | ||
388 | 60 | #jobtracker.sink.file_jvm.context=jvm | ||
389 | 61 | #jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out | ||
390 | 62 | #jobtracker.sink.file_mapred.context=mapred | ||
391 | 63 | #jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out | ||
392 | 64 | |||
393 | 65 | #tasktracker.sink.file.filename=tasktracker-metrics.out | ||
394 | 66 | |||
395 | 67 | #maptask.sink.file.filename=maptask-metrics.out | ||
396 | 68 | |||
397 | 69 | #reducetask.sink.file.filename=reducetask-metrics.out | ||
398 | 0 | 70 | ||
399 | === modified file 'tests/01-basic-deployment.py' | |||
400 | --- tests/01-basic-deployment.py 2015-03-04 00:56:45 +0000 | |||
401 | +++ tests/01-basic-deployment.py 2015-09-15 17:34:38 +0000 | |||
402 | @@ -16,8 +16,8 @@ | |||
403 | 16 | def setUpClass(cls): | 16 | def setUpClass(cls): |
404 | 17 | cls.d = amulet.Deployment(series='trusty') | 17 | cls.d = amulet.Deployment(series='trusty') |
405 | 18 | cls.d.add('apache-hadoop-compute-slave') | 18 | cls.d.add('apache-hadoop-compute-slave') |
408 | 19 | cls.d.setup(timeout=9000) | 19 | cls.d.setup(timeout=900) |
409 | 20 | cls.d.sentry.wait() | 20 | cls.d.sentry.wait(timeout=1800) |
410 | 21 | cls.unit = cls.d.sentry.unit['apache-hadoop-compute-slave/0'] | 21 | cls.unit = cls.d.sentry.unit['apache-hadoop-compute-slave/0'] |
411 | 22 | 22 | ||
412 | 23 | def test_deploy(self): | 23 | def test_deploy(self): |