Merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking into lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk

Proposed by Cory Johns
Status: Merged
Merged at revision: 97
Proposed branch: lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking
Merge into: lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk
Diff against target: 187 lines (+91/-22)
4 files modified
hooks/callbacks.py (+28/-12)
hooks/common.py (+31/-10)
hooks/datanode-relation-departed (+16/-0)
hooks/nodemanager-relation-departed (+16/-0)
To merge this branch: bzr merge lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/datanode-blocking
Reviewer Review Type Date Requested Status
Kevin W Monroe Approve
Review via email: mp+267597@code.launchpad.net

Description of the change

Fixed the datanode component being blocked by the nodemanager component, which in turn blocked hdfs-master on yarn-master
Fixed inaccurate status reporting when relations were removed

To post a comment you must log in.
Revision history for this message
Kevin W Monroe (kwmonroe) wrote :

My head hurts with your status wizardry. I'm pretty sure it LGTM :)

review: Approve
98. By Cory Johns

Improved status reporting for HDFS-only configuration

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'hooks/callbacks.py'
2--- hooks/callbacks.py 2015-06-24 22:12:57 +0000
3+++ hooks/callbacks.py 2015-08-13 21:20:20 +0000
4@@ -24,37 +24,53 @@
5 def update_blocked_status():
6 if unitdata.kv().get('charm.active', False):
7 return
8- rels = (
9- ('Yarn', 'ResourceManager', ResourceManagerMaster()),
10+ rels = [
11 ('HDFS', 'NameNode', NameNodeMaster()),
12- )
13+ ]
14 missing_rel = [rel for rel, res, impl in rels if not impl.connected_units()]
15- missing_hosts = [rel for rel, res, impl in rels if not impl.am_i_registered()]
16- not_ready = [(rel, res) for rel, res, impl in rels if not impl.is_ready()]
17+ rels.append(('Yarn', 'ResourceManager', ResourceManagerMaster()))
18+ not_ready = [(rel, res) for rel, res, impl in rels if impl.connected_units() and not impl.is_ready()]
19+ missing_hosts = [rel for rel, res, impl in rels if impl.connected_units() and not impl.am_i_registered()]
20 if missing_rel:
21 hookenv.status_set('blocked', 'Waiting for relation to %s master%s' % (
22 ' and '.join(missing_rel),
23 's' if len(missing_rel) > 1 else '',
24 )),
25- elif missing_hosts:
26- hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
27- ' and '.join(missing_hosts),
28- ))
29 elif not_ready:
30 unready_rels, unready_ress = zip(*not_ready)
31 hookenv.status_set('waiting', 'Waiting for %s to provide %s' % (
32 ' and '.join(unready_rels),
33 ' and '.join(unready_ress),
34 ))
35+ elif missing_hosts:
36+ hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
37+ ' and '.join(missing_hosts),
38+ ))
39
40
41 def update_working_status():
42 if unitdata.kv().get('charm.active', False):
43 hookenv.status_set('maintenance', 'Updating configuration')
44 return
45- hookenv.status_set('maintenance', 'Setting up NodeManager and DataNode')
46+ yarn_connected = ResourceManagerMaster().connected_units()
47+ hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
48+ ' and NodeManager' if yarn_connected else '',
49+ ))
50
51
52 def update_active_status():
53- unitdata.kv().set('charm.active', True)
54- hookenv.status_set('active', 'Ready')
55+ hdfs_ready = NameNodeMaster().is_ready()
56+ yarn_connected = ResourceManagerMaster().connected_units()
57+ yarn_ready = ResourceManagerMaster().is_ready()
58+ if hdfs_ready and (not yarn_connected or yarn_ready):
59+ unitdata.kv().set('charm.active', True)
60+ hookenv.status_set('active', 'Ready%s' % (
61+ '' if yarn_ready else ' (HDFS only)'
62+ ))
63+ else:
64+ clear_active_flag()
65+ update_blocked_status()
66+
67+
68+def clear_active_flag():
69+ unitdata.kv().set('charm.active', False)
70
71=== modified file 'hooks/common.py'
72--- hooks/common.py 2015-06-24 22:12:57 +0000
73+++ hooks/common.py 2015-08-13 21:20:20 +0000
74@@ -71,40 +71,61 @@
75 ],
76 },
77 {
78- 'name': 'compute-slave',
79+ 'name': 'datanode',
80 'provides': [
81 jujubigdata.relations.DataNode(),
82+ ],
83+ 'requires': [
84+ hadoop.is_installed,
85+ hdfs_relation,
86+ hdfs_relation.am_i_registered,
87+ ],
88+ 'callbacks': [
89+ callbacks.update_working_status,
90+ hdfs_relation.register_provided_hosts,
91+ jujubigdata.utils.manage_etc_hosts,
92+ hdfs_relation.install_ssh_keys,
93+ hdfs.configure_datanode,
94+ hdfs.start_datanode,
95+ charmframework.helpers.open_ports(
96+ dist_config.exposed_ports('compute-slave-hdfs')),
97+ callbacks.update_active_status,
98+ ],
99+ 'cleanup': [
100+ callbacks.clear_active_flag,
101+ charmframework.helpers.close_ports(
102+ dist_config.exposed_ports('compute-slave-hdfs')),
103+ hdfs.stop_datanode,
104+ callbacks.update_blocked_status,
105+ ],
106+ },
107+ {
108+ 'name': 'nodemanager',
109+ 'provides': [
110 jujubigdata.relations.NodeManager(),
111 ],
112 'requires': [
113 hadoop.is_installed,
114- hdfs_relation,
115 yarn_relation,
116- hdfs_relation.am_i_registered,
117 yarn_relation.am_i_registered,
118 ],
119 'callbacks': [
120 callbacks.update_working_status,
121- hdfs_relation.register_provided_hosts,
122 yarn_relation.register_provided_hosts,
123 jujubigdata.utils.manage_etc_hosts,
124- hdfs_relation.install_ssh_keys,
125 yarn_relation.install_ssh_keys,
126- hdfs.configure_datanode,
127 yarn.configure_nodemanager,
128- hdfs.start_datanode,
129 yarn.start_nodemanager,
130 charmframework.helpers.open_ports(
131- dist_config.exposed_ports('compute-slave-hdfs') +
132 dist_config.exposed_ports('compute-slave-yarn')),
133 callbacks.update_active_status,
134 ],
135 'cleanup': [
136+ callbacks.clear_active_flag,
137 charmframework.helpers.close_ports(
138- dist_config.exposed_ports('compute-slave-hdfs') +
139 dist_config.exposed_ports('compute-slave-yarn')),
140- hdfs.stop_datanode,
141 yarn.stop_nodemanager,
142+ yarn.stop_nodemanager,
143+ callbacks.update_active_status, # might still be active for HDFS-only
143 ],
144 },
145 ])
146
147=== added file 'hooks/datanode-relation-departed'
148--- hooks/datanode-relation-departed 1970-01-01 00:00:00 +0000
149+++ hooks/datanode-relation-departed 2015-08-13 21:20:20 +0000
150@@ -0,0 +1,16 @@
151+#!/usr/bin/env python
152+# Licensed under the Apache License, Version 2.0 (the "License");
153+# you may not use this file except in compliance with the License.
154+# You may obtain a copy of the License at
155+#
156+# http://www.apache.org/licenses/LICENSE-2.0
157+#
158+# Unless required by applicable law or agreed to in writing, software
159+# distributed under the License is distributed on an "AS IS" BASIS,
160+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
161+# See the License for the specific language governing permissions and
162+# limitations under the License.
163+
164+import common
165+
166+common.manage()
167
168=== added file 'hooks/nodemanager-relation-departed'
169--- hooks/nodemanager-relation-departed 1970-01-01 00:00:00 +0000
170+++ hooks/nodemanager-relation-departed 2015-08-13 21:20:20 +0000
171@@ -0,0 +1,16 @@
172+#!/usr/bin/env python
173+# Licensed under the Apache License, Version 2.0 (the "License");
174+# you may not use this file except in compliance with the License.
175+# You may obtain a copy of the License at
176+#
177+# http://www.apache.org/licenses/LICENSE-2.0
178+#
179+# Unless required by applicable law or agreed to in writing, software
180+# distributed under the License is distributed on an "AS IS" BASIS,
181+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
182+# See the License for the specific language governing permissions and
183+# limitations under the License.
184+
185+import common
186+
187+common.manage()

Subscribers

People subscribed via source and target branches

to all changes: