1. core-site.xml
<?xml version="1.0" encoding="UTF-8"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <!-- Put site-specific property overrides in this file. --> <configuration> <property> <name>fs.defaultFS</name> <value>hdfs://mycluster</value> </property> <property> <name>hadoop.tmp.dir</name> <value>/usr/local/hadoop/tmpdir</value> </property> <property> <name>fs.checkpoint.period</name> <value>300</value> </property> <property> <name>fs.checkpoint.dir</name> <value>/usr/local/hadoop/tmpdir/namesecondary</value> </property> <property> <name>dfs.journalnode.edits.dir</name> <value>/usr/local/hadoop/tmpdir/journal/data</value> </property> <property> <name>ha.zookeeper.quorum</name> <value>10.67.2.21:2181,10.67.2.20:2181</value> </property> </configuration>
<?xml version="1.0" encoding="UTF-8"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <!-- Put site-specific property overrides in this file. --> <configuration> <property> <name>dfs.replication</name> <value>1</value> </property> <property> <name>dfs.httpd.address</name> <value>ubuntu21:50070</value> </property> <property> <name>hadoop.tmp.dir</name> <value>/usr/local/hadoop/tmpdir</value> </property> <property> <name>dfs.datanode.data.dir</name> <value>/usr/local/hadoop/tmpdir/dfs/data</value> </property> <property> <name>dfs.webhdfs.enabled</name> <value>true</value> </property> <property> <name>dfs.nameservices</name> <value>mycluster</value> </property> <property> <name>dfs.ha.namenodes.mycluster</name> <value>nn1,nn2</value> </property> <property> <name>dfs.namenode.rpc-address.mycluster.nn1</name> <value>ubuntu21:8020</value> </property> <property> <name>dfs.namenode.rpc-address.mycluster.nn2</name> <value>ubuntu20:8020</value> </property> <property> <name>dfs.namenode.http-address.mycluster.nn1</name> <value>ubuntu21:50070</value> </property> <property> <name>dfs.namenode.http-address.mycluster.nn2</name> <value>ubuntu20:50070</value> </property> <property> <name>dfs.namenode.shared.edits.dir</name> <value>qjournal://ubuntu21:8485;ubuntu20:8485/mycluster</value> </property> <property> <name>dfs.client.failover.proxy.provider.mycluster</name> <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value> </property> <property> <name>dfs.ha.fencing.methods</name> <value>shell(/bin/true)</value> </property> <property> <name>dfs.ha.automatic-failover.enabled</name> <value>true</value> </property> </configuration>
<?xml version="1.0"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <configuration> <!-- Site specific YARN configuration properties --> <property> <name>yarn.resourcemanager.hostname</name> <value>ubuntu21</value> </property> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name> <value>org.apache.hadoop.mapred.ShuffleHandler</value> </property> <property> <name>yarn.resourcemanager.ha.enabled</name> <value>true</value> </property> <property> <name>yarn.resourcemanager.cluster-id</name> <value>pijing-yarn-ha</value> </property> <property> <name>yarn.resourcemanager.ha.rm-ids</name> <value>rm1,rm2</value> </property> <property> <name>yarn.resourcemanager.hostname.rm1</name> <value>ubuntu21</value> </property> <property> <name>yarn.resourcemanager.hostname.rm2</name> <value>ubuntu20</value> </property> <property> <name>yarn.resourcemanager.webapp.address.rm1</name> <value>ubuntu21:8088</value> </property> <property> <name>yarn.resourcemanager.webapp.address.rm2</name> <value>ubuntu20:8088</value> </property> <property> <name>yarn.resourcemanager.zk-address</name> <value>10.67.2.21:2181,10.67.2.20:2181</value> </property> </configuration>
<?xml version="1.0"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <!-- Put site-specific property overrides in this file. --> <configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> </configuration>
5. On both ubuntu21 and ubuntu20, run:
nohup $HADOOP_HOME/sbin/hadoop-daemon.sh start journalnode >/dev/null 2>&1 &
This starts the JournalNodes (formatting the NameNode in the next step needs to be able to connect to the JNs).
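A quick way to confirm the JournalNodes are up, assuming the JDK's jps is on the PATH:
jps | grep JournalNode    # run on each of ubuntu21 and ubuntu20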
6. On ubuntu21, run:
$HADOOP_HOME/bin/hdfs namenode -format
This formats the NameNode.
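If the format succeeded, the NameNode metadata should now sit under hadoop.tmp.dir (the name directory defaults to ${hadoop.tmp.dir}/dfs/name), which you can eyeball with:
ls /usr/local/hadoop/tmpdir/dfs/name/current    # expect fsimage_*, seen_txid, VERSION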
7. The metadata from the format step lands under /usr/local/hadoop/tmpdir/dfs/name, next to the dfs.datanode.data.dir we configured (/usr/local/hadoop/tmpdir/dfs/data). Pack up the entire dfs directory and extract it into /usr/local/hadoop/tmpdir on ubuntu20, so that both NameNodes start from the same metadata.
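One way to do the copy, assuming passwordless SSH from ubuntu21 to ubuntu20 and that /usr/local/hadoop/tmpdir already exists on ubuntu20:
cd /usr/local/hadoop/tmpdir
tar czf dfs.tar.gz dfs                                  # package the name/ and data/ directories
scp dfs.tar.gz ubuntu20:/usr/local/hadoop/tmpdir/
ssh ubuntu20 'cd /usr/local/hadoop/tmpdir && tar xzf dfs.tar.gz && rm dfs.tar.gz'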
8. On ubuntu21, run:
$HADOOP_HOME/bin/hdfs zkfc -formatZK
This initializes the HA state for mycluster in ZooKeeper (a znode under /hadoop-ha). At this point a mycluster folder has also appeared under /usr/local/hadoop/tmpdir/journal/data on both ubuntu21 and ubuntu20.
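To double-check the ZooKeeper side, the ZooKeeper CLI (assuming it is available on one of the quorum hosts) should show the new znode:
zkCli.sh -server 10.67.2.21:2181
# inside the CLI prompt:
ls /hadoop-ha             # should list [mycluster]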
9. On ubuntu21, run:
$HADOOP_HOME/sbin/start-all.sh
This also brings up the ZKFC daemons automatically. At this point, jps on ubuntu21 shows:
NameNode
DataNode
ResourceManager
NodeManager
DFSZKFailoverController
JournalNode
and jps on ubuntu20 shows:
DataNode
NodeManager
DFSZKFailoverController
JournalNode
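To collect both process lists in one pass (assuming passwordless SSH and that jps resolves in a non-interactive shell on each host):
for h in ubuntu21 ubuntu20; do echo "== $h =="; ssh $h jps; done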
10. On ubuntu20, run:
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
start-all.sh only starts the ResourceManager on the node it runs on, so the standby RM has to be started by hand; ubuntu20 now shows a ResourceManager process as well.
11. Go into $HADOOP_HOME/bin and check the HA states:
hdfs haadmin -getServiceState nn1     # reports active
hdfs haadmin -getServiceState nn2     # reports standby
yarn rmadmin -getServiceState rm1     # reports active
yarn rmadmin -getServiceState rm2     # reports standby
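With automatic failover enabled, a rough way to exercise it is to stop the active NameNode and watch the standby take over (a sketch, assuming nn1 on ubuntu21 is currently the active one):
# on ubuntu21, the currently active NameNode
$HADOOP_HOME/sbin/hadoop-daemon.sh stop namenode
# after a short delay, nn2 should report active
hdfs haadmin -getServiceState nn2
# bring the stopped NameNode back; it should rejoin as standby
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode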