节点 | HDFS | YARN | zk |
---|---|---|---|
hadoop01 | namenode+zkfc+journalnode+datanode | nodemanager+resourcemanager | QuorumPeerMain |
hadoop02 | namenode+zkfc+journalnode+datanode | nodemanager | QuorumPeerMain |
hadoop03 | journalnode+datanode | nodemanager+resourcemanager | QuorumPeerMain |
进入到hadoop配置文件目录
cd /home/hadoop/apps/hadoop-2.7.6/etc/hadoop
改一下JAVA_HOME
export JAVA_HOME=/home/hadoop/apps/jdk1.8.0_73
<!-- core-site.xml -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://bd1906</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/data/hadoopdata/</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
</property>
<!-- hdfs-site.xml -->
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>bd1906</value>
</property>
<property>
<name>dfs.ha.namenodes.bd1906</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bd1906.nn1</name>
<value>hadoop01:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bd1906.nn1</name>
<value>hadoop01:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bd1906.nn2</name>
<value>hadoop02:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bd1906.nn2</name>
<value>hadoop02:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/bd1906</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/hadoop/data/hadoopdata/journaldata</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.bd1906</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- mapred-site.xml -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop02:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop02:19888</value>
</property>
<!-- yarn-site.xml -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yarn_bd1906</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop01</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop03</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>86400</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
vi slaves
加入
hadoop01
hadoop02
hadoop03
cd /home/hadoop/apps
发送
scp -r hadoop-2.7.6 hadoop02:/home/hadoop/apps/
scp -r hadoop-2.7.6 hadoop03:/home/hadoop/apps/
sudo scp /etc/profile hadoop02:/etc/
sudo scp /etc/profile hadoop03:/etc/
三台机器同时执行
source /etc/profile
hadoop version
三台机器都要启动
zkServer.sh start
三台机器都要启动
hadoop-daemon.sh start journalnode
在hadoop01(任意一个namenode节点)中,看到如图所示的successfully再继续往下
hadoop namenode -format
将hadoop01的元数据发送到hadoop02中
scp -r /home/hadoop/data/hadoopdata/dfs hadoop02:/home/hadoop/data/hadoopdata
在hadoop01(任意一个namenode节点)中,看到如图所示的successfully再继续往下
hdfs zkfc -formatZK
启动hdfs(任意节点)
start-dfs.sh
启动yarn(在hadoop01)
start-yarn.sh
在另一个节点启动resourcemanager(hadoop03)
yarn-daemon.sh start resourcemanager
最后查看一下进程
jps