> 1、hadoop-env.sh配置

export JAVA_HOME=(jdk安装地址)

> 2、hdfs-site.xml配置文件

<此新名称服务的逻辑名称>

dfs.nameservices
mycluster

<名称服务中每个NameNode的唯一标识符>

dfs.ha.namenodes.mycluster
master,slave1



dfs.namenode.rpc-address.mycluster.master
master:9000


dfs.namenode.rpc-address.mycluster.slave1
slave1:9000

<每个NameNode监听的标准HTTP地址>

dfs.namenode.http-address.mycluster.master
master:50070


dfs.namenode.http-address.mycluster.slave1
slave1:50070

<配置JournalNodes 集群的URI>

dfs.namenode.shared.edits.dir
qjournal://master:8485;slave1:8485;slave2:8485/mycluster

<配置故障转移代理类>

dfs.client.failover.proxy.provider.mycluster
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider



dfs.ha.fencing.methods
sshfence


dfs.ha.fencing.ssh.private-key-files
/root/.ssh/id_rsa



dfs.ha.fencing.methods
shell(/bin/true)

(注意:同名属性重复配置时,后面的值会覆盖前面的 sshfence;若需同时启用 sshfence 和 shell 回退,应把两种方法按行写在同一个 dfs.ha.fencing.methods 属性值里)



dfs.journalnode.edits.dir
/usr/local/src/hadoop/data/journalnode/jn

<自动故障转移>

dfs.ha.automatic-failover.enabled
true

> 3、core-site.xml配置文件

<指定hdfs的nameservice>

fs.defaultFS
hdfs://mycluster

<指定hadoop临时目录>

hadoop.tmp.dir
/opt/tmp/hadoop/ha



ha.zookeeper.quorum
master:2181,slave1:2181,slave2:2181

> 4、mapred-site.xml配置


mapreduce.framework.name
yarn

> 5、yarn-site.xml配置

<打开高可用>

yarn.resourcemanager.ha.enabled
true

<启用自动故障转移 >

yarn.resourcemanager.ha.automatic-failover.enabled
true



yarn.resourcemanager.cluster-id
yarn-rm-cluster



yarn.resourcemanager.ha.rm-ids
rm1,rm2



yarn.resourcemanager.hostname.rm1
master


yarn.resourcemanager.hostname.rm2
slave1



yarn.resourcemanager.zk-address
master:2181,slave1:2181,slave2:2181

<配置YARN的http端口>

yarn.resourcemanager.webapp.address.rm1
master:8088


yarn.resourcemanager.webapp.address.rm2
slave1:8088


yarn.nodemanager.aux-services
mapreduce_shuffle

> 6、slaves配置(DataNode)
master
slave1
slave2