software | version |
---|---|
java | jdk-8u171-linux-x64.rpm |
hadoop | hadoop-2.7.6.tar.gz |
zookeeper | zookeeper-3.4.12.tar.gz |
hostname | os | ip | software | daemon |
---|---|---|---|---|
master | centos7 | 192.168.3.100 | jdk, hadoop | NameNode, ResourceManager, DFSZKFailoverController(zkfc) |
master1 | centos7 | 192.168.3.101 | jdk, hadoop | NameNode, ResourceManager, DFSZKFailoverController(zkfc) |
slave1 | centos7 | 192.168.3.102 | jdk, hadoop, zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
slave2 | centos7 | 192.168.3.103 | jdk, hadoop, zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
slave3 | centos7 | 192.168.3.104 | jdk, hadoop, zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
基础环境配置参考基础环境配置章节
zookeeper安装参考zookeeper分布式安装
fs.defaultFS
hdfs://master
hadoop.tmp.dir
/usr/local/hadoop/tmp
Abase for other temporary directories.
ha.zookeeper.quorum
slave1:2181,slave2:2181,slave3:2181
此处 /usr/local/hadoop/tmp如果写成file:/usr/local/hadoop/tmp会报java.lang.IllegalArgumentException: URI has an authority component错误
dfs.nameservices
master
dfs.ha.namenodes.master
nn1,nn2
dfs.namenode.rpc-address.master.nn1
master:9000
dfs.namenode.http-address.master.nn1
master:50070
dfs.namenode.rpc-address.master.nn2
master1:9000
dfs.namenode.http-address.master.nn2
master1:50070
dfs.namenode.shared.edits.dir
qjournal://slave1:8485;slave2:8485;slave3:8485/master
dfs.journalnode.edits.dir
/usr/local/hadoop/journaldata
dfs.ha.automatic-failover.enabled
true
dfs.client.failover.proxy.provider.master
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.fencing.methods
sshfence
shell(/bin/true)
dfs.ha.fencing.ssh.private-key-files
/root/.ssh/id_rsa
dfs.ha.fencing.ssh.connect-timeout
30000
mapreduce.framework.name
yarn
yarn.resourcemanager.ha.enabled
true
yarn.resourcemanager.cluster-id
yrc
yarn.resourcemanager.ha.rm-ids
rm1,rm2
yarn.resourcemanager.hostname.rm1
master
yarn.resourcemanager.hostname.rm2
master1
yarn.resourcemanager.zk-address
slave1:2181,slave2:2181,slave3:2181
yarn.nodemanager.aux-services
mapreduce_shuffle
slave1
slave2
slave3
export JAVA_HOME=/usr/java/default
scp -r /usr/local/hadoop slave1:/usr/local/
scp -r /usr/local/hadoop slave2:/usr/local/
scp -r /usr/local/hadoop slave3:/usr/local/
scp -r /usr/local/hadoop master1:/usr/local/
在slave1, slave2, slave3上分别启动zookeeper
zkServer.sh start # 启动
zkServer.sh status # 查看状态
zkServer.sh stop # 停止
在slave1, slave2, slave3上分别启动journalnode
hadoop-daemon.sh start journalnode
jps # 查看JournalNode进程
hdfs namenode -format
格式化后会根据core-site.xml中hadoop.tmp.dir的配置生成文件，将/usr/local/hadoop/tmp拷贝到master1的/usr/local/hadoop目录下
scp -r /usr/local/hadoop/tmp master1:/usr/local/hadoop
hdfs zkfc -formatZK
start-dfs.sh
start-yarn.sh
NameNode http://192.168.3.100:50070 Default HTTP port is 50070
ResourceManager http://192.168.3.100:8088 Default HTTP port is 8088