For the ZooKeeper installation, see the earlier post in this series:
大数据集群安装系列1:Zookeeper 的安装 (Big Data Cluster Installation Series 1: Installing ZooKeeper)
Comments and questions are welcome.
[root@master /opt/bigdata/component]# pwd
/opt/bigdata/component
[root@master /opt/bigdata/component]# ls
azkaban.tar.gz GsFaceLib.tar.gz hadoop.tar.gz hbase.tar.gz kafka.tar.gz spark.tgz zookeeper
elastic.tar.gz hadoop haproxy-1.7.9.tar.gz hive.tar.gz rocketmq.zip zabbix-3.4.6.tar.gz zookeeper.tar.gz
[root@master /opt/bigdata/component]#
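The hadoop directory above is simply hadoop.tar.gz unpacked in place; starting from the tarball, something along these lines reproduces the layout (assuming the archive unpacks to a directory named hadoop, as the listing suggests):

cd /opt/bigdata/component
tar -zxvf hadoop.tar.gz

The HA-related properties below go into etc/hadoop/hdfs-site.xml. The three hosts are master, workerI and workerII, and the nameservice is called hacluster: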
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>hacluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.hacluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hacluster.nn1</name>
    <value>master:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.hacluster.nn1</name>
    <value>master:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hacluster.nn2</name>
    <value>workerI:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.hacluster.nn2</name>
    <value>workerI:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://workerII:8485;workerI:8485;master:8485/hacluster</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/bigdata/Hadoop/hadoop/dfs_journalnode_edits_dir</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.hacluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
</configuration>
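That closes hdfs-site.xml. Automatic failover (and the hdfs zkfc -formatZK step later on) also relies on core-site.xml pointing clients at the nameservice and listing the ZooKeeper quorum; a minimal sketch, assuming the same three hosts as above:

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://hacluster</value>
</property>
<property>
  <name>ha.zookeeper.quorum</name>
  <value>master:2181,workerI:2181,workerII:2181</value>
</property>

Next, the ResourceManager HA and scheduler settings in etc/hadoop/yarn-site.xml: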
<configuration>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yrc</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>master</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>workerI</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>workerII:2181,workerI:2181,master:2181</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle,spark_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>19000</value>
  </property>
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>8192</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>19000</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>8192</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Xmx6553m</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>master:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>workerI:8088</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
  </property>
  <property>
    <name>spark.shuffle.service.port</name>
    <value>7337</value>
  </property>
</configuration>
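A caveat on the spark_shuffle aux-service configured above: YarnShuffleService is not bundled with Hadoop, so the Spark YARN shuffle jar has to be on every NodeManager's classpath or the NodeManagers will not start. A sketch of the usual fix (the exact jar name and the yarn/ subdirectory depend on the Spark version, so treat these paths as assumptions):

cp /opt/bigdata/component/spark/yarn/spark-*-yarn-shuffle.jar /opt/bigdata/component/hadoop/share/hadoop/yarn/lib/

Repeat this (or scp the jar) on workerI and workerII as well.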
Set JAVA_HOME explicitly in etc/hadoop/hadoop-env.sh:
export JAVA_HOME=/opt/bigdata/basic/jdk
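The etc/hadoop/slaves file lists the hosts that will run a DataNode and a NodeManager; in this cluster all three machines act as workers: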
[root@master /opt/bigdata/component/hadoop/etc/hadoop]# vim slaves
[root@master /opt/bigdata/component/hadoop/etc/hadoop]# cat slaves
master
workerI
workerII
scp -r hadoop workerI:`pwd`
scp -r hadoop workerII:`pwd`
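Note that `pwd` is expanded by the local shell, so the copy lands in /opt/bigdata/component on the remote side; this assumes the same directory already exists on the workers and that passwordless SSH is in place. The same distribution as a loop, if you prefer:

for h in workerI workerII; do scp -r hadoop ${h}:/opt/bigdata/component/; done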
Initialize and start the cluster from master. The ZooKeeper ensemble from the earlier article must already be running on all three nodes, and the JournalNodes must be reachable on master, workerI and workerII before the NameNode format, since the shared edits directory lives in qjournal.

In $HADOOP_INSTALL_HOME/bin:
./hdfs zkfc -formatZK -force
In $HADOOP_INSTALL_HOME/sbin:
./hadoop-daemon.sh start zkfc
In $HADOOP_INSTALL_HOME/sbin:
./hadoop-daemon.sh start journalnode
In $HADOOP_INSTALL_HOME/bin:
./hadoop namenode -format -force
In $HADOOP_INSTALL_HOME/sbin:
./start-dfs.sh
In $HADOOP_INSTALL_HOME/sbin:
./start-yarn.sh
The standby node in this article is workerI; run the following there.
In $HADOOP_INSTALL_HOME/sbin:
./yarn-daemon.sh start resourcemanager
In $HADOOP_INSTALL_HOME/bin:
./hdfs namenode -bootstrapStandby
In $HADOOP_INSTALL_HOME/sbin:
./hadoop-daemon.sh start namenode
[root@workerI ~]# jps
1969 WrapperSimpleApp
6961 Jps
5682 JournalNode
5956 DFSZKFailoverController
5527 QuorumPeerMain
5832 DataNode
6057 NodeManager
6203 ResourceManager
[root@workerI ~]#
[root@master ~]# jps
6560 DFSZKFailoverController
6644 JournalNode
7460 ResourceManager
7112 DataNode
7563 NodeManager
8396 Jps
2076 WrapperSimpleApp
6957 NameNode
6174 QuorumPeerMain
[root@workerII ~]# jps
4067 NodeManager
3923 DataNode
3848 JournalNode
2200 WrapperSimpleApp
4606 Jps
3679 QuorumPeerMain
(The Jps and WrapperSimpleApp entries in the listings above can be ignored; they are not part of this installation.)
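Beyond jps, the HA roles can be confirmed directly; nn1/nn2 and rm1/rm2 are the IDs defined in the configuration above, and one of each pair should report active while the other reports standby (run these from $HADOOP_INSTALL_HOME/bin):

./hdfs haadmin -getServiceState nn1
./hdfs haadmin -getServiceState nn2
./yarn rmadmin -getServiceState rm1
./yarn rmadmin -getServiceState rm2

The NameNode web UIs are at master:50070 and workerI:50070, and the ResourceManager UIs at master:8088 and workerI:8088, matching the addresses configured earlier.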