Hadoop version: 2.9.2
1.1. Modify /etc/hosts on all nodes
192.168.11.72 master intellif-bigdata-master
192.168.11.73 node1 intellif-bigdata-node1
192.168.11.74 node2 intellif-bigdata-node2
192.168.11.75 node3 intellif-bigdata-node3
192.168.11.76 node4 intellif-bigdata-node4
(The intellif-bigdata-* aliases are the hostnames referenced by the Hadoop configuration files later in this guide.)
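A quick way to confirm the entries resolve correctly, on any node:
getent hosts master intellif-bigdata-node1   # should echo back the matching /etc/hosts lines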
1.2. Install the JDK on all nodes
1. Download the appropriate JDK release.
2. Extract it to /usr/local/lib: tar -zxvf jdk-xxx.tar.gz -C /usr/local/lib
3. Configure the system environment variables: edit /etc/profile and append the following at the end of the file:
export JAVA_HOME=/usr/local/lib/jdk1.8.0_181
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
4. Run source /etc/profile to make the new settings take effect.
5. Verify the installation with java -version.
1.3. Create the user account
sudo useradd -m bigdata -s /bin/bash   # add the user
sudo passwd bigdata                    # set its password
sudo adduser bigdata sudo              # grant sudo privileges
1.4. Passwordless SSH
# On master
sudo apt-get install openssh-server
ssh-keygen -t rsa   # press Enter at each prompt to accept the defaults
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
scp $HOME/.ssh/id_rsa.pub [email protected]:.ssh/
scp $HOME/.ssh/id_rsa.pub [email protected]:.ssh/
scp $HOME/.ssh/id_rsa.pub [email protected]:.ssh/
# On node1, node2, and node3 (node3 also needs passwordless SSH, since it appears in the slaves file below)
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
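If ssh-copy-id is available (it ships with the standard OpenSSH client tools), the copy-and-append steps above collapse into one command per node; a sketch, run as the bigdata user on master:
for h in node1 node2 node3; do ssh-copy-id bigdata@$h; done
ssh bigdata@node1 hostname   # verify: should print node1 without asking for a password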
Download hadoop-2.9.2.tar.gz, extract it, and edit the following configuration files:
hadoop/etc/hadoop/hadoop-env.sh
vi $HADOOP_HOME/etc/hadoop/hadoop-env.sh
Replace the default line export JAVA_HOME=${JAVA_HOME} with the actual JDK path:
export JAVA_HOME=/usr/local/lib/jdk1.8.0_181
hadoop/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://bigdata-ha/</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/bigdata/data/hadoopdata/</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>intellif-bigdata-node1:2181,intellif-bigdata-node2:2181,intellif-bigdata-node3:2181</value>
  </property>
  <property>
    <name>ha.zookeeper.session-timeout.ms</name>
    <value>5000</value>
    <description>ms</description>
  </property>
</configuration>
hadoop/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/home/bigdata/data/hadoopdata/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/home/bigdata/data/hadoopdata/dfs/data</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>bigdata-ha</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.bigdata-ha</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.bigdata-ha.nn1</name>
    <value>intellif-bigdata-master:9000</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.bigdata-ha.nn2</name>
    <value>intellif-bigdata-node1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bigdata-ha.nn1</name>
    <value>intellif-bigdata-master:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bigdata-ha.nn2</name>
    <value>intellif-bigdata-node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://intellif-bigdata-master:8485;intellif-bigdata-node1:8485;intellif-bigdata-node2:8485/bigdata-ha</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/bigdata/data/journaldata</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.bigdata-ha</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/bigdata/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>
  <property>
    <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
    <value>60000</value>
  </property>
</configuration>
hadoop/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yarn-cluster</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>intellif-bigdata-master</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>intellif-bigdata-node2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>intellif-bigdata-node1:2181,intellif-bigdata-node2:2181,intellif-bigdata-node3:2181</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>106800</value>
  </property>
  <property>
    <name>yarn.log.server.url</name>
    <value>http://intellif-bigdata-master:19888/jobhistory/logs</value>
  </property>
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>intellif-bigdata-master:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>intellif-bigdata-master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>intellif-bigdata-master:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>intellif-bigdata-master:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm1</name>
    <value>intellif-bigdata-master:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.admin.address.rm1</name>
    <value>intellif-bigdata-master:23142</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>intellif-bigdata-node2:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>intellif-bigdata-node2:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>intellif-bigdata-node2:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>intellif-bigdata-node2:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm2</name>
    <value>intellif-bigdata-node2:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.admin.address.rm2</name>
    <value>intellif-bigdata-node2:23142</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>30720</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>30720</value>
  </property>
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
    <value>0.6</value>
  </property>
</configuration>
hadoop/etc/hadoop/mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>intellif-bigdata-master:10020</value>
    <description>MapReduce JobHistory Server IPC host:port</description>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>intellif-bigdata-master:19888</value>
    <description>MapReduce JobHistory Server Web UI host:port</description>
  </property>
  <property>
    <name>mapred.compress.map.output</name>
    <value>true</value>
  </property>
</configuration>
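Before distributing the configuration, it is worth checking that all four XML files are well-formed; a quick sketch using xmllint (assuming the libxml2-utils package is installed):
cd hadoop/etc/hadoop
xmllint --noout core-site.xml hdfs-site.xml yarn-site.xml mapred-site.xml   # no output means all files parse cleanly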
hadoop/etc/hadoop/slaves
intellif-bigdata-node1
intellif-bigdata-node2
intellif-bigdata-node3
Environment variables (~/.bashrc):
export HADOOP_HOME=/home/bigdata/hadoop
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
source ~/.bashrc
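A quick sanity check that the shell now finds the Hadoop binaries and picks up the configuration (hdfs getconf reads the value straight from core-site.xml):
hadoop version                       # should report 2.9.2
hdfs getconf -confKey fs.defaultFS   # should print hdfs://bigdata-ha/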
Copy the entire hadoop directory to the other nodes:
scp -r hadoop bigdata@intellif-bigdata-node1:/home/bigdata/
scp -r hadoop bigdata@intellif-bigdata-node2:/home/bigdata/
scp -r hadoop bigdata@intellif-bigdata-node3:/home/bigdata/
Configure the Hadoop environment variables on all nodes; master was already configured above, so only the remaining nodes need this. Append the same block to ~/.bashrc on each node:
export HADOOP_HOME=/home/bigdata/hadoop
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
source ~/.bashrc
Start ZooKeeper
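This guide assumes a ZooKeeper ensemble is already installed on intellif-bigdata-node1/2/3 (the hosts listed in ha.zookeeper.quorum) with its bin directory on the PATH; start and check each member like so:
zkServer.sh start    # run on intellif-bigdata-node1, node2, and node3
zkServer.sh status   # one node should report leader, the other two follower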
Start the JournalNodes (run on each of the three JournalNode hosts named in dfs.namenode.shared.edits.dir: master, node1, and node2):
cd /home/bigdata/data
rm -rf hadoopdata   # WARNING: wipes any existing HDFS data; only safe on a first-time setup
hadoop-daemon.sh stop journalnode
hadoop-daemon.sh start journalnode
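Before formatting the NameNode, confirm that a JournalNode process is actually running on each of the three hosts:
jps | grep JournalNode   # should print one JournalNode entry per host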
On master, go to the ~/hadoop directory and format the NameNode:
hdfs namenode -format
Formatting the NameNode is only needed before the very first start of the cluster; do not run it again later.
Copy the metadata to the second NameNode:
cd /home/bigdata/data/hadoopdata/dfs
scp -r name/ bigdata@intellif-bigdata-node1:$PWD
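Alternatively, Hadoop has a built-in command that initializes the standby NameNode by downloading the freshly formatted metadata over HTTP; the first NameNode must be running for this to work:
hadoop-daemon.sh start namenode    # on master: bring up the formatted NameNode
hdfs namenode -bootstrapStandby    # on intellif-bigdata-node1: pull the metadata from it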
Format the ZKFC znode in ZooKeeper (run on master only):
hdfs zkfc -formatZK
Start HDFS
start-dfs.sh
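start-dfs.sh should bring up both NameNodes, the DataNodes, the JournalNodes, and the ZKFailoverControllers. A quick check of the HA state:
jps                                 # on master, expect NameNode, JournalNode, DFSZKFailoverController
hdfs haadmin -getServiceState nn1   # one of nn1/nn2 should report active,
hdfs haadmin -getServiceState nn2   # the other standby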
Check the cluster status:
hdfs dfsadmin -report
Trigger a NameNode failover (nn1 to nn2):
hdfs haadmin -failover nn1 nn2
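Automatic failover can also be exercised end to end by killing the active NameNode and watching the standby take over; a sketch, assuming nn1 (master) is currently active:
kill -9 $(jps | awk '/NameNode/ {print $1}')   # on master: simulate a NameNode crash
hdfs haadmin -getServiceState nn2              # after a few seconds, expect: active
hadoop-daemon.sh start namenode                # restart the killed NameNode; it rejoins as standby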
Start YARN
start-yarn.sh
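In Hadoop 2.x, start-yarn.sh starts the ResourceManager only on the local machine (plus the NodeManagers listed in slaves), so the standby ResourceManager must be started by hand; the JobHistory server configured in mapred-site.xml also needs an explicit start:
yarn-daemon.sh start resourcemanager          # on intellif-bigdata-node2: start rm2
mr-jobhistory-daemon.sh start historyserver   # on master: serves ports 10020 and 19888
yarn rmadmin -getServiceState rm1             # check which ResourceManager is active
yarn rmadmin -getServiceState rm2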