ubuntu16.04
hadoop
hadoop3.1.0
This post walks through deploying Hadoop 3.1.0 from the release tarball.
The OS is Ubuntu 16.04.
Some prior experience is assumed; basic operations are only touched on briefly.
I have mainly consolidated the configuration files from several deployment guides and written up the commonly used settings together with short explanations.
It should suit readers who want to get a Hadoop cluster running quickly.
No. | Hostname | IP | Specs | Role | Notes |
---|---|---|---|---|---|
1 | master | 192.168.136.148 | RAM: 4G, CPU: 2 vCores | Master node: NameNode, ResourceManager | |
2 | standby | 192.168.136 | RAM: 4G, CPU: 2 vCores | Standby master: SecondaryNameNode | Runs on master by default |
3 | slave1 | 192.168.136.149 | RAM: 2G, CPU: 1 vCore | Worker node: DataNode, NodeManager | |
4 | slave2 | 192.168.136.150 | RAM: 2G, CPU: 1 vCore | Worker node: DataNode, NodeManager | |
root@master:~# cat /etc/hosts
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.136.148 master
192.168.136.149 slave1
192.168.136.150 slave2
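The same hosts entries have to resolve on every node, not only on master. A minimal sketch for pushing /etc/hosts to the slaves and checking connectivity, assuming root can already SSH to slave1 and slave2:

```bash
# Copy the hosts file to both slaves and verify name resolution.
for h in slave1 slave2; do
  scp /etc/hosts ${h}:/etc/hosts
  ping -c 1 ${h}
done
```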
Install the JDK either from a tarball or via apt.
Environment variables (not needed for an apt install):
export JAVA_HOME=/opt/java/jdk1.8.0_172
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
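To confirm the JDK is picked up (assuming the exports above were appended to /etc/profile, matching the Hadoop step below):

```bash
source /etc/profile
echo $JAVA_HOME   # should print /opt/java/jdk1.8.0_172
java -version     # should report version 1.8.0_172
```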
Download Hadoop and extract it under /opt/:
[root@master opt]# cd /opt/ && mkdir hadoop && cd hadoop
# extract hadoop-3.1.0.tar.gz
[root@master hadoop]# tar -zxvf hadoop-3.1.0.tar.gz
# update the environment variables
[root@master hadoop]# vim /etc/profile
# append at the end
export HADOOP_HOME=/opt/hadoop/hadoop-3.1.0
export PATH=$PATH:$HADOOP_HOME/bin
source /etc/profile
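A quick check that the new PATH entry works (run after sourcing /etc/profile):

```bash
hadoop version   # should report Hadoop 3.1.0
```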
Modify the configuration files
> Six files under /opt/hadoop/hadoop-3.1.0/etc/hadoop/ need to be configured:
>
> hadoop-env.sh, core-site.xml, hdfs-site.xml, yarn-site.xml, mapred-site.xml, workers
vim hadoop-env.sh
export JAVA_HOME=/opt/java/jdk1.8.0_172/
export HDFS_NAMENODE_USER="root"
export HDFS_DATANODE_USER="root"
export HDFS_SECONDARYNAMENODE_USER="root"
export YARN_RESOURCEMANAGER_USER="root"
export YARN_NODEMANAGER_USER="root"
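The HDFS_*/YARN_* user variables are what allow the Hadoop 3 start scripts to run the daemons as root; without them start-dfs.sh aborts with an "Attempting to operate on hdfs namenode as root" error. If you prefer not to edit the file interactively, the same lines can be appended with a heredoc (a sketch using the paths from this post):

```bash
cat >> /opt/hadoop/hadoop-3.1.0/etc/hadoop/hadoop-env.sh <<'EOF'
export JAVA_HOME=/opt/java/jdk1.8.0_172/
export HDFS_NAMENODE_USER="root"
export HDFS_DATANODE_USER="root"
export HDFS_SECONDARYNAMENODE_USER="root"
export YARN_RESOURCEMANAGER_USER="root"
export YARN_NODEMANAGER_USER="root"
EOF
```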
vim core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop/data/tmp</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
</configuration>
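fs.defaultFS is the address every client and DataNode uses to reach the NameNode. Once the environment is set up you can confirm which values Hadoop actually resolves (a small check, not required for the install):

```bash
hdfs getconf -confKey fs.defaultFS    # expect hdfs://master:9000
hdfs getconf -confKey hadoop.tmp.dir  # expect /opt/hadoop/data/tmp
```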
vim hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.http-address</name>
        <value>master:50070</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>standby:50090</value>
    </property>
    <property>
        <name>dfs.blocksize</name>
        <value>268435456</value>
    </property>
    <property>
        <name>dfs.namenode.handler.count</name>
        <value>100</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/opt/hadoop/data/name</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/opt/hadoop/data/datanode</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
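dfs.blocksize is given in bytes; 268435456 bytes is 256 MB per HDFS block, and dfs.replication = 2 means each block is stored on both DataNodes. A quick sanity check of the arithmetic:

```bash
echo $((268435456 / 1024 / 1024))   # 256 (MB per HDFS block)
```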
vim mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>
            /opt/hadoop/hadoop-3.1.0/etc/hadoop,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/common/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/common/lib/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/hdfs/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/hdfs/lib/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/mapreduce/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/mapreduce/lib/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/yarn/*,
            /opt/hadoop/hadoop-3.1.0/share/hadoop/yarn/lib/*
        </value>
    </property>
</configuration>
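mapreduce.application.classpath tells MapReduce tasks where the Hadoop jars live on each node. If you are unsure whether the hard-coded directories above match your layout, compare them against what the hadoop command itself resolves (a quick check, not a replacement for the setting):

```bash
/opt/hadoop/hadoop-3.1.0/bin/hadoop classpath
# and confirm the jar directories referenced above actually exist:
ls -d /opt/hadoop/hadoop-3.1.0/share/hadoop/{common,hdfs,mapreduce,yarn}
```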
vim yarn-site.xml
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8025</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8040</value>
    </property>
</configuration>
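A broken tag in any of these *-site.xml files makes the daemons fail at startup with a parse error, so it is worth validating the XML before distributing it. A minimal check, assuming xmllint is installed (apt install libxml2-utils on Ubuntu):

```bash
xmllint --noout /opt/hadoop/hadoop-3.1.0/etc/hadoop/*-site.xml && echo "XML OK"
```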
Create a masters file; it specifies the host of the Secondary NameNode.
[root@master hadoop]# touch /opt/hadoop/hadoop-3.1.0/etc/hadoop/masters
[root@master hadoop]# vim /opt/hadoop/hadoop-3.1.0/etc/hadoop/masters
# add
standby
Add the slave nodes to the workers file:
[root@master hadoop]# vim /opt/hadoop/hadoop-3.1.0/etc/hadoop/workers
# add
slave1
slave2
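start-dfs.sh and start-yarn.sh SSH into every host listed in workers, so passwordless root SSH from master to the slaves must already work. A quick sanity check (a sketch, assuming keys are already distributed):

```bash
for h in $(cat /opt/hadoop/hadoop-3.1.0/etc/hadoop/workers); do
  ssh ${h} hostname   # should print slave1 and slave2 without a password prompt
done
```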
Create the data directories referenced in core-site.xml and hdfs-site.xml:
[root@master hadoop]# mkdir -p /opt/hadoop/data/tmp
[root@master hadoop]# mkdir -p /opt/hadoop/data/name
[root@master hadoop]# mkdir -p /opt/hadoop/data/datanode
Copy the whole Hadoop directory (including the data directories just created) to the slave nodes:
[root@master opt]# scp -r /opt/hadoop slave1:/opt/
[root@master opt]# scp -r /opt/hadoop slave2:/opt/
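The slaves also need the same JAVA_HOME/HADOOP_HOME environment variables. As an alternative to the two scp commands above, one loop can distribute both the Hadoop directory and the profile (a sketch, assuming /etc/profile carries the exports shown earlier):

```bash
for h in slave1 slave2; do
  scp -r /opt/hadoop ${h}:/opt/
  scp /etc/profile ${h}:/etc/profile
  ssh ${h} "source /etc/profile && hadoop version"   # confirm each slave sees Hadoop 3.1.0
done
```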
Format the NameNode on master (only do this once for a new cluster):
[root@master opt]# /opt/hadoop/hadoop-3.1.0/bin/hdfs namenode -format
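If the format succeeds, the metadata directory configured in dfs.namenode.name.dir is populated; a quick way to confirm:

```bash
ls /opt/hadoop/data/name/current/
# expect files such as VERSION, fsimage_*, and seen_txid
```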
Start HDFS and YARN from master:
/opt/hadoop/hadoop-3.1.0/sbin/start-dfs.sh
/opt/hadoop/hadoop-3.1.0/sbin/start-yarn.sh
Check the Java processes on each node:
jps
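With the configuration above, master should show NameNode and ResourceManager (plus SecondaryNameNode if it runs there), and each slave should show DataNode and NodeManager. A couple of extra checks; the HDFS web port comes from dfs.namenode.http-address, and 8088 is YARN's default ResourceManager UI port:

```bash
hdfs dfsadmin -report   # both DataNodes should be listed as live
yarn node -list         # both NodeManagers should be RUNNING
# Web UIs:
#   HDFS NameNode:        http://master:50070
#   YARN ResourceManager: http://master:8088
```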
Stop the cluster:
/opt/hadoop/hadoop-3.1.0/sbin/stop-dfs.sh
/opt/hadoop/hadoop-3.1.0/sbin/stop-yarn.sh
Reset the Hadoop environment [remove the Hadoop HDFS data and log files]:
rm -rf /opt/hadoop/hadoop-3.1.0/logs/*
rm -rf /opt/hadoop/data
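The DataNodes keep their own copies of the data and log directories, so when resetting the whole cluster (for example before re-running hdfs namenode -format) the same cleanup has to happen on the slaves; a sketch assuming passwordless SSH:

```bash
for h in slave1 slave2; do
  ssh ${h} "rm -rf /opt/hadoop/hadoop-3.1.0/logs/* /opt/hadoop/data"
done
```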
This is my personal blog; feel free to follow:
CSDN: 弓昭的技术博客
简书: 弓昭的简书
If you have questions, you can reach me by email or leave a comment.
PS: every post has a PDF version; email me if you want one.