Contents
1. Environment preparation
2. Installing the JDK
3. Installing Hadoop
4. Configuring the Hadoop configuration files
5. Distributing the JDK and Hadoop to the slave1 and slave2 nodes
6. Formatting HDFS
7. Configuring environment variables
8. Starting the cluster
9. Checking the processes
10. Checking the web UIs
| Node name (hostname) | IP |
|---|---|
| master | 170.158.10.101 |
| slave1 | 170.158.10.102 |
| slave2 | 170.158.10.103 |
# Run the following as the root user
systemctl stop firewalld
systemctl disable firewalld.service
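An optional quick check (not part of the original steps) that the firewall is really stopped and disabled:
# firewalld should report inactive / disabled
systemctl status firewalld
systemctl is-enabled firewalld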
vim /etc/hosts
# Add the following entries
170.158.10.101 master
170.158.10.102 slave1
170.158.10.103 slave2
# Set each node's hostname with the following command
vim /etc/hostname
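If hostnamectl is available (it is on most CentOS 7 systems), the hostname can also be set without editing the file. A small sketch, to be run on each node with that node's own name:
# Example for the master node; use slave1 / slave2 on the other nodes
hostnamectl set-hostname master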
# 2. Create the hadoop user (as root)
useradd hadoop
passwd hadoop
cd /usr/local
# Directory for installed software
mkdir server
# Directory for installation packages
mkdir software
# Give the hadoop user ownership of server and software
chown -R hadoop:hadoop ./server
chown -R hadoop:hadoop ./software
# Repeat all of the above on the other two nodes
# Grant sudo rights: edit the sudoers file as root
vim /etc/sudoers
# Add the hadoop line below the %wheel line shown here
#Allows people in group wheel to run all commands
%wheel ALL=(ALL) ALL
hadoop ALL=(ALL) NOPASSWD:ALL
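A quick way to confirm the sudoers entry took effect, assuming you then switch to the hadoop user:
# Switch to the hadoop user and check that sudo works without a password
su - hadoop
sudo whoami   # should print: root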
# Run this at the command line and enter the password when prompted (this also creates the ~/.ssh directory)
ssh localhost
# Change to the ~/.ssh directory
cd ~/.ssh
# Run the following command and press Enter three times at the prompts
ssh-keygen -t rsa
# Distribute the public key; enter the password when prompted, after which passwordless login works
ssh-copy-id master
ssh-copy-id slave1
ssh-copy-id slave2
# Repeat the commands above on all three nodes; after that, ssh to master, slave1, or slave2 no longer asks for a password
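To verify that passwordless login works from the current node, something like the following should print each remote hostname without any password prompt:
# Each command should return the remote hostname with no prompt
ssh master hostname
ssh slave1 hostname
ssh slave2 hostname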
# Check for the JDK that ships with the system
rpm -qa | grep jdk
# Remove any JDK packages that were found
yum -y remove <the JDK packages found above>
# Or remove them all with the following command
rpm -qa | grep -i java | xargs -n1 rpm -e --nodeps
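After removal, java should no longer resolve, which confirms the bundled JDK is gone:
# Should now report "command not found"
java -version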
# Install the JDK; the installation package can be downloaded from the official website
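The JDK install itself is just unpacking the tarball into /usr/local/server. A minimal sketch, assuming the JDK 8u301 archive (the filename jdk-8u301-linux-x64.tar.gz is an assumption here) has been uploaded to /usr/local/software:
# As the hadoop user, unpack the JDK into the server directory
cd /usr/local/software
tar -zxvf jdk-8u301-linux-x64.tar.gz -C /usr/local/server
# This should produce /usr/local/server/jdk1.8.0_301, matching the paths used later in this guide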
# Hadoop 3.1.3 download link: http://archive.apache.org/dist/hadoop/core/hadoop-3.1.3/hadoop-3.1.3.tar.gz
# Extract the Hadoop package into /usr/local/server
tar -zxvf hadoop-3.1.3.tar.gz -C /usr/local/server
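Hadoop's own startup scripts are usually pointed at the JDK through etc/hadoop/hadoop-env.sh, so that daemons launched over ssh can find Java. One way to do this, assuming the JDK path used in this guide:
# Point Hadoop at the JDK (appends to hadoop-env.sh)
echo 'export JAVA_HOME=/usr/local/server/jdk1.8.0_301' >> /usr/local/server/hadoop-3.1.3/etc/hadoop/hadoop-env.sh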
core-site.xml:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:8020</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/server/hadoop-3.1.3/data</value>
  </property>
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>hadoop</value>
  </property>
</configuration>
hdfs-site.xml:
<configuration>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>master:9870</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:9868</value>
  </property>
</configuration>
mapred-site.xml:
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>slave1:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>slave1:19888</value>
  </property>
</configuration>
yarn-site.xml:
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>slave2</value>
  </property>
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.log.server.url</name>
    <value>http://slave1:19888/jobhistory/logs</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
</configuration>
workers:
master
slave1
slave2
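All of the files above live in the Hadoop configuration directory and can be edited in place before distributing the installation:
# The configuration files edited above all live here
cd /usr/local/server/hadoop-3.1.3/etc/hadoop
ls core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml workers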
scp -r /usr/local/server/jdk1.8.0_301 hadoop@slave1:/usr/local/server
scp -r /usr/local/server/jdk1.8.0_301 hadoop@slave2:/usr/local/server
scp -r /usr/local/server/hadoop-3.1.3 hadoop@slave1:/usr/local/server
scp -r /usr/local/server/hadoop-3.1.3 hadoop@slave2:/usr/local/server
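A quick check that both copies landed on the slave nodes:
# Both jdk1.8.0_301 and hadoop-3.1.3 should be listed on each slave
ssh slave1 ls /usr/local/server
ssh slave2 ls /usr/local/server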
# Change to the Hadoop installation directory
cd /usr/local/server/hadoop-3.1.3
# Format the NameNode (run this once, on the master node)
./bin/hdfs namenode -format
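If the format succeeds, a NameNode metadata directory is created under hadoop.tmp.dir. Note that re-formatting later requires first deleting the data and logs directories on every node, otherwise the NameNode and DataNodes end up with mismatched cluster IDs. A sketch of the check, assuming the hadoop.tmp.dir configured above:
# The format should have created the NameNode metadata directory
ls /usr/local/server/hadoop-3.1.3/data/dfs/name/current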
vim /etc/profile.d/my_env.sh
# Add the following on every node
export JAVA_HOME=/usr/local/server/jdk1.8.0_301
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=/usr/local/server/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
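The new environment file only takes effect in a fresh login shell; source it (or log out and back in) and verify that both tools resolve:
# Load the new environment variables and verify
source /etc/profile.d/my_env.sh
java -version
hadoop version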
# On the master node
start-dfs.sh
# On the slave1 node
mapred --daemon start historyserver
# On the slave2 node (the ResourceManager node)
start-yarn.sh
# If the master node shows NameNode and SecondaryNameNode, the startup succeeded
# slave1 and slave2 show DataNode
# slave2 shows ResourceManager and NodeManager
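The processes can be listed with jps on each node. Roughly what to expect with the configuration above (because master is also listed in the workers file, a DataNode and NodeManager come up there as well); exact PIDs will differ:
# On master
jps
# Expected (roughly): NameNode, SecondaryNameNode, DataNode, NodeManager, Jps
# On slave1
jps
# Expected (roughly): DataNode, NodeManager, JobHistoryServer, Jps
# On slave2
jps
# Expected (roughly): ResourceManager, NodeManager, DataNode, Jps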
# NameNode web UI: http://master:9870
# JobHistory web UI: http://slave1:19888
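The YARN ResourceManager also serves a web UI, by default on port 8088 of the ResourceManager node (slave2 in this layout):
# ResourceManager web UI: http://slave2:8088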