Setting Up a Hadoop Cluster on CentOS 7

Building a highly available (HA) Hadoop cluster

Set the hostnames hadoop1, hadoop2, and hadoop3 (run each command on its corresponding node):

hostnamectl set-hostname hadoop1
hostnamectl set-hostname hadoop2
hostnamectl set-hostname hadoop3

Edit /etc/hosts:

127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.253.128 hadoop1
192.168.253.129 hadoop2
192.168.253.130 hadoop3
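
Optionally, verify from any node that the three names resolve and the hosts are reachable:

ping -c 1 hadoop1
ping -c 1 hadoop2
ping -c 1 hadoop3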

Disable the firewall (on all three nodes):

systemctl stop firewalld
systemctl disable firewalld

On hadoop1, run the following commands.

Generate an SSH key pair for passwordless login; just press Enter at each prompt:

ssh-keygen -t rsa -P ''

Copy the public key to hadoop1, hadoop2, and hadoop3; type yes when prompted, then enter each node's password:

ssh-copy-id hadoop1
ssh-copy-id hadoop2
ssh-copy-id hadoop3

On hadoop2 and hadoop3, verify that the key arrived:

cd .ssh/
ls
#if authorized_keys is listed, the copy succeeded

On hadoop1, test that passwordless login works:

ssh hadoop2 
ssh hadoop3
#type exit to return

Repeat the same key generation and key copying on hadoop2 and hadoop3, so that every node can reach every other node without a password; see the sketch below.
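
For reference, a sketch of the same two steps on each of the other nodes:

#run on hadoop2, then again on hadoop3
ssh-keygen -t rsa -P ''
ssh-copy-id hadoop1
ssh-copy-id hadoop2
ssh-copy-id hadoop3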

Install chrony on hadoop1, hadoop2, and hadoop3:

yum -y install chrony

Install wget (along with gcc and vim):

yum install -y gcc vim wget

Configure chrony:

vim /etc/chrony.conf

Comment out the default server 0.centos.pool.ntp.org iburst entries and add the following:

server ntp1.aliyun.com 
server ntp2.aliyun.com 
server ntp3.aliyun.com

Start chrony:

systemctl start chronyd
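
Optionally, enable chronyd at boot and confirm the Aliyun servers are reachable (chronyc ships with chrony; the line marked ^* is the currently selected source):

systemctl enable chronyd
chronyc sources -v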

Install psmisc (it provides fuser, which the sshfence fencing method used later needs):

yum install -y psmisc

Back up the original yum repository file:

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

Download the Aliyun mirror repository file:

wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

Clear and rebuild the yum cache:

yum clean all
yum makecache

Install the JDK

Copy the JDK tarball into /usr/local:

cd /usr/local #the JDK tarball should already be in this directory
#extract
tar -zxf jdk-8u111-linux-x64.tar.gz
#rename
mv jdk1.8.0_111 jdk

Configure the environment variables:

vi /etc/profile
#add the following:
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

Reload the profile:

source /etc/profile

Verify the JDK installation:

java -version
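
If the environment variables took effect, the first line of the output should match the unpacked JDK:

java version "1.8.0_111"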

Install ZooKeeper

Copy the ZooKeeper tarball into /usr/local:

#the ZooKeeper tarball should already be in this directory
cd /usr/local
#extract
tar -zxf zookeeper-3.4.5-cdh5.14.2.tar.gz
#rename
mv zookeeper-3.4.5-cdh5.14.2 zookeeper

Edit the ZooKeeper configuration file:

cd zookeeper/conf
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg

Change the dataDir setting to dataDir=/usr/local/zookeeper/datas.

Append the following at the end of the file (2888 is the quorum port, 3888 the leader-election port):

server.1=hadoop1:2888:3888 
server.2=hadoop2:2888:3888 
server.3=hadoop3:2888:3888

Create the datas directory (on all three nodes):

cd /usr/local/zookeeper
mkdir datas

On hadoop1, run:

cd /usr/local/zookeeper/datas
echo "1"> myid

On hadoop2, run:

cd /usr/local/zookeeper/datas
echo "2"> myid

On hadoop3, run:

cd /usr/local/zookeeper/datas
echo "3"> myid

Configure the runtime environment on hadoop1, hadoop2, and hadoop3:

vi /etc/profile
#Zookeeper env
export ZOOKEEPER_HOME=/usr/local/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin

Reload the profile:

source /etc/profile

Start the ZooKeeper cluster (run on every node):

zkServer.sh start

Check with jps; a QuorumPeerMain process must be present:

jps
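
Optionally, confirm each node's role; with three nodes, one reports leader and the other two follower:

zkServer.sh status
#Mode: leader    (on one node)
#Mode: follower  (on the other two)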

If the port is already in use:

#check whether port 2181 is occupied
netstat -nltp | grep 2181
#if the netstat command is missing, install it first and check again:
yum install -y net-tools
#kill the offending process
kill -9 <pid>

Install Hadoop

Copy the Hadoop tarball into /usr/local on each of hadoop1, hadoop2, and hadoop3:

#the Hadoop tarball should already be in this directory
cd /usr/local
#extract
tar -zxvf hadoop-2.6.0-cdh5.14.2.tar.gz
#rename
mv hadoop-2.6.0-cdh5.14.2 hadoop

Create the required data directories:

mkdir -p /usr/local/hadoop/tmp 
mkdir -p /usr/local/hadoop/dfs/journalnode_data 
mkdir -p /usr/local/hadoop/dfs/edits 
mkdir -p /usr/local/hadoop/dfs/datanode_data
mkdir -p /usr/local/hadoop/dfs/namenode_data

Configure hadoop-env.sh

cd /usr/local/hadoop/etc/hadoop
vim hadoop-env.sh

Set JAVA_HOME and HADOOP_CONF_DIR as follows:

export JAVA_HOME=/usr/local/jdk 
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop

Configure core-site.xml

vim core-site.xml

<configuration>
  <!-- default file system: the HA nameservice defined in hdfs-site.xml -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hacluster</value>
  </property>
  <!-- base directory for temporary files -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:///usr/local/hadoop/tmp</value>
  </property>
  <!-- I/O buffer size in bytes -->
  <property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
  </property>
  <!-- ZooKeeper quorum used for automatic failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
  </property>
  <!-- allow the root proxy user from any host and any group -->
  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
</configuration>

Configure hdfs-site.xml

vim hdfs-site.xml

<configuration>
  <!-- HDFS block size: 128 MB -->
  <property>
    <name>dfs.block.size</name>
    <value>134217728</value>
  </property>
  <!-- number of replicas per block -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <!-- NameNode metadata directory -->
  <property>
    <name>dfs.name.dir</name>
    <value>file:///usr/local/hadoop/dfs/namenode_data</value>
  </property>
  <!-- DataNode block storage directory -->
  <property>
    <name>dfs.data.dir</name>
    <value>file:///usr/local/hadoop/dfs/datanode_data</value>
  </property>
  <!-- enable the WebHDFS REST API -->
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <!-- max concurrent transfer threads per DataNode -->
  <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>4096</value>
  </property>
  <!-- logical name of the HA nameservice -->
  <property>
    <name>dfs.nameservices</name>
    <value>hacluster</value>
  </property>
  <!-- the two NameNodes in the nameservice -->
  <property>
    <name>dfs.ha.namenodes.hacluster</name>
    <value>nn1,nn2</value>
  </property>
  <!-- RPC, service-RPC, and HTTP addresses for nn1 -->
  <property>
    <name>dfs.namenode.rpc-address.hacluster.nn1</name>
    <value>hadoop1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.hacluster.nn1</name>
    <value>hadoop1:53310</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.hacluster.nn1</name>
    <value>hadoop1:50070</value>
  </property>
  <!-- RPC, service-RPC, and HTTP addresses for nn2 -->
  <property>
    <name>dfs.namenode.rpc-address.hacluster.nn2</name>
    <value>hadoop2:9000</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.hacluster.nn2</name>
    <value>hadoop2:53310</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.hacluster.nn2</name>
    <value>hadoop2:50070</value>
  </property>
  <!-- JournalNode quorum holding the shared edit log -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485/hacluster</value>
  </property>
  <!-- local storage for JournalNode data -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/usr/local/hadoop/dfs/journalnode_data</value>
  </property>
  <!-- local storage for NameNode edit logs -->
  <property>
    <name>dfs.namenode.edits.dir</name>
    <value>/usr/local/hadoop/dfs/edits</value>
  </property>
  <!-- let ZKFC fail over automatically -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- client-side proxy that locates the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.hacluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- fence a failed NameNode over SSH (needs psmisc/fuser) -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <!-- disable HDFS permission checking -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>

Configure mapred-site.xml

cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml

<configuration>
  <!-- run MapReduce on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- JobHistory server RPC address -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop1:10020</value>
  </property>
  <!-- JobHistory web UI -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop1:19888</value>
  </property>
  <!-- run small jobs in a single JVM (uber mode) -->
  <property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
  </property>
</configuration>

Configure yarn-site.xml

vim yarn-site.xml

<configuration>
  <!-- enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <!-- cluster id for the ResourceManager pair -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>hayarn</value>
  </property>
  <!-- logical ids of the two ResourceManagers -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <!-- rm1 runs on hadoop2 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>hadoop2</value>
  </property>
  <!-- rm2 runs on hadoop3 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>hadoop3</value>
  </property>
  <!-- ZooKeeper quorum used by the ResourceManagers -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
  </property>
  <!-- recover ResourceManager state after restart -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <!-- keep ResourceManager state in ZooKeeper -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <!-- default ResourceManager host (YARN is started from hadoop3 below) -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop3</value>
  </property>
  <!-- shuffle service for MapReduce -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- aggregate container logs -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <!-- keep aggregated logs for 7 days -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
</configuration>

Configure slaves

vim slaves

Delete the localhost line (dd in vim) and add the following:

hadoop1
hadoop2
hadoop3
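
If the configuration files above were edited only on hadoop1, a sketch for syncing the whole config directory to the other two nodes (paths as used throughout this guide):

scp -r /usr/local/hadoop/etc/hadoop/ root@hadoop2:/usr/local/hadoop/etc/
scp -r /usr/local/hadoop/etc/hadoop/ root@hadoop3:/usr/local/hadoop/etc/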

Configure the Hadoop environment variables

vim /etc/profile
#hadoop env
export HADOOP_HOME=/usr/local/hadoop 
export HADOOP_MAPRED_HOME=$HADOOP_HOME 
export HADOOP_COMMON_HOME=$HADOOP_HOME 
export HADOOP_HDFS_HOME=$HADOOP_HOME 
export YARN_HOME=$HADOOP_HOME 
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native 
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin 
export HADOOP_INSTALL=$HADOOP_HOME
source /etc/profile
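
A quick check that the new PATH entries work; the banner should report the CDH build unpacked earlier:

hadoop version
#first line of output: Hadoop 2.6.0-cdh5.14.2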

Start the Hadoop cluster

First start a JournalNode on each of hadoop1, hadoop2, and hadoop3:

hadoop-daemon.sh start journalnode

Run jps; a new JournalNode process should appear:

jps

Format the NameNode (on hadoop1 only; keep the sessions on hadoop2 and hadoop3 open with their JournalNodes running):

hdfs namenode -format

Copy the NameNode metadata from hadoop1 to the same location on hadoop2:

scp -r /usr/local/hadoop/dfs/namenode_data/current/ root@hadoop2:/usr/local/hadoop/dfs/namenode_data
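
An equivalent alternative (a sketch, not what this guide does): start the freshly formatted NameNode on hadoop1 first, then let hadoop2 pull the metadata itself.

#on hadoop1
hadoop-daemon.sh start namenode
#on hadoop2 (replaces the scp above)
hdfs namenode -bootstrapStandby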

On hadoop1, format the ZKFC (ZooKeeper failover controller) state:

hdfs zkfc -formatZK

On hadoop1, start the HDFS services, then check the processes with jps:

start-dfs.sh
jps
#expected jps output:
DataNode
QuorumPeerMain
JournalNode
NameNode
DFSZKFailoverController
Jps

On hadoop3, start the YARN services, then check the processes with jps:

start-yarn.sh
jps
#expected jps output:
ResourceManager
NodeManager
JournalNode
DataNode
QuorumPeerMain
Jps

On hadoop2, check the processes with jps:

jps
#expected jps output:
JournalNode
DFSZKFailoverController
NodeManager
Jps
DataNode
QuorumPeerMain
NameNode

On hadoop1, start the JobHistory server; jps will then show an additional JobHistoryServer process:

mr-jobhistory-daemon.sh start historyserver
jps

Check the cluster state

On hadoop1, query the NameNode states: hdfs haadmin -getServiceState nn1 should report active and nn2 should report standby:

hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
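
To exercise automatic failover, an optional drill (note that nn2 stays active afterwards, so the active/standby labels in the web UI checks below will be swapped): kill the active NameNode on hadoop1, watch nn2 take over, then restart the killed process.

#on hadoop1: find and kill the active NameNode
jps | grep NameNode    #note the pid
kill -9 <pid>
#a few seconds later nn2 should report active
hdfs haadmin -getServiceState nn2
#bring hadoop1's NameNode back; it rejoins as standby
hadoop-daemon.sh start namenode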

Open <node IP>:50070 in a browser and compare against the screenshots below.

At hadoop1's address, confirm the state reads "active".

[Screenshot 1: hadoop1's NameNode web UI showing "active"]

At hadoop2's address, confirm the state reads "standby".

[Screenshot 2: hadoop2's NameNode web UI showing "standby"]

Finally, open the Datanodes tab at the top and confirm that all three nodes are listed; if so, the highly available Hadoop cluster is up and running.

[Screenshot 3: Datanodes tab listing the three nodes]
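
As a final smoke test, a sketch of running the bundled wordcount example (the jar path assumes the default layout of the CDH tarball unpacked above):

hdfs dfs -mkdir -p /test/input
hdfs dfs -put /etc/profile /test/input
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.0-cdh5.14.2.jar \
    wordcount /test/input /test/output
#per-word counts land in part-r-00000
hdfs dfs -cat /test/output/part-r-00000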
