PC01 | PC02 | PC03 |
---|---|---|
NameNode | NameNode | hive |
ZKFC | ZKFC | |
ntpd | ResourceManager | ResourceManager |
DataNode | DataNode | DataNode |
JournalNode | JournalNode | JournalNode |
NodeManager | NodeManager | NodeManager |
ZooKeeper | ZooKeeper | ZooKeeper |
hbase | hbase | hbase |
【pc1】
hostname hadoop01
【pc2】
hostname hadoop02
【pc3】
hostname hadoop03
【pc1】
vi /etc/sysconfig/network
======================
NETWORKING=yes
HOSTNAME=hadoop01
【pc2】
vi /etc/sysconfig/network
======================
NETWORKING=yes
HOSTNAME=hadoop02
【pc3】
vi /etc/sysconfig/network
======================
NETWORKING=yes
HOSTNAME=hadoop03
Configure the hostname-to-IP mapping 【all three PCs】
vi /etc/hosts
======================
192.168.91.101 hadoop01
192.168.91.102 hadoop02
192.168.91.103 hadoop03
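A quick sanity check that the mappings resolve (run from any of the three nodes; hostnames as configured above):
ping -c 1 hadoop01
ping -c 1 hadoop02
ping -c 1 hadoop03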
【pc01】
vi /etc/sysconfig/network-scripts/ifcfg-eth0
============================
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.91.101
NETMASK=255.255.255.0
GATEWAY=192.168.91.2
DNS1=8.8.8.8
【pc02】
vi /etc/sysconfig/network-scripts/ifcfg-eth0
============================
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.91.102
NETMASK=255.255.255.0
GATEWAY=192.168.91.2
DNS1=8.8.8.8
【pc03】
vi /etc/sysconfig/network-scripts/ifcfg-eth0
============================
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.91.103
NETMASK=255.255.255.0
GATEWAY=192.168.91.2
DNS1=8.8.8.8
service iptables stop
chkconfig iptables off
yum -y install openssh.x86_64
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id hadoop01
ssh-copy-id hadoop02
ssh-copy-id hadoop03
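Before continuing, confirm that passwordless login actually works (assuming the key generation and ssh-copy-id steps were run on each of the three nodes):
ssh hadoop01 hostname
ssh hadoop02 hostname
ssh hadoop03 hostname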
yum -y install ntp.x86_64
vi /etc/ntp.conf
================================================
# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
restrict 192.168.91.0 mask 255.255.255.0 nomodify notrap ## allow clients on the cluster subnet
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 127.127.1.0 ## comment out the four default pool servers above and serve the local clock instead
service ntpd start
yum -y install ntpdate.x86_64
【pc02】
ntpdate hadoop01
【pc03】
ntpdate hadoop01
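A one-off ntpdate run will drift again over time. A minimal sketch of a periodic sync via root's crontab on pc02 and pc03 (the 10-minute interval is an assumption, not part of the original notes):
crontab -e
======================
*/10 * * * * /usr/sbin/ntpdate hadoop01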
tar -zxvf /data/jdk...gz -C /opt/app
vi /etc/profile
==================
export JAVA_HOME=/opt/app/jdk1.8.0_181
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
java -version
===================
java version "1.8.0_181"
Java(TM) SE Runtime Environment (build 1.8.0_181-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.181-b13, mixed mode)
tar -zxvf zookeeper-3.4.10.tar.gz -C /opt/app/
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg
=====================
dataDir=/opt/app/zookeeper-3.4.10/zkData
server.1=hadoop01:2888:3888
server.2=hadoop02:2888:3888
server.3=hadoop03:2888:3888
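If ZooKeeper was extracted and configured only on pc01, copy the configured directory to the other two nodes before the per-node steps below (a sketch, assuming the passwordless SSH set up earlier and the same /opt/app layout on every node):
scp -r /opt/app/zookeeper-3.4.10 hadoop02:/opt/app/
scp -r /opt/app/zookeeper-3.4.10 hadoop03:/opt/app/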
mkdir zkData
Create a myid file under the zkData directory; its content is this server's ZooKeeper id number.
【pc01】
vi zkData/myid
==============
1
【pc02】
vi zkData/myid
==============
2
【pc03】
vi zkData/myid
==============
3
bin/zkServer.sh start ## start the ZooKeeper service
bin/zkServer.sh status ## check its status
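To verify the quorum is actually serving requests, connect with the bundled CLI (any of the three servers will do); inside the shell, ls / should list the root znodes:
bin/zkCli.sh -server hadoop01:2181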
tar -zxvf hadoop-2.7.2.tar.gz -C /opt/app/
vi etc/hadoop/hadoop-env.sh
=================================
export JAVA_HOME=/opt/app/jdk1.8.0_181
vi etc/hadoop/mapred-env.sh
=================================
export JAVA_HOME=/opt/app/jdk1.8.0_181
vi etc/hadoop/yarn-env.sh
=================================
export JAVA_HOME=/opt/app/jdk1.8.0_181
vi etc/hadoop/core-site.xml
======================
fs.defaultFS
hdfs://ns1
ha.zookeeper.quorum
hadoop01:2181,hadoop02:2181,hadoop03:2181
hadoop.tmp.dir
/opt/app/hadoop-2.7.2/data/tmp
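The name/value pairs above are shorthand; in the actual core-site.xml each pair is a <property> element, for example:
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1</value>
    </property>
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/app/hadoop-2.7.2/data/tmp</value>
    </property>
</configuration>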
vi etc/hadoop/hdfs-site.xml
======================
dfs.nameservices
ns1
dfs.ha.namenodes.ns1
nn1,nn2
dfs.namenode.rpc-address.ns1.nn1
hadoop01:8020
dfs.namenode.http-address.ns1.nn1
hadoop01:50070
dfs.namenode.rpc-address.ns1.nn2
hadoop02:8020
dfs.namenode.http-address.ns1.nn2
hadoop02:50070
dfs.namenode.shared.edits.dir
qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/ns1
dfs.journalnode.edits.dir
/opt/app/hadoop-2.7.2/journal
dfs.ha.fencing.methods
sshfence
shell(/bin/true)
dfs.ha.fencing.ssh.private-key-files
/home/hadoop/.ssh/id_rsa
dfs.ha.fencing.ssh.connect-timeout
30000
dfs.ha.automatic-failover.enabled
true
dfs.client.failover.proxy.provider.ns1
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.permissions.enabled
false
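One detail the shorthand hides: dfs.ha.fencing.methods takes both fencing methods in a single value, one per line, so the sshfence entry and the shell(/bin/true) fallback go into the same <property>:
<property>
    <name>dfs.ha.fencing.methods</name>
    <value>
        sshfence
        shell(/bin/true)
    </value>
</property>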
vi etc/hadoop/mapred-site.xml
======================
mapreduce.framework.name
yarn
mapreduce.jobhistory.webapp.address
hadoop01:19888
mapreduce.job.ubertask.enable
true
vi etc/hadoop/yarn-site.xml
======================
yarn.resourcemanager.ha.enabled
true
yarn.resourcemanager.cluster-id
rmcluster
yarn.resourcemanager.ha.rm-ids
rm1,rm2
yarn.resourcemanager.hostname.rm1
hadoop02
yarn.resourcemanager.hostname.rm2
hadoop03
yarn.resourcemanager.zk-address
hadoop01:2181,hadoop02:2181,hadoop03:2181
yarn.resourcemanager.recovery.enabled
true
yarn.resourcemanager.store.class
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
yarn.nodemanager.aux-services
mapreduce_shuffle
yarn.log-aggregation-enable
true
yarn.nodemanager.log.retain-seconds
10800
vi etc/hadoop/slaves
======================
hadoop01
hadoop02
hadoop03
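The Hadoop configuration so far lives on one node; before starting anything, copy the configured installation to the other two machines (a sketch, assuming the same /opt/app path everywhere):
scp -r /opt/app/hadoop-2.7.2 hadoop02:/opt/app/
scp -r /opt/app/hadoop-2.7.2 hadoop03:/opt/app/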
【all three PCs】
bin/zkServer.sh start
sbin/hadoop-daemon.sh start journalnode
【pc01】
bin/hdfs namenode -format ## format the NameNode (first start only)
sbin/hadoop-daemon.sh start namenode ## start NameNode nn1
【pc02】
bin/hdfs namenode -bootstrapStandby ## copy nn1's metadata to nn2
sbin/hadoop-daemon.sh start namenode ## start NameNode nn2
【pc01】
bin/zkServer.sh start ## start ZooKeeper (if not already running)
bin/hdfs zkfc -formatZK
sbin/start-dfs.sh ## start HDFS
bin/hdfs haadmin -getServiceState nn1 ## check nn1 state
bin/hdfs haadmin -getServiceState nn2 ## check nn2 state
【pc02】
sbin/start-yarn.sh
【pc03】
sbin/yarn-daemon.sh start resourcemanager
bin/yarn rmadmin -getServiceState rm1 ## check rm1 state
bin/yarn rmadmin -getServiceState rm2 ## check rm2 state
sudo tar -zvxf hbase-1.3.1-bin.tar.gz -C /opt/app/
sudo chown -R hadoop:hadoop hbase-1.3.1/
vi conf/hbase-env.sh
======================================
## set JAVA_HOME
export JAVA_HOME=/opt/app/jdk1.8.0_181
## do not use HBase's bundled ZooKeeper; the external ZooKeeper cluster is used instead
export HBASE_MANAGES_ZK=false
vi conf/hbase-site.xml
======================
## HBase temporary directory
hbase.tmp.dir
/opt/app/hbase-1.3.1/hbase
## HBase root directory on HDFS (uses the ns1 nameservice)
hbase.rootdir
hdfs://ns1/hbase
## run HBase in distributed mode
hbase.cluster.distributed
true
## ZooKeeper quorum for HBase; use the hostnames configured above
hbase.zookeeper.quorum
hadoop01,hadoop02,hadoop03
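As with the Hadoop files, these pairs go into hbase-site.xml as <property> elements, e.g.:
<property>
    <name>hbase.rootdir</name>
    <value>hdfs://ns1/hbase</value>
</property>
<property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
</property>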
HBase reads hdfs://ns1, so it needs Hadoop's HA client configuration on its classpath:
cp /opt/app/hadoop-2.7.2/etc/hadoop/core-site.xml /opt/app/hbase-1.3.1/conf/
cp /opt/app/hadoop-2.7.2/etc/hadoop/hdfs-site.xml /opt/app/hbase-1.3.1/conf/
The conf/regionservers file works like the slaves file: it lists the regionserver nodes, one hostname per regionserver.
vi conf/regionservers
======================
hadoop01
hadoop02
hadoop03
So that MapReduce jobs running on Hadoop can find the HBase classes, add HBase's jars to Hadoop's classpath via HADOOP_CLASSPATH:
vi /etc/profile
--------------------------------
export HBASE_HOME=/opt/app/hbase-1.3.1
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:`$HBASE_HOME/bin/hbase mapredcp`
-------------------------------
source /etc/profile
bin/hbase-daemon.sh start master
bin/hbase-daemon.sh start regionserver
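With conf/regionservers populated, the whole HBase cluster can also be started in one step from the master node using the bundled script (which node runs the HMaster is a deployment choice, assumed here to be pc01):
bin/start-hbase.sh
jps ## should now show HMaster and/or HRegionServer alongside the Hadoop processes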