Software | Package |
---|---|
Zookeeper | zookeeper-3.4.8.tar.gz |
hive | apache-hive-2.0.0-src.tar.gz |
jdk | jdk-8u65-linux-x64.tar.gz |
hadoop | hadoop-2.7.1.tar.gz |
mysql-connector | mysql-connector-java-5.1.26-bin.jar |
CentOS | CentOS 7.4 (minimal install, English edition) |
# master
hostname master && bash
echo master > /etc/hostname
vi /etc/hostname
# slave1
hostname slave1 && bash
echo slave1 > /etc/hostname
vi /etc/hostname
# slave2
hostname slave2 && bash
echo slave2 > /etc/hostname
vi /etc/hostname
# check the IP address
ip a
#master slave1 slave2
vi /etc/hosts
# Example values only: these IPs are not your machine's IPs, replace them with your own
# The hosts file must be configured on all three machines
# Find each machine's IP with: ip a
192.168.10.3 master
192.168.10.4 slave1
192.168.10.5 slave2
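To confirm that host name resolution works on every node, a quick check such as the following can be run on each of the three machines; every host should answer:
# run on master, slave1 and slave2
for h in master slave1 slave2; do ping -c 1 $h; done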
Add the following line to the /etc/sysconfig/ntpd file:
SYNC_HWCLOCK=yes
Then run:
systemctl start ntpd
# check whether the time has been synchronized successfully
date
# run on master, slave1 and slave2 together; the shorter the interval between the three machines, the better
date -s "20220517 11:31:01"
# master slave1 slave2
systemctl stop firewalld.service
systemctl disable firewalld.service
tar -zxvf /opt/software/jdk-8u65-linux-x64.tar.gz -C /usr/local/src
mv /usr/local/src/jdk1.8.0_65 /usr/local/src/java
chown -R root:root /usr/local/src/java
vi /etc/profile
Add the following content and take a screenshot:
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
Save and exit.
source /etc/profile
update-alternatives --install /usr/bin/java java /usr/local/src/java/bin/java 200
update-alternatives --set java /usr/local/src/java/bin/java
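To verify that the JDK was installed and registered correctly, the following checks should now report the 1.8.0_65 build and the paths configured above:
java -version           # expected: java version "1.8.0_65"
echo $JAVA_HOME         # expected: /usr/local/src/java
which java              # expected: /usr/bin/java (the alternatives symlink)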
Install the SSH service from the specified directory, check the SSH installation, and take a screenshot (the installation packages are all under /h3cu/).
rpm -ivh /h3cu/*.rpm
Check the SSH service with the following command:
rpm -qa | grep ssh
Output like the following indicates a successful installation:
[root@master ~]# rpm -qa | grep ssh
openssh-7.4p1-22.el7_9.x86_64
openssh-clients-7.4p1-22.el7_9.x86_64
openssh-server-7.4p1-22.el7_9.x86_64
libssh2-1.8.0-4.el7.x86_64
The following four commands must be run on all three machines; ssh-keygen -t rsa is the first one to run.
[root@master .ssh]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:M0qlJuG3H2LvgI4Av1IEitpWKo5I6VGm/mi+Yy97EU8 root@master
The key's randomart image is:
+---[RSA 2048]----+
| |
|. |
|o. . . |
|o .=.E o |
|oo=o* = S |
|+*=. B o o |
|O+o o * . |
|+X.= . = . |
|+*@o. .+ |
+----[SHA256]-----+
[root@master ~]$ ssh-copy-id master
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'master'"
and check to make sure that only the key(s) you wanted were added.
[root@master ~]$ ssh-copy-id slave1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'slave1 (192.168.28.112)' can't be established.
ECDSA key fingerprint is SHA256:P0A5pFfF46dyYzSPdTMrfrOxUrLXPK7zbjO0AJggJww.
ECDSA key fingerprint is MD5:e5:fc:9b:bd:90:b6:52:09:0b:91:a8:05:94:b8:6e:c7.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@slave1's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'slave1'"
and check to make sure that only the key(s) you wanted were added.
[root@master ~]$ ssh-copy-id slave2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'slave2 (192.168.28.113)' can't be established.
ECDSA key fingerprint is SHA256:P0A5pFfF46dyYzSPdTMrfrOxUrLXPK7zbjO0AJggJww.
ECDSA key fingerprint is MD5:e5:fc:9b:bd:90:b6:52:09:0b:91:a8:05:94:b8:6e:c7.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@slave2's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'slave2'"
and check to make sure that only the key(s) you wanted were added.
Screenshot of the master node logging in to one of the slave nodes:
[root@master ~]# ssh slave1
Last login: Sun May 15 11:10:28 2022 from 192.168.10.1
[root@slave1 ~]#
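Back on master (after exiting the slave1 session), passwordless login to all three nodes can be verified in one pass; each line should print the remote host name without prompting for a password:
for h in master slave1 slave2; do ssh $h hostname; done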
#master
scp -r /usr/local/src/java slave1:/usr/local/src
scp -r /usr/local/src/java slave2:/usr/local/src
scp -r /etc/profile slave1:/etc/
scp -r /etc/profile slave2:/etc/
Run the following on slave1:
source /etc/profile
update-alternatives --install /usr/bin/java java /usr/local/src/java/bin/java 200
update-alternatives --set java /usr/local/src/java/bin/java
Run the following on slave2:
source /etc/profile
update-alternatives --install /usr/bin/java java /usr/local/src/java/bin/java 200
update-alternatives --set java /usr/local/src/java/bin/java
tar -zxvf /opt/software/hadoop-2.7.1.tar.gz -C /usr/local/src/
cd /usr/local/src/
mv hadoop-2.7.1/ hadoop
chown -R root:root hadoop
vi /etc/profile
# add the following
export HADOOP_HOME=/usr/local/src/hadoop
export HADOOP_PREFIX=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# save and exit with :wq!
source /etc/profile
#master
scp -r /etc/profile slave1:/etc/
scp -r /etc/profile slave2:/etc/
# run the following on slave1
source /etc/profile
# run the following on slave2
source /etc/profile
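As a quick sanity check on master (Hadoop has only been unpacked there so far), the hadoop command should now be on the PATH and report release 2.7.1:
hadoop version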
# HA configuration (the provided configuration files are under /1+X/)
Copy the provided core-site.xml, mapred-site.xml, hdfs-site.xml and yarn-site.xml files into the /usr/local/src/hadoop/etc/hadoop directory.
vi /usr/local/src/hadoop/etc/hadoop/core-site.xml
Add the following properties inside the <configuration> element:
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://mycluster</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>file:/usr/local/src/hadoop/tmp</value>
</property>
<property>
  <name>ha.zookeeper.quorum</name>
  <value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
  <name>ha.zookeeper.session-timeout.ms</name>
  <value>30000</value>
</property>
<property>
  <name>fs.trash.interval</name>
  <value>1440</value>
</property>
<property>
  <name>hadoop.proxyuser.root.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.root.groups</name>
  <value>*</value>
</property>
mv /usr/local/src/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/src/hadoop/etc/hadoop/mapred-site.xml
vi /usr/local/src/hadoop/etc/hadoop/mapred-site.xml
Add the following properties inside the <configuration> element:
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>master:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>master:19888</value>
</property>
vi /usr/local/src/hadoop/etc/hadoop/yarn-site.xml
Add the following properties inside the <configuration> element:
<property>
  <name>yarn.resourcemanager.ha.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.cluster-id</name>
  <value>yrc</value>
</property>
<property>
  <name>yarn.resourcemanager.ha.rm-ids</name>
  <value>rm1,rm2</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm1</name>
  <value>master</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm2</name>
  <value>slave1</value>
</property>
<property>
  <name>yarn.resourcemanager.zk-address</name>
  <value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.log-aggregation-enable</name>
  <value>true</value>
</property>
<property>
  <name>yarn.log-aggregation.retain-seconds</name>
  <value>86400</value>
</property>
<property>
  <name>yarn.resourcemanager.recovery.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.store.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
vi /usr/local/src/hadoop/etc/hadoop/hdfs-site.xml
Add the following properties inside the <configuration> element:
<property>
  <name>dfs.qjournal.start-segment.timeout.ms</name>
  <value>60000</value>
</property>
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>master,slave1</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.master</name>
  <value>master:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.slave1</name>
  <value>slave1:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.master</name>
  <value>master:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.slave1</name>
  <value>slave1:50070</value>
</property>
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://master:8485;slave1:8485;slave2:8485/mycluster</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>sshfence
shell(/bin/true)</value>
</property>
<property>
  <name>dfs.permissions.enabled</name>
  <value>false</value>
</property>
<property>
  <name>dfs.support.append</name>
  <value>true</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/root/.ssh/id_rsa</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/nn</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/dn</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/jn</value>
</property>
<property>
  <name>dfs.ha.automatic-failover.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.connect-timeout</name>
  <value>30000</value>
</property>
<property>
  <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
  <value>60000</value>
</property>
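Before continuing, it can be worth checking that none of the edited XML files is malformed. If xmllint happens to be available on the system, a silent exit means all four files are well-formed:
xmllint --noout /usr/local/src/hadoop/etc/hadoop/{core,hdfs,mapred,yarn}-site.xml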
vi /usr/local/src/hadoop/etc/hadoop/slaves
Add the following to this file:
master
slave1
slave2
vi /usr/local/src/hadoop/etc/hadoop/hadoop-env.sh
Add the following to this file:
export JAVA_HOME=/usr/local/src/java
mkdir -p /usr/local/src/hadoop/tmp/hdfs/{nn,dn,jn}
mkdir -p /usr/local/src/hadoop/tmp/logs
# master
scp -r /usr/local/src/hadoop slave1:/usr/local/src/   # copy to slave1
scp -r /usr/local/src/hadoop slave2:/usr/local/src/   # copy to slave2
tar -zxvf <archive> -C <destination directory>
tar -zxvf /opt/software/zookeeper-3.4.8.tar.gz -C /usr/local/src/
cd /usr/local/src/
mv zookeeper-3.4.8/ zookeeper
chown -R root:root zookeeper
vi /etc/profile
export ZOOKEEPER_HOME=/usr/local/src/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin
source /etc/profile
scp -r /etc/profile root@slave1:/etc/
scp -r /etc/profile root@slave2:/etc/
# run the following on slave1
source /etc/profile
# run the following on slave2
source /etc/profile
First go to ZooKeeper's configuration directory, make a copy of the sample configuration file, and edit it:
cd /usr/local/src/zookeeper/conf/
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg
Make the following changes in zoo.cfg:
# modify
dataDir=/usr/local/src/zookeeper/data
# add
dataLogDir=/usr/local/src/zookeeper/logs
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
Create the data and logs directories under the zookeeper directory, then create the myid file inside data:
cd /usr/local/src/zookeeper
mkdir {logs,data}
cd data/
echo 1 > myid
vi myid
# verify that the file contains only the single digit 1
scp -r /usr/local/src/zookeeper/ slave1:/usr/local/src/
scp -r /usr/local/src/zookeeper/ slave2:/usr/local/src/
# run the following on slave1
cd /usr/local/src/zookeeper/data/
vi myid
2
# the myid file on slave1 must contain only the single digit 2
# run the following on slave2
cd /usr/local/src/zookeeper/data/
vi myid
3
# the myid file on slave2 must contain only the single digit 3
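With passwordless SSH already in place, the three myid values can be compared from master in a single loop; the expected output is master: 1, slave1: 2, slave2: 3:
for h in master slave1 slave2; do echo -n "$h: "; ssh $h cat /usr/local/src/zookeeper/data/myid; done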
Run the following command on each of the three machines to start ZooKeeper:
/usr/local/src/zookeeper/bin/zkServer.sh start
If the following permission error appears:
JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... /usr/local/src/zookeeper/bin/zkServer.sh: line 109: ./zookeeper.out: Permission denied
STARTED
then cd into ZooKeeper's bin directory and start it with the following command instead:
./zkServer.sh start
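Once ZooKeeper has been started on all three machines, each node's role can be checked with the status subcommand; in a healthy three-node ensemble one node reports leader and the other two report follower:
/usr/local/src/zookeeper/bin/zkServer.sh status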
Change the common directory where the NameNode, DataNode, JournalNode, etc. store their data to /usr/local/hadoop/tmp;
# adjust the values freely according to the exam question; this walkthrough uses /usr/local/src/hadoop/tmp
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/nn</value>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/dn</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/usr/local/src/hadoop/tmp/hdfs/jn</value>
</property>
Before initializing the metadata, make sure all three machines are operating under the same user (the prompts below use root) and that the ZooKeeper service is running normally on all of them.
Run the following command:
hadoop-daemons.sh start journalnode
Running it should produce output like the following and add a JournalNode process:
[root@master hadoop]$ hadoop-daemons.sh start journalnode
WARNING: Use of this script to start HDFS daemons is deprecated.
WARNING: Attempting to execute replacement "hdfs --workers --daemon start" instead.
[root@master hadoop]$ jps
4944 Jps
3273 QuorumPeerMain
4895 JournalNode
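To confirm that a JournalNode process is running on all three machines without logging in to each one, a loop such as the following (run from master, using the full jps path so it works over non-interactive SSH) is convenient:
for h in master slave1 slave2; do echo "== $h =="; ssh $h /usr/local/src/java/bin/jps | grep JournalNode; done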
Run the following command:
hdfs namenode -format
Part of the output is shown below:
2022-05-13 22:17:03,436 INFO namenode.FSImage: Allocated new BlockPoolId: BP-683251328-192.168.28.111-1652451423436
2022-05-13 22:17:03,447 INFO common.Storage: Storage directory /usr/local/src/hadoop/tmp/hdfs/nn has been successfully formatted.
2022-05-13 22:17:03,541 INFO namenode.FSImageFormatProtobuf: Saving image file /usr/local/src/hadoop/tmp/hdfs/nn/current/fsimage.ckpt_0000000000000000000 using no compression
2022-05-13 22:17:03,788 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/src/hadoop/tmp/hdfs/nn/current/fsimage.ckpt_0000000000000000000 of size 390 bytes saved in 0 seconds .
2022-05-13 22:17:03,791 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
2022-05-13 22:17:03,819 INFO namenode.FSImage: FSImageSaver clean checkpoint: txid = 0 when meet shutdown.
2022-05-13 22:17:03,819 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.28.111
************************************************************/
Run the following command:
hdfs zkfc -formatZK
Part of the output is shown below:
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/usr/local/src/hadoop/lib/native
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:java.compiler=
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:os.version=3.10.0-862.el7.x86_64
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:user.name=hadoop
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:user.home=/root
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Client environment:user.dir=/usr/local/src/hadoop/etc/hadoop
2022-05-13 22:19:22,621 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=master:2181,slave1:2181,slave2:2181 sessionTimeout=30000 watcher=org.apache.hadoop.ha.ActiveStandbyElector$WatcherWithClientRef@32eff876
2022-05-13 22:19:22,633 INFO zookeeper.ClientCnxn: Opening socket connection to server slave2/192.168.28.113:2181. Will not attempt to authenticate using SASL (unknown error)
2022-05-13 22:19:22,636 INFO zookeeper.ClientCnxn: Socket connection established to slave2/192.168.28.113:2181, initiating session
2022-05-13 22:19:22,659 INFO zookeeper.ClientCnxn: Session establishment complete on server slave2/192.168.28.113:2181, sessionid = 0xff80bd967d0b0000, negotiated timeout = 30000
2022-05-13 22:19:22,660 INFO ha.ActiveStandbyElector: Session connected.
2022-05-13 22:19:22,683 INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/mycluster in ZK.
2022-05-13 22:19:22,687 INFO zookeeper.ZooKeeper: Session: 0xff80bd967d0b0000 closed
2022-05-13 22:19:22,689 INFO zookeeper.ClientCnxn: EventThread shut down for session: 0xff80bd967d0b0000
2022-05-13 22:19:22,690 INFO tools.DFSZKFailoverController: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down DFSZKFailoverController at master/192.168.28.111
************************************************************/
[root@master hadoop]$
start-dfs.sh
start-yarn.sh
[root@master hadoop]$ jps
3273 QuorumPeerMain
7099 Jps
[root@master hadoop]$ start-dfs.sh
Starting namenodes on [master slave1]
Starting datanodes
Starting journal nodes [slave2 slave1 master]
Starting ZK Failover Controllers on NN hosts [master slave1]
[root@master hadoop]$ jps
7664 JournalNode
7285 NameNode
3273 QuorumPeerMain
7961 Jps
7419 DataNode
7886 DFSZKFailoverController
[root@slave1 hadoop]$ jps
5330 DataNode
2964 QuorumPeerMain
5556 DFSZKFailoverController
5604 Jps
5445 JournalNode
[root@slave2 hadoop]$ jps
5042 JournalNode
5106 Jps
3108 QuorumPeerMain
4926 DataNode
[root@master hadoop]$ start-yarn.sh
Starting resourcemanagers on [ master slave1]
Starting nodemanagers
[root@master hadoop]$ jps
9667 JournalNode
9284 NameNode
10292 ResourceManager
10774 Jps
10440 NodeManager
3273 QuorumPeerMain
9900 DFSZKFailoverController
9421 DataNode
[root@slave1 hadoop]$ jps
6560 ResourceManager
6209 DataNode
6657 NodeManager
6323 JournalNode
2964 QuorumPeerMain
6442 DFSZKFailoverController
6783 Jps
[root@slave2 hadoop]$ jps
3108 QuorumPeerMain
5556 JournalNode
5812 Jps
5705 NodeManager
5438 DataNode
yarn-daemon.sh start proxyserver
mr-jobhistory-daemon.sh start historyserver
The output looks like:
[root@master ~]$ yarn-daemon.sh start proxyserver
WARNING: Use of this script to start YARN daemons is deprecated.
WARNING: Attempting to execute replacement "yarn --daemon start" instead.
The output looks like:
[root@master ~]$ mr-jobhistory-daemon.sh start historyserver
WARNING: Use of this script to start the MR JobHistory daemon is deprecated.
WARNING: Attempting to execute replacement "mapred --daemon start" instead.
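If both daemons started successfully, jps on master should additionally list the history server and proxy server processes (typically shown as JobHistoryServer and WebAppProxyServer):
jps | grep -E 'JobHistoryServer|WebAppProxyServer'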
This step copies the NameNode metadata from master to slave1 and slave2:
scp -r /usr/local/src/hadoop/tmp/* slave1:/usr/local/src/hadoop/tmp/
scp -r /usr/local/src/hadoop/tmp/* slave2:/usr/local/src/hadoop/tmp/
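As a commonly used alternative to copying the metadata directory by hand, the standby NameNode (slave1 here) can usually be initialized directly from the running active NameNode; this is a sketch of that approach, not part of the original steps:
# on slave1, while the NameNode on master is running
hdfs namenode -bootstrapStandby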
The jps output on the three machines shows a NameNode only on master, so the NameNode has to be started individually on slave1:
#slave1
hadoop-daemon.sh start namenode
yarn-daemon.sh start resourcemanager
Check whether a ResourceManager process exists on both master and slave1; if it does, it does not need to be started separately. If it is missing on either node, start it there with yarn-daemon.sh start resourcemanager, then verify with:
# master slave1 slave2
jps
# visit the web UIs of the two NameNodes and the ResourceManagers
master:50070
master:8088
slave1:50070
slave1:8088
Use the built-in browser to visit master:50070 and slave1:50070, check their states, and take screenshots.
Visit the master:8088 page, click Nodes in the left-hand menu, and take a screenshot.
On master, run the following to stop the active NameNode process:
hadoop-daemon.sh stop namenode
The output looks like:
[root@master hadoop]# hadoop-daemon.sh stop namenode
stopping namenode
# master slave1 slave2
jps
# visit the web UIs of the two NameNodes and the ResourceManagers
master:50070
master:8088
slave1:50070
slave1:8088
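Before restarting the stopped NameNode, the failover itself can be confirmed from the command line; with the active NameNode on master stopped, the NameNode on slave1 should now report active:
hdfs haadmin -getServiceState slave1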
#master
hadoop-daemon.sh start namenode
hdfs haadmin -getServiceState master
hdfs haadmin -getServiceState slave1
# test (run from the Hadoop installation directory; the examples jar must match the installed Hadoop version, 2.7.1 here)
cd /usr/local/src/hadoop
hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar pi 10 10
hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar wordcount /wordcount/input/1.txt /wordcount/output/1
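For the wordcount job the input file must already exist in HDFS. A minimal sketch, assuming a local text file /root/1.txt (the local path is only an example), followed by viewing the result:
hdfs dfs -mkdir -p /wordcount/input
hdfs dfs -put /root/1.txt /wordcount/input/1.txt
# after the job finishes, inspect the output
hdfs dfs -cat /wordcount/output/1/part-*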