Original article: Zookeeper (Part 2: Distributed Cluster Installation and Command-Line Operations).
Install the Hadoop cluster first
Link: Hadoop (Part 2: Installing a Hadoop 3.2.1 Cluster).
ZooKeeper will be deployed on the three nodes hadoop100, hadoop101, and hadoop102.
This walkthrough uses ZooKeeper 3.6.1.
Link: https://pan.baidu.com/s/1PKpjntrFyT-yiF20EdlL_Q
Extraction code: c0ol
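Alternatively, the same release can be downloaded directly from the Apache archive (the URL below follows the standard archive.apache.org layout for the 3.6.1 binary release):
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.6.1/apache-zookeeper-3.6.1-bin.tar.gz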
# Upload the tarball with the rz command
[root@hadoop100 ~]# cd /opt/tar.gz/
[root@hadoop100 tar.gz]# rz
[root@hadoop100 tar.gz]# ls
apache-zookeeper-3.6.1-bin.tar.gz hadoop-3.2.1.tar.gz jdk-8u251-linux-x64.tar.gz
[root@hadoop100 tar.gz]# tar -zxvf apache-zookeeper-3.6.1-bin.tar.gz -C /opt/software/
[root@hadoop100 tar.gz]# cd /opt/software/
[root@hadoop100 software]# ls
apache-zookeeper-3.6.1-bin hadoop-3.2.1 jdk1.8.0_251
[root@hadoop100 software]# xsync apache-zookeeper-3.6.1-bin/
[root@hadoop100 software]# ssh hadoop101
Last login: Fri Jun 19 00:29:21 2020 from hadoop100
[root@hadoop101 ~]# cd /opt/software/
[root@hadoop101 software]# ls
apache-zookeeper-3.6.1-bin hadoop-3.2.1 jdk1.8.0_251
[root@hadoop101 software]# ssh hadoop102
Last login: Fri Jun 19 00:29:30 2020 from hadoop101
[root@hadoop102 ~]# cd /opt/software/
[root@hadoop102 software]# ls
apache-zookeeper-3.6.1-bin hadoop-3.2.1 jdk1.8.0_251
[root@hadoop102 software]#
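The xsync used above is the rsync-based distribution script set up in the linked Hadoop article. If it is not available on your nodes, a minimal stand-in (an assumption, not the original script) along these lines does the same job, given passwordless SSH from hadoop100 to the other two hosts:
#!/bin/bash
# Minimal xsync stand-in: push each given path to the other nodes,
# keeping the same absolute location on every host.
for host in hadoop101 hadoop102; do
  for path in "$@"; do
    dir=$(cd -P "$(dirname "$path")" && pwd)   # resolve the parent directory
    name=$(basename "$path")
    ssh "$host" "mkdir -p $dir"                # make sure the target directory exists
    rsync -av "$dir/$name" "$host:$dir/"       # copy the file or directory recursively
  done
done
Save it somewhere on PATH (for example /bin/xsync) and make it executable with chmod +x. Next, create the data directory and the myid file on each node: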
[root@hadoop102 software]# ssh hadoop100
[root@hadoop100 ~]# cd /opt/software/apache-zookeeper-3.6.1-bin/
[root@hadoop100 apache-zookeeper-3.6.1-bin]# mkdir zkData
[root@hadoop100 apache-zookeeper-3.6.1-bin]# ls
bin conf docs lib LICENSE.txt NOTICE.txt README.md README_packaging.md zkData
[root@hadoop100 apache-zookeeper-3.6.1-bin]# cd zkData/
[root@hadoop100 zkData]# touch myid
[root@hadoop100 zkData]# ls
myid
[root@hadoop100 zkData]# cd ..
[root@hadoop100 apache-zookeeper-3.6.1-bin]# xsync zkData/ # distribute zkData/ to hadoop101 and hadoop102
[root@hadoop100 apache-zookeeper-3.6.1-bin]# cd zkData/
[root@hadoop100 zkData]# vi myid
100
:wq
[root@hadoop100 zkData]# ssh hadoop101
Last login: Tue Jun 23 23:15:35 2020 from hadoop100
[root@hadoop101 ~]# cd /opt/software/apache-zookeeper-3.6.1-bin/zkData/
[root@hadoop101 zkData]# vi myid
101
:wq
[root@hadoop101 zkData]# ssh hadoop102
Last login: Tue Jun 23 23:15:55 2020 from hadoop101
[root@hadoop102 ~]# cd /opt/software/apache-zookeeper-3.6.1-bin/zkData/
[root@hadoop102 zkData]# vi myid
102
:wq
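To double-check that each node ended up with the intended ID (assuming the same install path and passwordless SSH on all three hosts), a quick loop from hadoop100 works:
for h in hadoop100 hadoop101 hadoop102; do
  echo -n "$h: "
  ssh "$h" cat /opt/software/apache-zookeeper-3.6.1-bin/zkData/myid
done
This should print 100, 101, and 102 respectively.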
Next, rename the sample configuration to zoo.cfg and edit it; the key parameters of zoo.cfg are explained in the annotated example below:
[root@hadoop100 ~]# cd /opt/software/apache-zookeeper-3.6.1-bin/conf/
[root@hadoop100 conf]# ls
configuration.xsl log4j.properties zoo_sample.cfg
[root@hadoop100 conf]# mv zoo_sample.cfg zoo.cfg
[root@hadoop100 conf]# ls
configuration.xsl log4j.properties zoo.cfg
[root@hadoop100 conf]# vi zoo.cfg
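The original post does not show the edited file; for this three-node cluster, zoo.cfg would look roughly like the following (a sketch: dataDir points at the zkData directory created above, and each server.<id> must match that node's myid):
# Heartbeat unit in milliseconds
tickTime=2000
# How many ticks a follower may take to connect to and sync with the leader at startup
initLimit=10
# How many ticks a follower may lag behind the leader before being dropped
syncLimit=5
# Directory for snapshots and the myid file (the zkData directory created above)
dataDir=/opt/software/apache-zookeeper-3.6.1-bin/zkData
# Port that clients (zkCli.sh) connect to
clientPort=2181
# Cluster members: server.<myid>=<hostname>:<quorum port>:<leader-election port>
server.100=hadoop100:2888:3888
server.101=hadoop101:2888:3888
server.102=hadoop102:2888:3888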
[root@hadoop100 conf]# xsync zoo.cfg
[root@hadoop100 apache-zookeeper-3.6.1-bin]# bin/zkServer.sh start
[root@hadoop101 apache-zookeeper-3.6.1-bin]# bin/zkServer.sh start
[root@hadoop102 apache-zookeeper-3.6.1-bin]# bin/zkServer.sh start
[root@hadoop100 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/software/apache-zookeeper-3.6.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower
[root@hadoop101 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/software/apache-zookeeper-3.6.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: leader
[root@hadoop102 apache-zookeeper-3.6.1-bin]# bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/software/apache-zookeeper-3.6.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower
hadoop100 and hadoop102 are currently followers and hadoop101 is the leader, which shows that ZooKeeper's leader election is working correctly.
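Starting and checking the three nodes one by one gets repetitive; a small helper script (not part of the original post; it assumes passwordless SSH and the same install path on every node) can drive the whole cluster at once:
#!/bin/bash
# Usage: zk.sh start|stop|status  -- run zkServer.sh on all three nodes
case "$1" in
start|stop|status)
  for host in hadoop100 hadoop101 hadoop102; do
    echo "---------- $host ----------"
    # source /etc/profile so JAVA_HOME is set in the non-interactive ssh shell
    ssh "$host" "source /etc/profile; /opt/software/apache-zookeeper-3.6.1-bin/bin/zkServer.sh $1"
  done
  ;;
*)
  echo "Usage: $0 start|stop|status"
  ;;
esac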
For ZooKeeper fundamentals and internals, see the previous article in this series:
Link: Zookeeper (Part 1: ZooKeeper Basics and Principles for Big Data).
[root@hadoop100 apache-zookeeper-3.6.1-bin]# bin/zkCli.sh
[zk: localhost:2181(CONNECTED) 2] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 4] ls -s /
[zookeeper]
cZxid = 0x0
ctime = Thu Jan 01 08:00:00 CST 1970
mZxid = 0x0
mtime = Thu Jan 01 08:00:00 CST 1970
pZxid = 0x0
cversion = -1
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 1
[zk: localhost:2181(CONNECTED) 1] create /sanguo "jinlian"
Created /sanguo
[zk: localhost:2181(CONNECTED) 2] create /sanguo/shuguo "liubei"
Created /sanguo/shuguo
[zk: localhost:2181(CONNECTED) 3] ls /
[sanguo, zookeeper]
[zk: localhost:2181(CONNECTED) 4] ls /sanguo
[shuguo]
[zk: localhost:2181(CONNECTED) 5] get /sanguo
jinlian
[zk: localhost:2181(CONNECTED) 6] get /sanguo/shuguo
liubei
[zk: localhost:2181(CONNECTED) 7] get -s /sanguo
jinlian
cZxid = 0x100000004
ctime = Wed Jun 24 00:04:51 CST 2020
mZxid = 0x100000004
mtime = Wed Jun 24 00:04:51 CST 2020
pZxid = 0x100000005
cversion = 1
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 7
numChildren = 1
[zk: localhost:2181(CONNECTED) 8] get -s /sanguo/shuguo
liubei
cZxid = 0x100000005
ctime = Wed Jun 24 00:05:10 CST 2020
mZxid = 0x100000005
mtime = Wed Jun 24 00:05:10 CST 2020
pZxid = 0x100000005
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 6
numChildren = 0
[zk: localhost:2181(CONNECTED) 9] create -e /sanguo/wuguo
Created /sanguo/wuguo
[zk: localhost:2181(CONNECTED) 10] ls /sanguo
[shuguo, wuguo]
[zk: localhost:2181(CONNECTED) 10] quit
[root@hadoop100 apache-zookeeper-3.6.1-bin]# bin/zkCli.sh
[zk: localhost:2181(CONNECTED) 1] ls /sanguo
[shuguo]
Notice that after the client quit, the ephemeral node /sanguo/wuguo created just before was removed automatically.
A. First create an ordinary (persistent) node /sanguo/weiguo
[zk: localhost:2181(CONNECTED) 0] create /sanguo/weiguo
Created /sanguo/weiguo
[zk: localhost:2181(CONNECTED) 1] ls /sanguo
[shuguo, weiguo]
B. Create nodes with a sequence number
[zk: localhost:2181(CONNECTED) 2] create -s /sanguo/weiguo "caocao"
Created /sanguo/weiguo0000000003
[zk: localhost:2181(CONNECTED) 3] create -s /sanguo/weiguo "caocao"
Created /sanguo/weiguo0000000004
[zk: localhost:2181(CONNECTED) 4] create -s /sanguo/weiguo "caocao"
Created /sanguo/weiguo0000000005
If the parent had no sequential children before, numbering starts at 0 and increments with each new node; if the parent already has 2 children, the next sequence number starts from 2, and so on.
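The -e and -s flags can also be combined: create -e -s makes an ephemeral sequential node that disappears when the session ends (not shown in the original session; the assigned number depends on the parent's counter):
create -e -s /sanguo/wuguo "sunquan"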
[zk: localhost:2181(CONNECTED) 8] set /sanguo/shuguo "diaocan"
[zk: localhost:2181(CONNECTED) 9] get -s /sanguo/shuguo
diaocan
cZxid = 0x100000005
ctime = Wed Jun 24 00:05:10 CST 2020
mZxid = 0x100000011
mtime = Wed Jun 24 00:22:34 CST 2020
pZxid = 0x100000005
cversion = 0
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 7
numChildren = 0
[zk: localhost:2181(CONNECTED) 10] set /sanguo/shuguo "lvbu"
[zk: localhost:2181(CONNECTED) 11] get -s /sanguo/shuguo
lvbu
cZxid = 0x100000005
ctime = Wed Jun 24 00:05:10 CST 2020
mZxid = 0x100000012
mtime = Wed Jun 24 00:23:26 CST 2020
pZxid = 0x100000005
cversion = 0
dataVersion = 2
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 4
numChildren = 0
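Each set bumps dataVersion by one. The CLI's set command should also accept -v <version> for a conditional update that only succeeds when the current dataVersion still matches (an optimistic-concurrency example, not in the original session):
set -v 2 /sanguo/shuguo "zhugeliang"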
Register a watch on hadoop102 for data changes on the /sanguo node:
[zk: localhost:2181(CONNECTED) 3] get -s -w /sanguo
jinlian
cZxid = 0x100000004
ctime = Wed Jun 24 00:04:51 CST 2020
mZxid = 0x100000004
mtime = Wed Jun 24 00:04:51 CST 2020
pZxid = 0x10000000f
cversion = 7
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 7
numChildren = 5
At this point the value is still the original **"jinlian"**.
Now modify the data of the **/sanguo** node on hadoop100:
[zk: localhost:2181(CONNECTED) 13] set /sanguo "guanyu"
Back on hadoop102, a watch notification is printed. Note that this watch is one-shot: it fires once, and further modifications will not trigger another notification. (In a ZooKeeper client project, the watch can be re-registered in code to keep monitoring.)
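Since this cluster runs ZooKeeper 3.6.1, the CLI should also support addWatch, which registers a persistent watch (or a persistent-recursive watch covering the whole subtree) that keeps firing until it is removed; an example based on that 3.6 feature, not from the original post:
addWatch -m PERSISTENT /sanguo
addWatch -m PERSISTENT_RECURSIVE /sanguo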
[zk: localhost:2181(CONNECTED) 18] ls /sanguo
[shuguo, weiguo, weiguo0000000003, weiguo0000000004, weiguo0000000005]
[zk: localhost:2181(CONNECTED) 19] delete /sanguo/weiguo0000000003
[zk: localhost:2181(CONNECTED) 20] ls /sanguo
[shuguo, weiguo, weiguo0000000004, weiguo0000000005]
[zk: localhost:2181(CONNECTED) 21] deleteall /sanguo
[zk: localhost:2181(CONNECTED) 22] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 25] stat /zookeeper
cZxid = 0x0
ctime = Thu Jan 01 08:00:00 CST 1970
mZxid = 0x0
mtime = Thu Jan 01 08:00:00 CST 1970
pZxid = 0x0
cversion = -2
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 2
References:
Link: 尚硅谷 ZooKeeper tutorial (zookeeper框架精讲).