Downloads: https://zookeeper.apache.org/releases.html
Getting-started guide: https://zookeeper.apache.org/doc/r3.1.2/zookeeperStarted.html
# Change the hostname as needed (hostnamectl persists across reboots on CentOS 7)
hostnamectl set-hostname hadoop1
# Disable SELinux
# Set the SELINUX value to disabled
vi /etc/selinux/config
SELINUX=disabled
# A reboot is required for this to take effect
# Check SELinux status
getenforce
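# Optional: switch SELinux to permissive mode immediately, to avoid waiting for a reboot
# (the config change above still takes full effect only after the next reboot)
setenforce 0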
# Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld && systemctl status firewalld
# Install Chrony
yum install chrony -y
# Configure Chrony
# Comment out the default NTP server entries
# Add the desired NTP server
vi /etc/chrony.conf
server hadoop1 iburst
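# For the nodes to sync against hadoop1, hadoop1 itself must serve NTP. A minimal
# sketch of the extra lines in /etc/chrony.conf on hadoop1 only, assuming the
# 10.10.10.0/24 subnet from the HOSTS file below (the stratum value is an assumption):
allow 10.10.10.0/24
local stratum 10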
# Restart Chrony and enable it at boot
systemctl enable chronyd && systemctl restart chronyd
# Check Chrony status
chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* hadoop1                       4   6   377    12    -28us[  -45us] +/-   75ms
# Set up passwordless SSH
# Generate a key pair on every node
ssh-keygen -t rsa
# Append each node's id_rsa.pub to the authorized_keys file on every node
# (run these three commands on every node)
ssh-copy-id root@hadoop1
ssh-copy-id root@hadoop2
ssh-copy-id root@hadoop3
# Resulting file
cat /root/.ssh/authorized_keys
# redis-nodes
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDwuKw9LdfDO3Ln+ViNtQEqZtH/RvoFymKkexBXRUK/2XcczKHPv967KHH71L/5vPOQPUXZLZg3TPERlRTIW9MvCh0LmceGAiQHrxczx56RnYh8nESknd2jbHBToGwqgoB8xsB2IQuhze0CqvRs7A0nrbyBvnUpg/DvePTOSSgii4z9kishBCbrCPamQm20drXVDK3gQ9Q+/YJLKa3+mxzI67xfk/jby0A0DD9XKL7fflRgMK0GXEtYsJ04tKc5Bo+w6Zc8gHyryFrKD4wpeoPakqmrdzaTVYI1x5WvrAPrQplxAP8iNfBqRJSHvlDBXVeXgSxz2I4HBshsStkKp root@redis1
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkspWeTwWoWyr6biMnefOYT4kh+7gPAboHAWe7p67IR9pfu+Rkk/vxLFDbi7X6Td9AhIXEZH6fY5BhihBzhRO/VtjE24QqnXdOLDHV1i0rSEYh6GOAbnVl/93lKidQF/2wvnQET31m1iwls3ul6aWw8/pOcxWy6kB+6MRiOExhu+0erE3jBFLcl+e0IJLKp/nLjCof/qWh3hLGVyhgMn/WmGhf7OyUbedXFqAwwS83/M60jSL1nB1lnIOoHrNSdnrN/GJVXmmwJjJAG4g4hbAg2zNind2rz6p4mq5k7iBbDUFghFwKKYsGeV0Onm7SKErFlHCJNFSOgfVNpaUYJ root@redis2
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+DGKAYw3tbdmv2GDsz3HEOdoKk8JVCEvDFczap2g3DoaqwEBkRag2l9IQ3RZL/WtpKe0f2vZzcm5t3d7e6YhyfEXRn1fjOmynTcykB13xAVrlRfJ6Sayur0OiPzWBktpNj8qaTKjwH+lyHGBwa5duqKiVEglEH2mX5grcOa/mH2Mo+IWsCYeCldKjfdBy2drlAim1fYvJwvtg0uDe8sfDUdDonG4phNOVaWB2u79SxKlGnGewGNuOrifIzkbc0mH9kNgrlw/xdSIqaFA738Yn/4n/kSe3BgceJ0wBowLzorgW2ogyGOdQp6MzBRlg/hxn4EDLJisrC9mSCMOOl root@redis3
# hadoop-nodes
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvWawSJqu4/Adnu6TjvV8rVDAqTU2CGNaSBOTDjcytHHOaY8UiwUMKvXUJugBmRkyhtWhQPHrVSmOH6+qMnHk5XQcWBmce8qCQqDoz49WwyZH95ciY/ynKR9dzAJwXN5fvJEoKxBhSJLk27SDsgRUX05IAjTN5Wx05GCNC36CRGHr6bwsC5iK+nv1ZllkRPyqoICJcvVVoJFDe+svNwLJS8bEpTUS/3C6w1RdfEgGVK0/NLnmANz6VIu5LAZqOpwFcB8Zed3wgnoHUfDCSXLEUQbcgRxDvba7lcvOqbiNh4Tr6WctSHw0UD9PSK6AXdS0jAAyjZ1J5kbWaI+vmZ root@hadoop1
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwCqgQWDgw7sSqNer1oONzsFhoCWBmLqdWOQCcC7RYhD6kiVzdAEP7qZwWKRwoe/E++xP0+slgxsIsXGVoObGrlT3n+g/2xsgTCaBT/6sGV7k28UOozh76GlyfJjzavbwWE9Q2yR2mkb3/ILGE6CUNCkqqLuYEDTG4DxNupGhsGSYChAcjclzYFrMxDARiOJ8cahDjVlmGzFWxNhzJ36pFC1Rdyeu4CrtZ8tkuqQagGZqB63bVmvTiOM2fY8Wp8TNv0Zz2XmFmv7IUhpDXlPZdFCviwLYLLoJ9LTG32rO/jY0U78LFdDpsYdebthztNakKMZEhCqVIR+k1VMPtp root@hadoop2
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHmj5qT64jSc3LCR2EBKB+12C1XxhFlc44X8zdf3mL8gbepG+ndMgBV4eombLg7QjZshCsjhk9d8esofAlrPk5tX/nWWHg3p5jYTh5/6V+iU7VDpWmMVN/87dsjBbmM9P6jTNiwqk4rdSXDKXkmrVygGHnEj95eP35Nq1JKg+GS7RjWWB0+loGQ4eYKO1nj2nYNOlNBi28CKh1uMWf42bDtcfKP3Z4gEOtPBD5rVPiU2Tq6jgtAs/VvaYGv5FHO4MB0lBE1ik8zp/4trfGU5hie/1PzCRAIvsqPEBSzeUs9nhHODj6vZYwgQupK9Qv5jEbQgh6pCGEfFZlfsC03 root@hadoop3
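# Verify passwordless login from each node, e.g.:
ssh hadoop2 hostname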
# Install Oracle JDK
# Download Oracle JDK and unpack it into the target path
# Append to /etc/profile (quote EOF so the variables are written literally,
# and use >> so the existing profile is not overwritten)
cat >> /etc/profile << 'EOF'
# Oracle JDK 1.8.0_333
export JAVA_HOME=/data/service/jdk/jdk1.8.0_333
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin
EOF
# Reload the profile
source /etc/profile
# Verify the JDK
java -version
java version "1.8.0_333"
Java(TM) SE Runtime Environment (build 1.8.0_333-b02)
Java HotSpot(TM) 64-Bit Server VM (build 25.333-b02, mixed mode)
# Configure the HOSTS file
# Append to /etc/hosts (>> keeps the default localhost entries)
cat >> /etc/hosts << EOF
# redis-nodes
10.10.10.21 redis1
10.10.10.22 redis2
10.10.10.23 redis3
# hadoop-nodes
10.10.10.131 hadoop1
10.10.10.132 hadoop2
10.10.10.133 hadoop3
EOF
# Disable swap
swapoff -a
# Comment out the swap mount in fstab
vi /etc/fstab
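# Equivalent non-interactive edit (a sketch; comments out any line mounting swap):
sed -i '/\sswap\s/s/^#*/#/' /etc/fstab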
# Set vm.swappiness
echo "vm.swappiness = 0" >> /etc/sysctl.conf
# Apply the change
sysctl -p
# Configure transparent_hugepage
# Takes effect immediately (until reboot)
echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag
# Persist across reboots
echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.local && echo "echo never > /sys/kernel/mm/transparent_hugepage/defrag" >> /etc/rc.local
# Raise the open-file and process limits
# On CentOS 6 the file is named 90-nproc.conf
# On CentOS 7 the file is named 20-nproc.conf
vi /etc/security/limits.d/20-nproc.conf
* - nofile 655350
* - nproc 655350
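# Verify in a new login session
ulimit -n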
# Create the ZooKeeper directories
mkdir -p /data/service/zookeeper/{data,datalog,jvm_heap_dump,logs}
# Append to /etc/profile (as with JAVA_HOME above)
# ZooKeeper 3.5.10
export ZK_HOME=/data/service/zookeeper/apache-zookeeper-3.5.10-bin
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin
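# Reload the profile
source /etc/profile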
# Configure $ZK_HOME/conf/zoo.cfg
# Heartbeat interval in milliseconds; the tick is the time unit for the
# heartbeat-based settings below. Default: 2000.
tickTime=2000
# Max ticks allowed for a follower to initially connect and sync to the leader. Default: 10.
initLimit=20
# Max ticks allowed between a leader request and the follower's answer. Default: 5.
syncLimit=5
# Directory for data snapshots.
dataDir=/data/service/zookeeper/data
# Directory for transaction logs; if unset, they are written into dataDir.
# Keeping the two paths separate is generally recommended.
dataLogDir=/data/service/zookeeper/datalog
# Port that clients connect to. Default: 2181.
clientPort=2181
# Max concurrent connections a single client may open to a single server. Default: 60; 0 = unlimited.
maxClientCnxns=0
# Minimum client session timeout the server allows, in milliseconds. Default: 2*tickTime.
minSessionTimeout=30000
# Maximum client session timeout the server allows, in milliseconds. Default: 20*tickTime.
maxSessionTimeout=60000
# Auto-purge interval in hours. Default: 0; any positive integer enables auto purging.
autopurge.purgeInterval=1
# Number of snapshots (with their transaction logs) kept by auto purge. Default: 3; minimum: 3.
autopurge.snapRetainCount=3
# Max number of outstanding (not yet processed) client requests. Default: 1000.
globalOutstandingLimit=200
# Disk space preallocated per transaction log file, in KB. Default: 65536.
preAllocSize=65536
# Number of transactions per transaction log before a snapshot is taken and the log rolls. Default: 100000.
snapCount=100000
# Whether the leader accepts client connections. Default: yes.
# With more than 3 nodes it is advisable to set this to no.
leaderServes=yes
# Ensemble members
# 2888 is the quorum/data port, 3888 the leader-election port.
server.1=10.10.10.131:2888:3888
server.2=10.10.10.132:2888:3888
server.3=10.10.10.133:2888:3888
# Enable the four-letter-word commands
4lw.commands.whitelist=*
# Write this node's ID into the myid file
echo 1 > /data/service/zookeeper/data/myid
# JVM and logging settings, e.g. in $ZK_HOME/conf/java.env (sourced by zkEnv.sh)
#!/bin/bash
export JAVA_HOME=/data/service/jdk/jdk1.8.0_333
export JVMFLAGS="$JVMFLAGS -XX:+UseG1GC -Xms1024m -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/data/service/zookeeper/jvm_heap_dump"
ZOO_LOG_DIR="/data/service/zookeeper/logs"
Distribute /data/service/zookeeper to all nodes (see the sketch below).
Adjust the ID written in /data/service/zookeeper/data/myid on each node so that no two nodes share an ID.
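# A sketch of the distribution and the per-node myid values, assuming root SSH
# between the hosts configured above:
scp -r /data/service/zookeeper hadoop2:/data/service/
scp -r /data/service/zookeeper hadoop3:/data/service/
ssh hadoop2 "echo 2 > /data/service/zookeeper/data/myid"
ssh hadoop3 "echo 3 > /data/service/zookeeper/data/myid"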
# Start ZooKeeper on every node
$ZK_HOME/bin/zkServer.sh start
# Check the node's role
$ZK_HOME/bin/zkServer.sh status
Mode: leader
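# With the four-letter-word whitelist enabled above, the server can also be
# probed over the client port (assumes nc is installed):
echo ruok | nc hadoop1 2181
imok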
$ZK_HOME/bin/zkCli.sh -server hadoop1:2181
[zk: hadoop1:2181(CONNECTED) 0] ls /
[hadoop-ha, hbase, rmstore, spark, yarn-leader-election, zookeeper]
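A quick write/read/delete smoke test from the same session, using a hypothetical znode /smoke:
[zk: hadoop1:2181(CONNECTED) 1] create /smoke hello
Created /smoke
[zk: hadoop1:2181(CONNECTED) 2] get /smoke
hello
[zk: hadoop1:2181(CONNECTED) 3] delete /smoke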
# Check whether the user already exists
id zk
# Check whether the UID and GID are already taken
cat /etc/passwd | grep 1000
cat /etc/group | grep 1000
# Create group and user zk with UID 1000 and GID 1000
groupadd -g 1000 zk && useradd -u 1000 -g zk zk
# Restrict permissions and hand the installation over to zk
chmod -R 700 $ZK_HOME && chmod 600 $ZK_HOME/conf/*
chown -R zk:zk $ZK_HOME
# Start ZooKeeper as the zk user
su - zk
$ZK_HOME/bin/zkServer.sh start
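# Confirm the process now runs as the zk user
ps -ef | grep '[z]ookeeper'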