docker版本
[root@localhost opt]# docker --version
Docker version 19.03.1, build 74b1e89
docker网络
[root@localhost opt]# docker network ls
NETWORK ID NAME DRIVER SCOPE
d05f84a8746f bridge bridge local
398d73e5b1ed hadoop_net bridge local
da5e746c4d2e host host local
bfa3adcc971d none null local
ip | 主机名 | HDFS | MapReduce/Yarn | 角色 |
---|---|---|---|---|
172.10.0.2 | master | NameNode | ResourceManager | Master |
172.10.0.3 | slave1 | DataNode | NodeManager | RegionServer、Backup Master |
172.10.0.4 | slave2 | DataNode | NodeManager | RegionServer |
hadoop镜像制作方法:https://blog.csdn.net/qq_39680564/article/details/98043754
从仓库拉取镜像
# Pull the prebuilt Hadoop base images (one tag per cluster node) from
# the private registry at 192.168.0.20:5000.
docker pull 192.168.0.20:5000/hadoop:master
docker pull 192.168.0.20:5000/hadoop:slave1
docker pull 192.168.0.20:5000/hadoop:slave2
启动master:
# Start the master container on the hadoop_net bridge with a fixed IP
# (172.10.0.2) and static /etc/hosts entries for all three nodes.
# Host port mappings: 10022->22 (SSH), 2181 (ZooKeeper client),
# 2887/3887->2888/3888 (ZooKeeper peer/election ports, shifted on the
# host side so they do not clash with slave1's 2888/3888 mappings),
# 9870 (NameNode UI), 8088 (ResourceManager UI), 16010 (HBase Master UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.2 \
-h master \
-p 10022:22 \
-p 2181:2181 \
-p 2887:2888 \
-p 3887:3888 \
-p 9870:9870 \
-p 8088:8088 \
-p 16010:16010 \
--restart always \
--name master \
192.168.0.20:5000/hadoop:master
启动slave1:
# Start the slave1 container (fixed IP 172.10.0.3 on hadoop_net).
# Host port mappings: 20022->22 (SSH), 2182->2181 (ZooKeeper client),
# 2888/3888 (ZooKeeper peer/election), 9864 (DataNode UI),
# 8042 (NodeManager UI), 16011->16010 (backup HBase Master UI),
# 16030 (RegionServer UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.3 \
-h slave1 \
-p 20022:22 \
-p 2182:2181 \
-p 2888:2888 \
-p 3888:3888 \
-p 9864:9864 \
-p 8042:8042 \
-p 16011:16010 \
-p 16030:16030 \
--restart always \
--name slave1 \
192.168.0.20:5000/hadoop:slave1
启动slave2:
# Start the slave2 container (fixed IP 172.10.0.4 on hadoop_net).
# Host port mappings are shifted by one relative to slave1 to avoid
# conflicts: 30022->22 (SSH), 2183->2181 (ZooKeeper client),
# 2889/3889->2888/3888 (ZooKeeper peer/election), 9865->9864
# (DataNode UI), 8043->8042 (NodeManager UI), 16031->16030
# (RegionServer UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.4 \
-h slave2 \
-p 30022:22 \
-p 2183:2181 \
-p 2889:2888 \
-p 3889:3888 \
-p 9865:9864 \
-p 8043:8042 \
-p 16031:16030 \
--restart always \
--name slave2 \
192.168.0.20:5000/hadoop:slave2
将hbase包复制进容器
# Copy the unpacked HBase 2.1.0 distribution from the host into /opt
# inside each of the three running containers.
docker cp hbase-2.1.0/ master:/opt/
docker cp hbase-2.1.0/ slave1:/opt/
docker cp hbase-2.1.0/ slave2:/opt/
vim ~/.bashrc
新增内容
# HBase environment (appended to ~/.bashrc on every node).
# HBASE_CONF_DIR is derived from HBASE_HOME so the two can never drift
# apart if the install path changes.
export HBASE_HOME=/opt/hbase-2.1.0
export HBASE_CONF_DIR=$HBASE_HOME/conf
export PATH=$PATH:$HBASE_HOME/bin
刷新生效
source ~/.bashrc
vim /opt/hbase-2.1.0/conf/hbase-env.sh
修改如下内容
# Point HBase at the container's JDK, and disable HBase's embedded
# ZooKeeper — this cluster runs its own ZooKeeper quorum on all three
# nodes (see the QuorumPeerMain processes in the jps output below).
export JAVA_HOME=/opt/jdk-1.8
export HBASE_MANAGES_ZK=false
vim /opt/hbase-2.1.0/conf/hbase-site.xml
添加如下内容
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://master:9000/hbase</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>master,slave1,slave2</value>
</property>
<property>
  <name>hbase.unsafe.stream.capability.enforce</name>
  <value>false</value>
</property>
vim /opt/hbase-2.1.0/conf/regionservers
改为如下内容
slave1
slave2
vim /opt/hbase-2.1.0/conf/backup-masters
添加如下内容(该文件是不存在的,需要自己创建,里面的主机当作master的备份机)
slave1
如图
# Remove HBase's bundled slf4j-log4j12 binding — Hadoop already provides
# an SLF4J binding, and keeping both on the classpath causes a duplicate
# "multiple SLF4J bindings" conflict at startup.
rm -rf /opt/hbase-2.1.0/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar
# Copy htrace-core into HBase's main lib/ directory so it is on the
# default classpath (presumably absent from lib/ in this build — verify).
cp /opt/hbase-2.1.0/lib/client-facing-thirdparty/htrace-core-3.1.0-incubating.jar /opt/hbase-2.1.0/lib/
[root@master ~]# /opt/hbase-2.1.0/bin/start-hbase.sh
running master, logging to /opt/hbase-2.1.0/logs/hbase-root-master-master.out
slave1: running regionserver, logging to /opt/hbase-2.1.0/logs/hbase-root-regionserver-slave1.out
slave2: running regionserver, logging to /opt/hbase-2.1.0/logs/hbase-root-regionserver-slave2.out
slave1: running master, logging to /opt/hbase-2.1.0/logs/hbase-root-master-slave1.out
master
[root@master ~]# jps
33 QuorumPeerMain
1649 Jps
249 NameNode
507 SecondaryNameNode
1372 HMaster
749 ResourceManager
slave1
[root@slave1 ~]# jps
880 Jps
33 QuorumPeerMain
530 HMaster
249 NodeManager
138 DataNode
445 HRegionServer
slave2
[root@slave2 ~]# jps
33 QuorumPeerMain
243 NodeManager
132 DataNode
440 HRegionServer
682 Jps
HDFS
192.168.0.138:9870
192.168.0.138:9864
192.168.0.138:9865
YARN
192.168.0.138:8088
192.168.0.138:8042
192.168.0.138:8043
HBASE
192.168.0.138:16010
192.168.0.138:16011
192.168.0.138:16030
192.168.0.138:16031
保存镜像
# Snapshot the fully configured containers as new hadoop-hbase images,
# tagged for the private registry.
docker commit master 192.168.0.20:5000/hadoop-hbase:master
docker commit slave1 192.168.0.20:5000/hadoop-hbase:slave1
docker commit slave2 192.168.0.20:5000/hadoop-hbase:slave2
上传镜像至仓库
# Push the committed images to the private registry at 192.168.0.20:5000.
docker push 192.168.0.20:5000/hadoop-hbase:master
docker push 192.168.0.20:5000/hadoop-hbase:slave1
docker push 192.168.0.20:5000/hadoop-hbase:slave2
启动方法
master
# Recreate the master container from the committed hadoop-hbase image.
# Identical network/IP/hosts/port configuration to the original master
# container: fixed IP 172.10.0.2 on hadoop_net; host ports 10022 (SSH),
# 2181 (ZooKeeper client), 2887/3887 (ZooKeeper peer/election, shifted
# on the host), 9870 (NameNode UI), 8088 (ResourceManager UI),
# 16010 (HBase Master UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.2 \
-h master \
-p 10022:22 \
-p 2181:2181 \
-p 2887:2888 \
-p 3887:3888 \
-p 9870:9870 \
-p 8088:8088 \
-p 16010:16010 \
--restart always \
--name master \
192.168.0.20:5000/hadoop-hbase:master
slave1
# Recreate the slave1 container from the committed hadoop-hbase image,
# with the same fixed IP (172.10.0.3) and port mappings as before:
# 20022 (SSH), 2182 (ZooKeeper client), 2888/3888 (ZooKeeper
# peer/election), 9864 (DataNode UI), 8042 (NodeManager UI),
# 16011 (backup HBase Master UI), 16030 (RegionServer UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.3 \
-h slave1 \
-p 20022:22 \
-p 2182:2181 \
-p 2888:2888 \
-p 3888:3888 \
-p 9864:9864 \
-p 8042:8042 \
-p 16011:16010 \
-p 16030:16030 \
--restart always \
--name slave1 \
192.168.0.20:5000/hadoop-hbase:slave1
slave2
# Recreate the slave2 container from the committed hadoop-hbase image,
# with the same fixed IP (172.10.0.4) and shifted host ports as before:
# 30022 (SSH), 2183 (ZooKeeper client), 2889/3889 (ZooKeeper
# peer/election), 9865 (DataNode UI), 8043 (NodeManager UI),
# 16031 (RegionServer UI).
docker run -d \
--add-host master:172.10.0.2 \
--add-host slave1:172.10.0.3 \
--add-host slave2:172.10.0.4 \
--net hadoop_net \
--ip 172.10.0.4 \
-h slave2 \
-p 30022:22 \
-p 2183:2181 \
-p 2889:2888 \
-p 3889:3888 \
-p 9865:9864 \
-p 8043:8042 \
-p 16031:16030 \
--restart always \
--name slave2 \
192.168.0.20:5000/hadoop-hbase:slave2
先启动zookeeper,再启动hdfs,再启动yarn,再启动hbase
# Startup order matters: ZooKeeper first, then HDFS, then YARN, then HBase.
/opt/zookeeper-3.4.10/bin/zkServer.sh start #start ZooKeeper on every node
/opt/zookeeper-3.4.10/bin/zkServer.sh status #check ZooKeeper status
/opt/hadoop-3.0.3/sbin/start-dfs.sh #start HDFS from the master node
/opt/hadoop-3.0.3/sbin/start-yarn.sh #start YARN from the master node
/opt/hbase-2.1.0/bin/start-hbase.sh #start HBase from the master node