Official Docker installation guide: https://docs.docker.com/engine/install/ubuntu/
# sed is a stream editor for text; -i edits the file in place instead of printing to stdout
# sed -i 's/old_string/new_string/' /home/1.txt
# The line below switches the Ubuntu apt sources to the Aliyun mirror
sudo sed -i 's/cn.archive.ubuntu.com/mirrors.aliyun.com/g' /etc/apt/sources.list
# Update the package index
sudo apt update -y
sudo apt install curl
# Fetch the Docker convenience install script into a file
curl -fsSL get.docker.com -o get-docker.sh
# Run the script; the Aliyun mirror speeds up downloads in China. Omit sudo when already logged in as root; warnings can be ignored
sudo sh get-docker.sh --mirror Aliyun
#========== Avoid typing sudo every time ==========
# Add the current user to the docker group so docker commands no longer need sudo
sudo gpasswd -a ${USER} docker
# Refresh group membership so the change takes effect in the current shell
newgrp - docker
sudo service docker restart
#============ Registry mirrors (accelerators) ================
# Accelerator page: https://www.daocloud.io/mirror#accelerator-doc (may stop working at times)
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
# Or edit the config file and add a mirror yourself
sudo vi /etc/docker/daemon.json
# {"registry-mirrors": ["http://hub-mirror.c.163.com"]}
# Any one of the addresses below is enough
# Tencent Cloud mirror
https://mirror.ccs.tencentyun.com
# NetEase mirror
http://hub-mirror.c.163.com
# The address below is a placeholder; copy your own from the Aliyun console: Container Registry -> Mirror Accelerator
https://xxxx.mirror.aliyuncs.com
# Mirror published by DaoCloud
http://f1361db2.m.daocloud.io
# Finally, restart Docker
sudo systemctl restart docker.service
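# Optional check: docker info prints the configured registry mirrors, so this confirms the mirror took effect
docker info | grep -A 3 "Registry Mirrors"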
# Install the SSH server
sudo apt install openssh-server
#===========docker-compose=============
# Download a suitable release from: https://github.com/docker/compose/releases
sudo curl -L https://github.com/docker/compose/releases/download/1.16.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
# wget works as well; it saves the file under its release name (e.g. docker-compose-linux-x86_64)
# Move/rename it into /usr/local/bin so it can be run from anywhere
sudo mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
# Remember to make it executable
sudo chmod +x /usr/local/bin/docker-compose
# Alternative: install copies the file and sets the executable bit in one step
sudo install docker-compose-linux-x86_64 /usr/local/bin/docker-compose
docker-compose ps
# ============================================
# Mirrors confirmed working as of 2023-06-20
# Edit the Docker config file
sudo vim /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://dockerproxy.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://ccr.ccs.tencentyun.com"
  ]
}
# Option 1: restart via systemctl (recommended)
sudo systemctl daemon-reload
sudo systemctl restart docker
# Option 2: restart via service
sudo service docker restart
# Set the time zone and avoid garbled Chinese output
# NOTE: populate the host webapps directory first, or exec into the container and rename webapps.dist to webapps
docker run -d -p 8080:8080 \
--name tomcat \
--restart=always \
-v /home/docker/tomcat/logs/:/usr/local/tomcat/logs/ \
-v /home/docker/tomcat/webapps/:/usr/local/tomcat/webapps/ \
-v /etc/localtime:/etc/localtime \
-e TZ="Asia/Shanghai" \
-e LANG="C.UTF-8" \
tomcat:latest
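# Optional check (a small sketch, assuming the tomcat container above is running): confirm the container clock follows Asia/Shanghai
docker exec tomcat date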
#==========================
docker run --name tomcat -p 8080:8080 \
-v tomcatconf:/usr/local/tomcat/conf \
-v tomcatwebapp:/usr/local/tomcat/webapps \
-d tomcat:jdk8-openjdk-slim-buster
docker-compose deployment
version: '3'
services:
  tomcat:
    image: tomcat:latest
    container_name: tomcat
    restart: always
    ports:
      - 8080:8080
    environment:
      TZ: "Asia/Shanghai"
      LANG: "C.UTF-8"
    volumes:
      - /opt/tomcat/conf/server.xml:/usr/local/tomcat/conf/server.xml
      - /opt/tomcat/webapps:/usr/local/tomcat/webapps
      - /opt/tomcat/logs:/usr/local/tomcat/logs
# https://www.digitalocean.com/community/tools/nginx
# NOTE: the host directory mounted over /etc/nginx must already contain the config files, otherwise the mount hides the container's defaults
docker run -p 80:80 -p 443:443 --name nginx \
-v /usr/local/docker/nginx/html:/usr/share/nginx/html \
-v /usr/local/docker/nginx/logs:/var/log/nginx \
-v /usr/local/docker/nginx/conf/:/etc/nginx \
-d nginx:1.20.1
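# One way to pre-populate the host config directory before the run above (a sketch; the nginx-tmp name is arbitrary):
# copy the image's default /etc/nginx out of a throwaway container
mkdir -p /usr/local/docker/nginx/conf
docker run -d --name nginx-tmp nginx:1.20.1
docker cp nginx-tmp:/etc/nginx/. /usr/local/docker/nginx/conf/
docker rm -f nginx-tmp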
docker-compose deployment
version: '3'
services:
  nginx:
    container_name: nginx
    image: nginx
    restart: always
    ports:
      - 80:80
      - 443:443
    privileged: true
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./conf/nginx/log/:/var/log/nginx
      # NOTE: each mount below hides the container's defaults, so populate the host paths first
      #- ./conf/nginx/conf.d:/etc/nginx/conf.d
      #- ./conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      #- ./conf/nginx/html:/usr/share/nginx/html
# MySQL 5.7
docker run -p 3306:3306 --name mysql57 \
-v /app/mysql/log:/var/log/mysql \
-v /app/mysql/data:/var/lib/mysql \
-v /app/mysql/conf:/etc/mysql/conf.d \
-v /etc/localtime:/etc/localtime:ro \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.7
# MySQL 8.x is stricter about file permissions (it also introduced the secure-file-priv mechanism); the bind-mounted data directory may not be writable, so either open up the host directory permissions, or run the container with extra privileges
chmod -R 777 /app/mysql/data
# --privileged runs a privileged container; root inside it is effectively the host's root
docker run -p 3306:3306 --name mysql8 \
-v /app/mysql/conf:/etc/mysql/conf.d \
-v /app/mysql/log:/var/log/mysql \
-v /app/mysql/data:/var/lib/mysql \
-v /etc/localtime:/etc/localtime:ro \
-e MYSQL_ROOT_PASSWORD=123456 \
--privileged \
-d mysql
# Create a my.cnf in the mounted conf directory (/app/mysql/conf, i.e. /etc/mysql/conf.d inside the container), otherwise Chinese text will be garbled
[client]
default_character_set=utf8
[mysqld]
collation_server = utf8_general_ci
character_set_server = utf8
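# A quick check that the settings took effect (a sketch assuming the mysql57 container and 123456 password from above):
docker exec -it mysql57 mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'character%';"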
docker-compose deployment
version: '3'
services:
  mysql:
    # Container name
    container_name: mysql5
    # Restart policy
    restart: always
    image: mysql:5.7
    ports:
      - "3306:3306"
    volumes:
      # Mount init SQL scripts (run on first startup)
      # - ./mysql/db/:/docker-entrypoint-initdb.d
      # Mount the config directory
      - ./mysql/conf:/etc/mysql/conf.d
      # Mount logs
      - ./mysql/logs:/logs
      # Mount data
      - ./mysql/data:/var/lib/mysql
    command: [
      'mysqld',
      '--innodb-buffer-pool-size=80M',
      '--character-set-server=utf8mb4',
      '--collation-server=utf8mb4_unicode_ci',
      '--default-time-zone=+8:00',
      '--lower-case-table-names=1'
    ]
    environment:
      # root password
      MYSQL_ROOT_PASSWORD: 123456
#======================mysql8.0=================
version: '3'
services:
  mysql:
    container_name: mysql8
    restart: always
    # Note the permission issue introduced with 8.0 (see above)
    privileged: true
    image: mysql:8.0
    volumes:
      # Mount the config directory
      - ./mysql/conf:/etc/mysql/conf.d
      # Mount logs
      - ./mysql/logs:/logs
      # Mount data
      - ./mysql/data:/var/lib/mysql
    command:
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_general_ci
      --explicit_defaults_for_timestamp=true
    environment:
      - TZ=Asia/Shanghai
      - LANG=C.UTF-8
      - MYSQL_ROOT_PASSWORD=root
    ports:
      - 33106:3306
    network_mode: "bridge"
# 1. Create the master container on port 3307
docker run -p 3307:3306 --name mysql-master \
-v /usr/local/docker/mysql-master/log:/var/log/mysql \
-v /usr/local/docker/mysql-master/data:/var/lib/mysql \
-v /usr/local/docker/mysql-master/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root -d mysql:5.7
# 2. On the host, create my.cnf under /usr/local/docker/mysql-master/conf
#==================================
[mysqld]
## server_id must be unique within the replication group
server_id=101
## Databases excluded from binary logging
binlog-ignore-db=mysql
## Enable the binary log
log-bin=mall-mysql-bin
## In-memory binary log cache size per transaction
binlog_cache_size=1M
## Binary log format (mixed, statement, row)
binlog_format=mixed
## Days after which binary logs are purged; the default 0 means never purge automatically
expire_logs_days=7
## Skip the listed replication errors so the slave does not stop on them
## e.g. 1062 = duplicate primary key, 1032 = master/slave rows out of sync
slave_skip_errors=1062
#====================================
# 3. Restart the master after changing the config
docker restart mysql-master
# 4. Enter the mysql-master container
docker exec -it mysql-master /bin/bash
mysql -uroot -proot
# 5. Inside the master, create the user used for replication
CREATE USER 'slave'@'%' IDENTIFIED BY '123456';
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'slave'@'%';
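# Optional: confirm the replication account was created (run inside the master's mysql shell)
SHOW GRANTS FOR 'slave'@'%';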
# 6. Create the slave container on port 3308
docker run -p 3308:3306 --name mysql-slave \
-v /usr/local/docker/mysql-slave/log:/var/log/mysql \
-v /usr/local/docker/mysql-slave/data:/var/lib/mysql \
-v /usr/local/docker/mysql-slave/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root -d mysql:5.7
# 7. On the host, create my.cnf under /usr/local/docker/mysql-slave/conf
# ====================================================
[mysqld]
## server_id must be unique within the replication group
server_id=102
## Databases excluded from binary logging
binlog-ignore-db=mysql
## Enable the binary log, in case this slave later serves as a master for other instances
log-bin=mall-mysql-slave1-bin
## In-memory binary log cache size per transaction
binlog_cache_size=1M
## Binary log format (mixed, statement, row)
binlog_format=mixed
## Days after which binary logs are purged; the default 0 means never purge automatically
expire_logs_days=7
## Skip the listed replication errors so the slave does not stop on them
## e.g. 1062 = duplicate primary key, 1032 = master/slave rows out of sync
slave_skip_errors=1062
## Relay log file name
relay_log=mall-mysql-relay-bin
## Write replicated events to the slave's own binary log
log_slave_updates=1
## Make the slave read-only (users with the SUPER privilege are exempt)
read_only=1
# ====================================================
# 8. Restart the slave after changing the config
docker restart mysql-slave
# 9. On the master, check the master (binary log) status
docker exec -it mysql-master /bin/bash
mysql -uroot -proot
show master status;
mysql> show master status;
+-----------------------+----------+--------------+------------------+-------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+-----------------------+----------+--------------+------------------+-------------------+
| mall-mysql-bin.000001 | 617 | | mysql | |
+-----------------------+----------+--------------+------------------+-------------------+
1 row in set (0.00 sec)
# 10. On the slave, configure replication against the master
change master to master_host='<host machine IP>', master_user='slave', master_password='123456', master_port=3307, master_log_file='mall-mysql-bin.000001', master_log_pos=617, master_connect_retry=30;
mysql> change master to master_host='106.14.76.55', master_user='slave', master_password='123456', master_port=3307, master_log_file='mall-mysql-bin.000001', master_log_pos=617, master_connect_retry=30;
Query OK, 0 rows affected, 2 warnings (0.03 sec)
# master_host: IP address of the master database (the Docker host here)
# master_port: port the master database is exposed on
# master_user: replication account created on the master
# master_password: password of that replication account
# master_log_file: binary log file to start replicating from; take the File value from SHOW MASTER STATUS
# master_log_pos: position to start replicating from; take the Position value from SHOW MASTER STATUS
# master_connect_retry: retry interval, in seconds, when the connection fails
# 11. On the slave, check the replication status
docker exec -it mysql-slave /bin/bash
mysql -uroot -proot
show slave status\G
# 12. Start replication on the slave; it is healthy once both Slave_IO_Running and Slave_SQL_Running show Yes
start slave;
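# Optional end-to-end check (a sketch; the repl_test database and t1 table are made-up names):
# write on the master, then read the same row back on the slave
docker exec -it mysql-master mysql -uroot -proot -e "CREATE DATABASE repl_test; CREATE TABLE repl_test.t1(id INT); INSERT INTO repl_test.t1 VALUES (1);"
docker exec -it mysql-slave mysql -uroot -proot -e "SELECT * FROM repl_test.t1;"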
# Prepare a redis.conf and the matching directories ahead of time, for example:
port 6379
# Enable AOF persistence (optional)
appendonly yes
# Require a password (optional)
requirepass 123
# Comment out bind 127.0.0.1 to allow connections from outside
# bind 127.0.0.1
# Comment out daemonize yes, or set daemonize no: it conflicts with docker run -d and makes the container exit right after start
daemonize no
# More options: https://raw.githubusercontent.com/redis/redis/6.0/redis.conf
docker run -p 6379:6379 --name redis \
--privileged=true \
-v /usr/local/redis/redis.conf:/etc/redis/redis.conf \
-v /usr/local/redis/data:/data -d redis:6.0.8 \
redis-server /etc/redis/redis.conf --appendonly yes
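# Quick sanity check (assumes the requirepass 123 example above; drop -a 123 if no password was set):
docker exec -it redis redis-cli -a 123 ping
# Expected reply: PONG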
docker-compose deployment
version: '3'
services:
  redis:
    image: redis:6.2.5
    container_name: redis
    privileged: true
    volumes:
      - ../redis/data:/data
      - ../redis/conf/redis.conf:/usr/local/etc/redis/redis.conf
      - ../redis/logs:/logs
    command: ["redis-server","/usr/local/etc/redis/redis.conf"]
    ports:
      - 6379:6379
    environment:
      - TZ=Asia/Shanghai
    restart: always
# port: node port
# requirepass: password required for access
# protected-mode: protected mode, on (yes) by default; with it on you must bind an IP or set a password, with it off the node is directly reachable from outside
# daemonize: run as a daemon (background), default no
# appendonly: enable AOF persistence, default no
# cluster-enabled: enable cluster mode, default no
# cluster-config-file: file where the node stores cluster state
# cluster-node-timeout: cluster node timeout
# cluster-announce-ip: IP the node announces to the cluster
# NOTE: to make the cluster reachable from outside, put the server's public IP here; if it will only be accessed from inside the host/network, use the internal address instead
# cluster-announce-port: announced (mapped) node port
# cluster-announce-bus-port: announced cluster bus port
# Shell loop that generates a config file for each node
for port in $(seq 1 6);\
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat <<EOF>>/mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
# First create a custom bridge network
docker network create --driver bridge --subnet 172.38.0.0/24 --gateway 172.38.0.1 redis
# Start the six containers (shell loop; make sure the config files above were generated first)
for i in $(seq 1 6);\
do \
docker run -p 637${i}:6379 -p 1637${i}:16379 \
--name redis-${i} --privileged=true \
-v /mydata/redis/node-${i}/data:/data \
-v /mydata/redis/node-${i}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.1${i} redis \
redis-server /etc/redis/redis.conf
done
# Enter any one of the containers
docker exec -it redis-1 /bin/bash
# Create the cluster; --cluster-replicas 1 assigns one slave to each master
redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
# Open a client
redis-cli -p 6379
# Show the cluster nodes
cluster nodes
# Show cluster info
cluster info
# Connect in cluster mode (-c), otherwise writes that hash to another node will fail
redis-cli -p 6379 -c
# The cluster hashes each key to a slot automatically; if a master goes down, its slave is promoted
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK
# 1. Create and start two new nodes on 6377 and 6378 (prepare their config files first), then confirm the cluster sees 8 nodes
docker run -p 6377:6379 -p 16377:16379 --name redis-7 --privileged=true -v /mydata/redis/node-7/data:/data -v /mydata/redis/node-7/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.17 redis redis-server /etc/redis/redis.conf
docker run -p 6378:6379 -p 16378:16379 --name redis-8 --privileged=true -v /mydata/redis/node-8/data:/data -v /mydata/redis/node-8/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.18 redis redis-server /etc/redis/redis.conf
# 2. Enter the 6377 container
docker exec -it redis-7 /bin/bash
# 3. Add the new 6377 node (no slots yet) to the existing cluster as a master
# redis-cli --cluster add-node <your IP>:6377 <your IP>:6371
# 6377 is the new node joining as a master
# 6371 is any existing cluster node that introduces 6377 to the cluster
# The internal network addresses are used directly here
redis-cli --cluster add-node 172.38.0.17:6379 172.38.0.11:6379
# 4. Check the cluster (1st time)
redis-cli --cluster check 172.38.0.11:6379
# 5. Reshard the slots
redis-cli --cluster reshard 172.38.0.11:6379
# With 4 masters, 16384/4 = 4096 slots each
How many slots do you want to move (from 1 to 16384)? 4096
# Enter the node ID of 172.38.0.17:6379
What is the receiving node ID? 3a732104b11d3cf81d1128def9f0158fb5708ca7
# Enter all
Source node #1: all
# 6. Check the cluster (2nd time)
redis-cli --cluster check 172.38.0.11:6379
172.38.0.11:6379 (9c109831...) -> 0 keys | 4096 slots | 1 slaves.
172.38.0.12:6379 (0100361c...) -> 0 keys | 4096 slots | 1 slaves.
172.38.0.13:6379 (348dc3b1...) -> 0 keys | 4096 slots | 1 slaves.
172.38.0.17:6379 (5d34f842...) -> 0 keys | 4096 slots | 0 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: 9c109831a0afd33f7c13500caf39d3191a13e0b3 172.38.0.11:6379
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
M: 0100361c6056686b671f84877af18ffd22b4c428 172.38.0.12:6379
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: 348dc3b13714d0b07236c2ba6c5147f270b08800 172.38.0.13:6379
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: 6fc86fc554f1f939564e6acc346da222a886ab6a 172.38.0.14:6379
slots: (0 slots) slave
replicates 348dc3b13714d0b07236c2ba6c5147f270b08800
S: 2a2fe251d41085ec7eb53e9bf4e4c749e5d9dbb9 172.38.0.15:6379
slots: (0 slots) slave
replicates 9c109831a0afd33f7c13500caf39d3191a13e0b3
M: 5d34f8422f8b68ea87bd2f8388c686fdbcfdc9a1 172.38.0.17:6379
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 9068998c9b5cd5e4cca939738cc42a2954770b47 172.38.0.16:6379
slots: (0 slots) slave
replicates 0100361c6056686b671f84877af18ffd22b4c428
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
## Why does 172.38.0.17 hold three separate ranges while the old masters stay contiguous?
## A full redistribution would be too expensive, so each of the three old masters hands over a chunk (roughly 1365 slots each) to the new node 172.38.0.17
# 7. Attach 172.38.0.18 as a slave of the new master 172.38.0.17
# redis-cli --cluster add-node <ip>:<new slave port> <ip>:<new master port> --cluster-slave --cluster-master-id <new master node ID>
redis-cli --cluster add-node 172.38.0.18:6379 172.38.0.17:6379 --cluster-slave --cluster-master-id 3a732104b11d3cf81d1128def9f0158fb5708ca7
# 8. Check the cluster (3rd time)
redis-cli --cluster check 172.38.0.18:6379
# 1. Scale back down: take 172.38.0.17 and 172.38.0.18 out of the cluster
# 2. Check the cluster to get the node ID of the slave 172.38.0.18
redis-cli --cluster check 172.38.0.11:6379
# 3. Remove the slave 172.38.0.18 from the cluster
# redis-cli --cluster del-node <ip>:<slave port> <slave node ID>
redis-cli --cluster del-node 172.38.0.18:6379 bdf8a5114bbad6d400aa8b1a5e3f29b174d6676e
## Check again: only 7 nodes remain
redis-cli --cluster check 172.38.0.11:6379
# 4. Empty 172.38.0.17's slots and reassign them; in this example they all go to 172.38.0.11
redis-cli --cluster reshard 172.38.0.11:6379
How many slots do you want to move (from 1 to 16384)? 4096
# Node ID of 172.38.0.11
What is the receiving node ID? edf165b5d01f1a1f276237517d391c86c32d9f93
# Node ID of 172.38.0.17
Source node #1: 3a732104b11d3cf81d1128def9f0158fb5708ca7
Source node #2: done
# 5. Check the cluster again
redis-cli --cluster check 172.38.0.11:6379
# All 4096 slots go to 172.38.0.11, which now holds 8192; handing them to a single node avoids running the reshard three times
# 6. Remove 172.38.0.17 from the cluster
# redis-cli --cluster del-node <ip>:<port> <node ID of 172.38.0.17>
redis-cli --cluster del-node 172.38.0.17:6379 3a732104b11d3cf81d1128def9f0158fb5708ca7
# 7. Check the cluster one more time
redis-cli --cluster check 172.38.0.11:6379
version: '3'
services:
  mongo:
    image: mongo:5.0.9
    container_name: mongo
    restart: always
    ports:
      - 27017:27017
    volumes:
      - /etc/localtime:/etc/localtime # time zone
      - ../mongodb/data/db:/data/db # data
      - ../mongodb/log:/var/log/mongodb # logs
      # - ./config:/etc/mongo # config directory
    privileged: true
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: root
  mongo-express:
    image: mongo-express:0.54
    container_name: mongo-express
    restart: always
    ports:
      - 8079:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: root
      ME_CONFIG_MONGODB_URL: mongodb://root:root@mongo:27017/
Add an administrator
# Log in and switch to the admin db
$ docker exec -it mongo mongo admin
# Authenticate as root; skip this if no root user was created earlier
> db.auth('root','123456')
# Create a user admin/123456 that can manage every database
> db.createUser({user:'admin',pwd: '123456',roles:[{role:"userAdminAnyDatabase", db:"admin"}, "readWriteAnyDatabase"]});
# Successfully added user: { ...
# Authenticate as the new user; returns 0 = failure, 1 = success
> db.auth('admin', '123456')
# 1
# Create a test database
> use testdb
# Insert test data
> db.testdb.insert({"name":"testdb"})
# Create a user scoped to this database and grant it readWrite (the dbOwner role would instead grant full ownership of the database)
> db.createUser({ user:"user_testdb", pwd:"123456", roles: [{ role:"readWrite", db:"testdb" }] })
# Exit the shell
> exit
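# Optional: verify the new user can log in and read the test collection (a sketch using the names created above)
docker exec -it mongo mongo -u user_testdb -p 123456 --authenticationDatabase testdb testdb
> db.testdb.find()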
version: '3'
services:
  minio:
    image: minio/minio:RELEASE.2022-03-26T06-49-28Z
    hostname: "minio"
    ports:
      - "9000:9000" # API port
      - "9001:9001" # console port
    environment:
      MINIO_ACCESS_KEY: root     # console username
      MINIO_SECRET_KEY: rootroot # console password, at least 8 characters
    volumes:
      - ../minio/data:/data           # map ../minio/data on the host to /data in the container
      - ../minio/config:/root/.minio/ # map the config directory
    command: server --console-address ':9001' /data # serve the /data directory in the container
    privileged: true
    restart: always
    logging:
      driver: json-file
      options:
        max-size: "50M" # max size of each log file
        max-file: "10"  # number of log files to keep
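# A minimal smoke test with the MinIO client (a sketch; mc must be installed separately, and the alias/bucket names are made up):
mc alias set localminio http://localhost:9000 root rootroot
mc mb localminio/test-bucket
mc ls localminio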
# External database (MySQL) mode
version: "3"
services:
  nacos:
    image: nacos/nacos-server:${NACOS_VERSION}
    container_name: nacos-standalone-mysql
    env_file:
      - ../nacos/nacos-standlone-mysql.env
    volumes:
      - ../nacos/standalone-logs/:/home/nacos/logs
      - ../nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties
    ports:
      - "8848:8848"
      - "9848:9848"
      - "9555:9555"
    restart: always
Set up the config files
# .env in the same directory as the compose file
NACOS_VERSION=v2.0.4
# ../nacos/nacos-standlone-mysql.env
PREFER_HOST_MODE=hostname
MODE=standalone
SPRING_DATASOURCE_PLATFORM=mysql
MYSQL_SERVICE_HOST=192.168.31.28
MYSQL_SERVICE_DB_NAME=nacos
MYSQL_SERVICE_PORT=3306
MYSQL_SERVICE_USER=root
MYSQL_SERVICE_PASSWORD=root
MYSQL_SERVICE_DB_PARAM=characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
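# The nacos database must exist and already contain the Nacos schema before the first start.
# A sketch (the schema SQL ships with the Nacos release; its file name varies by version, e.g. mysql-schema.sql or nacos-mysql.sql):
mysql -h192.168.31.28 -uroot -proot -e "CREATE DATABASE IF NOT EXISTS nacos DEFAULT CHARACTER SET utf8mb4;"
mysql -h192.168.31.28 -uroot -proot nacos < mysql-schema.sql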
Docker installation and basic applications
A fairly complete set of docker-compose application deployments