Deploying a Redis Cluster with Docker (3 Masters, 3 Replicas, Plus Scale-Out and Scale-In)

1: Create six Redis containers

docker run -d --name redis01 --net host --privileged=true -v /opt/redis/redis01:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6381
docker run -d --name redis02 --net host --privileged=true -v /opt/redis/redis02:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6382
docker run -d --name redis03 --net host --privileged=true -v /opt/redis/redis03:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6383
docker run -d --name redis04 --net host --privileged=true -v /opt/redis/redis04:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6384
docker run -d --name redis05 --net host --privileged=true -v /opt/redis/redis05:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6385
docker run -d --name redis06 --net host --privileged=true -v /opt/redis/redis06:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6386
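
The six commands differ only in the container name, data directory, and port, so (as a minimal sketch, assuming the same image, host networking, and /opt/redis base path as above) they can also be generated with a short shell loop:

for i in 1 2 3 4 5 6; do
  docker run -d --name redis0$i --net host --privileged=true \
    -v /opt/redis/redis0$i:/data redis:6.0.8 \
    --cluster-enabled yes --appendonly yes --port 638$i
done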

2: Check that the containers are running

[root@localhost redis]# docker ps 
CONTAINER ID   IMAGE         COMMAND                  CREATED          STATUS          PORTS     NAMES
2230e0a5bf5c   redis:6.0.8   "docker-entrypoint.s…"   7 seconds ago    Up 6 seconds              redis06
0bc9f5da8601   redis:6.0.8   "docker-entrypoint.s…"   9 seconds ago    Up 8 seconds              redis05
e1431fb85072   redis:6.0.8   "docker-entrypoint.s…"   9 seconds ago    Up 8 seconds              redis04
01c2ff5e0090   redis:6.0.8   "docker-entrypoint.s…"   9 seconds ago    Up 8 seconds              redis03
88892f9eb9db   redis:6.0.8   "docker-entrypoint.s…"   9 seconds ago    Up 9 seconds              redis02
a13bfc991867   redis:6.0.8   "docker-entrypoint.s…"   44 seconds ago   Up 43 seconds             redis01

3: Create the 3-master / 3-replica cluster

[root@localhost ~]# docker exec -it redis01 /bin/bash

The option --cluster-replicas 1 creates one replica for each master:
root@localhost:/data# redis-cli --cluster create 192.168.1.31:6381 192.168.1.31:6382 192.168.1.31:6383 192.168.1.31:6384 192.168.1.31:6385 192.168.1.31:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.1.31:6385 to 192.168.1.31:6381
Adding replica 192.168.1.31:6386 to 192.168.1.31:6382
Adding replica 192.168.1.31:6384 to 192.168.1.31:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
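
Because each container mounts /opt/redis/redis0X on the host as its /data directory, the cluster state and AOF persist across container restarts. A quick way to confirm (a sketch; file names assume the Redis 6 defaults):

[root@localhost redis]# ls /opt/redis/redis01
# Expect at least nodes.conf (cluster configuration) and appendonly.aof (AOF persistence file).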

4: Check the cluster state

root@localhost:/data# redis-cli -p 6381
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:300
cluster_stats_messages_pong_sent:310
cluster_stats_messages_sent:610
cluster_stats_messages_ping_received:305
cluster_stats_messages_pong_received:300
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:610

127.0.0.1:6381> cluster nodes
cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381@16381 myself,master - 0 1700124241000 1 connected 0-5460
eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384@16384 slave 89228357317c6b7d6850fffa2f0819085def1a2f 0 1700124242176 2 connected
89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382@16382 master - 0 1700124242000 2 connected 5461-10922
5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386@16386 slave cd9a9149593770a920258bf75e1235ca4b904cd5 0 1700124240000 1 connected
d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385@16385 slave c2436c65625e1d74d8ea5bde328df04699d494e9 0 1700124239000 3 connected
c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383@16383 master - 0 1700124241149 3 connected 10923-16383

From the output, the master → replica relationships are:
192.168.1.31:6381 -> 192.168.1.31:6386
192.168.1.31:6382 -> 192.168.1.31:6384
192.168.1.31:6383 -> 192.168.1.31:6385
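
The same relationships can be confirmed per node with INFO replication (a quick sketch; only the relevant fields are noted):

root@localhost:/data# redis-cli -p 6381 info replication
# Expect role:master and connected_slaves:1 here; running the same
# command against port 6386 should instead report role:slave.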

5: Store data — connect with the cluster-mode client (-c); a plain single-node connection cannot follow slot redirections

# Without -c the client does not follow redirections, so writes to keys on other slots fail:
root@localhost:/data# redis-cli -p 6381
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> set k1 v1
(error) MOVED 12706 192.168.1.31:6383
127.0.0.1:6381> set k2 v2
OK
127.0.0.1:6381> set k3 v3
OK
127.0.0.1:6381> set k4 v4
(error) MOVED 8455 192.168.1.31:6382
127.0.0.1:6381> 

# With -c the client follows the MOVED redirections automatically:
root@localhost:/data# redis-cli -p 6381 -c
127.0.0.1:6381> set k1 v-cluster1
-> Redirected to slot [12706] located at 192.168.1.31:6383
OK
192.168.1.31:6383> set k2 v-cluster2
-> Redirected to slot [449] located at 192.168.1.31:6381
OK
192.168.1.31:6381> set k3 v3
OK
192.168.1.31:6381> set k4 v4
-> Redirected to slot [8455] located at 192.168.1.31:6382
OK
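
The redirections above are driven entirely by each key's hash slot, which can be checked with CLUSTER KEYSLOT (the values below come from the MOVED/Redirected messages shown earlier):

127.0.0.1:6381> cluster keyslot k1
(integer) 12706
127.0.0.1:6381> cluster keyslot k4
(integer) 8455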

6: Master/replica fault tolerance and failover
6.1: Check the cluster info

root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381
192.168.1.31:6381 (cd9a9149...) -> 2 keys | 5461 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 5462 slots | 1 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 5461 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

6.2: Stop 6381 (redis01)

[root@localhost redis]# docker stop redis01

6.3: Check the cluster state

[root@localhost redis]# docker exec -it redis02 /bin/bash
root@localhost:/data# redis-cli -p 6382 -c

Check the cluster state:
127.0.0.1:6382> cluster nodes
eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384@16384 slave 89228357317c6b7d6850fffa2f0819085def1a2f 0 1700133223366 2 connected
5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386@16386 master - 0 1700133224000 7 connected 0-5460
d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385@16385 slave c2436c65625e1d74d8ea5bde328df04699d494e9 0 1700133224388 3 connected
89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382@16382 myself,master - 0 1700133223000 2 connected 5461-10922
cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381@16381 master,fail - 1700133123340 1700133116000 1 disconnected
c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383@16383 master - 0 1700133225412 3 connected 10923-16383

192.168.1.31:6386 has been promoted to master, and 192.168.1.31:6381 is marked master,fail (down).


# Data can still be queried normally
127.0.0.1:6382> get k1
-> Redirected to slot [12706] located at 192.168.1.31:6383
"v-cluster1"
192.168.1.31:6383> get k2
-> Redirected to slot [449] located at 192.168.1.31:6386
"v-cluster2"
192.168.1.31:6386> get k3
"v3"
192.168.1.31:6386> get k4
-> Redirected to slot [8455] located at 192.168.1.31:6382
"v4"

6.4: Start redis01 again

[root@localhost redis]# docker start redis01

6.5: Check the cluster state again

[root@localhost redis]# docker exec -it redis02 /bin/bash
root@localhost:/data# redis-cli -p 6382 -c
127.0.0.1:6382> cluster nodes
eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384@16384 slave 89228357317c6b7d6850fffa2f0819085def1a2f 0 1700134213000 2 connected
5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386@16386 master - 0 1700134213004 7 connected 0-5460
d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385@16385 slave c2436c65625e1d74d8ea5bde328df04699d494e9 0 1700134214020 3 connected
89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382@16382 myself,master - 0 1700134211000 2 connected 5461-10922
cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381@16381 slave 5b40728c470dac59556f7b51866e590e9038bbd9 0 1700134211000 7 connected
c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383@16383 master - 0 1700134212000 3 connected 10923-16383

6381 has rejoined as a replica; 6386 remains the master.

6.6: To restore the original roles (6381 as master, 6386 as replica), simply restart redis06

docker stop redis06
docker start redis06
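
Restarting redis06 works, but it forces the role swap by briefly taking the node down. A gentler alternative (a sketch, not part of the original steps) is to trigger a manual failover from the replica itself:

root@localhost:/data# redis-cli -p 6381 cluster failover
# Run against the current replica (6381); it coordinates with its master (6386)
# and the two nodes swap roles without either going offline.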

6.7: Check the cluster state

root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381
192.168.1.31:6381 (cd9a9149...) -> 2 keys | 5461 slots | 1 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 5461 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

7: Scale out — add node 6387 as a master and node 6388 as its replica

docker run -d --name redis07 --net host --privileged=true -v /opt/redis/redis07:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6387
docker run -d --name redis08 --net host --privileged=true -v /opt/redis/redis08:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6388
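
Before joining them to the cluster, it is worth confirming both new containers are up; one way (a sketch) is to filter docker ps by name:

[root@localhost redis]# docker ps --filter name=redis0 --format '{{.Names}}\t{{.Status}}'
# redis07 and redis08 should be listed as Up alongside the original six containers.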


[root@localhost redis]# docker exec -it redis07 /bin/bash
Add the new node 6387 to the cluster as a master.
6387 is the new node that will join as a master;
6381 is any existing node already in the cluster, used as the entry point.
root@localhost:/data# redis-cli --cluster add-node 192.168.1.31:6387 192.168.1.31:6381
>>> Adding node 192.168.1.31:6387 to cluster 192.168.1.31:6381
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.1.31:6387 to make it join the cluster.
[OK] New node added correctly.


root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381
192.168.1.31:6381 (cd9a9149...) -> 2 keys | 5461 slots | 1 slaves.
192.168.1.31:6387 (f32e0d73...) -> 0 keys | 0 slots | 0 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 5461 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots: (0 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Reshard the hash slots to give the new master 6387 a share:
root@localhost:/data# redis-cli --cluster reshard 192.168.1.31:6381
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots: (0 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096 
What is the receiving node ID? f32e0d7320635a5873beb3594927ed6eea318976 # node ID of 6387
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: all   # take slots from all three existing masters
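
For scripted deployments, the same reshard can also be run non-interactively (a sketch using the node ID above; redis-cli accepts --cluster-from, --cluster-to, --cluster-slots and --cluster-yes for this purpose):

root@localhost:/data# redis-cli --cluster reshard 192.168.1.31:6381 \
  --cluster-from all \
  --cluster-to f32e0d7320635a5873beb3594927ed6eea318976 \
  --cluster-slots 4096 \
  --cluster-yes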


root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381  
192.168.1.31:6381 (cd9a9149...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6387 (f32e0d73...) -> 1 keys | 4096 slots | 0 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Note that 6387 ends up with three separate slot ranges instead of one contiguous range.
Rebalancing everything into contiguous ranges would be too expensive, so each of the three existing masters (6381, 6382, 6383) simply hands over roughly 1365 of its slots to the new node 6387.

Add 6388 as a replica of 6387; f32e0d7320635a5873beb3594927ed6eea318976 is 6387's node ID.
root@localhost:/data# redis-cli --cluster add-node 192.168.1.31:6388 192.168.1.31:6387 --cluster-slave --cluster-master-id f32e0d7320635a5873beb3594927ed6eea318976
>>> Adding node 192.168.1.31:6388 to cluster 192.168.1.31:6387
>>> Performing Cluster Check (using node 192.168.1.31:6387)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.1.31:6388 to make it join the cluster.
Waiting for the cluster to join
>>> Configure node as replica of 192.168.1.31:6387.
[OK] New node added correctly.

root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381
192.168.1.31:6381 (cd9a9149...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6387 (f32e0d73...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: a8fd323608979efd31be1222d281db20b250820b 192.168.1.31:6388
   slots: (0 slots) slave
   replicates f32e0d7320635a5873beb3594927ed6eea318976
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

8: Scale in — remove 6387 and 6388 from the cluster and return to 3 masters and 3 replicas

1: First remove the replica node 6388
root@localhost:/data# redis-cli --cluster del-node 192.168.1.31:6388 a8fd323608979efd31be1222d281db20b250820b
>>> Removing node a8fd323608979efd31be1222d281db20b250820b from cluster 192.168.1.31:6388
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@localhost:/data# 
root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381                                            
192.168.1.31:6381 (cd9a9149...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6387 (f32e0d73...) -> 1 keys | 4096 slots | 0 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
The check confirms that 6388 has been removed; only seven Redis nodes remain.

2: Reassign the freed slots
Empty 6387's slot range and reassign it; in this example all of the freed slots go to 6381.
root@localhost:/data# redis-cli --cluster reshard 192.168.1.31:6381
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096 
What is the receiving node ID? cd9a9149593770a920258bf75e1235ca4b904cd5   # node ID of 6381, which receives the freed slots
Source node #1: f32e0d7320635a5873beb3594927ed6eea318976                  # node ID of 6387, the node being removed
Source node #2: done
    Moving slot 12284 from f32e0d7320635a5873beb3594927ed6eea318976
    Moving slot 12285 from f32e0d7320635a5873beb3594927ed6eea318976
    Moving slot 12286 from f32e0d7320635a5873beb3594927ed6eea318976
    Moving slot 12287 from f32e0d7320635a5873beb3594927ed6eea318976
Do you want to proceed with the proposed reshard plan (yes/no)? yes
Moving slot 12284 from 192.168.1.31:6387 to 192.168.1.31:6381: 
Moving slot 12285 from 192.168.1.31:6387 to 192.168.1.31:6381: 
Moving slot 12286 from 192.168.1.31:6387 to 192.168.1.31:6381: 
Moving slot 12287 from 192.168.1.31:6387 to 192.168.1.31:6381:

root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381  
192.168.1.31:6381 (cd9a9149...) -> 2 keys | 8192 slots | 1 slaves.
192.168.1.31:6387 (f32e0d73...) -> 0 keys | 0 slots | 0 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-6826],[10923-12287] (8192 slots) master
   1 additional replica(s)
M: f32e0d7320635a5873beb3594927ed6eea318976 192.168.1.31:6387
   slots: (0 slots) master
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

All 4096 slots from 6387 have been handed to 6381, which now holds 8192 slots.


3: Now remove 6387
root@localhost:/data# redis-cli --cluster del-node 192.168.1.31:6387 f32e0d7320635a5873beb3594927ed6eea318976
>>> Removing node f32e0d7320635a5873beb3594927ed6eea318976 from cluster 192.168.1.31:6387
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
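
With 6387 removed from the cluster, the two extra containers are no longer needed and can be stopped and removed (a sketch; also delete /opt/redis/redis07 and /opt/redis/redis08 on the host if you want to discard their data):

[root@localhost redis]# docker stop redis07 redis08
[root@localhost redis]# docker rm redis07 redis08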


4: Back to 3 masters and 3 replicas
root@localhost:/data# redis-cli --cluster check 192.168.1.31:6381                                            
192.168.1.31:6381 (cd9a9149...) -> 2 keys | 8192 slots | 1 slaves.
192.168.1.31:6383 (c2436c65...) -> 1 keys | 4096 slots | 1 slaves.
192.168.1.31:6382 (89228357...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.31:6381)
M: cd9a9149593770a920258bf75e1235ca4b904cd5 192.168.1.31:6381
   slots:[0-6826],[10923-12287] (8192 slots) master
   1 additional replica(s)
S: eac210d52a6c6ddeb9556ffa1820d13a89828264 192.168.1.31:6384
   slots: (0 slots) slave
   replicates 89228357317c6b7d6850fffa2f0819085def1a2f
S: d8f0436ada2c423bc07d8cba38461eb3bb00ca3a 192.168.1.31:6385
   slots: (0 slots) slave
   replicates c2436c65625e1d74d8ea5bde328df04699d494e9
S: 5b40728c470dac59556f7b51866e590e9038bbd9 192.168.1.31:6386
   slots: (0 slots) slave
   replicates cd9a9149593770a920258bf75e1235ca4b904cd5
M: c2436c65625e1d74d8ea5bde328df04699d494e9 192.168.1.31:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: 89228357317c6b7d6850fffa2f0819085def1a2f 192.168.1.31:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
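
As a final sanity check (a sketch; the expected values are the ones written in step 5), the keys can be read back through any node in cluster mode to confirm nothing was lost during the resharding:

root@localhost:/data# redis-cli -p 6381 -c get k1
"v-cluster1"
root@localhost:/data# redis-cli -p 6381 -c get k4
"v4"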
