[root@docker redis-node-1]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
[root@docker ~]# docker run -d --name redis-node-1 --net host --privileged=true -v /data/redis/share/redis-node-1:/data redis:latest --cluster-enabled yes --appendonly yes --port 6381
[root@docker ~]# docker run -d --name redis-node-2 --net host --privileged=true -v /data/redis/share/redis-node-2:/data redis:latest --cluster-enabled yes --appendonly yes --port 6382
[root@docker ~]# docker run -d --name redis-node-3 --net host --privileged=true -v /data/redis/share/redis-node-3:/data redis:latest --cluster-enabled yes --appendonly yes --port 6383
[root@docker ~]# docker run -d --name redis-node-4 --net host --privileged=true -v /data/redis/share/redis-node-4:/data redis:latest --cluster-enabled yes --appendonly yes --port 6384
[root@docker ~]# docker run -d --name redis-node-5 --net host --privileged=true -v /data/redis/share/redis-node-5:/data redis:latest --cluster-enabled yes --appendonly yes --port 6385
[root@docker ~]# docker run -d --name redis-node-6 --net host --privileged=true -v /data/redis/share/redis-node-6:/data redis:latest --cluster-enabled yes --appendonly yes --port 6386
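The six docker run commands differ only in the node index and port, so they can also be generated with a loop; a minimal sketch using the same image, flags, and volume layout as above:

for i in $(seq 1 6); do
  docker run -d --name redis-node-$i --net host --privileged=true \
    -v /data/redis/share/redis-node-$i:/data redis:latest \
    --cluster-enabled yes --appendonly yes --port 638$i
done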
docker exec -it redis-node-1 /bin/bash
// Note: the following command can only be run after entering the Docker container, and you must substitute your own real IP address; --cluster-replicas 1 means one slave node is created for each master.
redis-cli --cluster create 192.168.0.0:6381 192.168.0.0:6382 192.168.0.0:6383 192.168.0.0:6384 192.168.0.0:6385 192.168.0.0:6386 --cluster-replicas 1
This command means:
redis-cli: run the Redis command-line client.
--cluster create: run the client in cluster-creation mode.
--cluster-replicas 1: set the number of replicas per master to 1, i.e. each master gets exactly one slave.
Make sure the six Redis containers are already started and reachable before running this command, and double-check the cluster configuration and deployment steps so the cluster is set up correctly.
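A quick way to confirm the containers are actually up before creating the cluster (a sketch, assuming the container names used above):

docker ps --filter "name=redis-node" --format "{{.Names}}\t{{.Status}}"
redis-cli -p 6381 ping   # should answer PONG; repeat for ports 6381-6386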
[root@docker share]# docker exec -it redis-node-1 /bin/bash
root@docker:/data# redis-cli --cluster create 192.168.0.0:6381 192.168.0.0:6382 192.168.0.0:6383 192.168.0.0:6384 192.168.0.0:6385 192.168.0.0:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.0.0:6385 to 192.168.0.0:6381
Adding replica 192.168.0.0:6386 to 192.168.0.0:6382
Adding replica 192.168.0.0:6384 to 192.168.0.0:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: c4b154db9f9afaf5cbec7355a8c3f0ed2215f529 192.168.0.0:6381
slots:[0-5460] (5461 slots) master
M: 2ce3e1eabd023d3a9e7206b6cb20b521741e4204 192.168.0.0:6382
slots:[5461-10922] (5462 slots) master
M: e58d7f988509663440af1c2cf69fcf9cf2211876 192.168.0.0:6383
slots:[10923-16383] (5461 slots) master
S: 11a25d98c18861376d69064b50ea6ec6e40fae8d 192.168.0.0:6384
replicates e58d7f988509663440af1c2cf69fcf9cf2211876
S: 96565be50f438fd485c7073bd45a0e64d5f14227 192.168.0.0:6385
replicates c4b154db9f9afaf5cbec7355a8c3f0ed2215f529
S: 8c90df02c76fb3740455ac3bf9a7aa9171112c7e 192.168.0.0:6386
replicates 2ce3e1eabd023d3a9e7206b6cb20b521741e4204
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.0.0:6381)
M: c4b154db9f9afaf5cbec7355a8c3f0ed2215f529 192.168.0.0:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 8c90df02c76fb3740455ac3bf9a7aa9171112c7e 192.168.0.0:6386
slots: (0 slots) slave
replicates 2ce3e1eabd023d3a9e7206b6cb20b521741e4204
M: 2ce3e1eabd023d3a9e7206b6cb20b521741e4204 192.168.0.0:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
M: e58d7f988509663440af1c2cf69fcf9cf2211876 192.168.0.0:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 96565be50f438fd485c7073bd45a0e64d5f14227 192.168.0.0:6385
slots: (0 slots) slave
replicates c4b154db9f9afaf5cbec7355a8c3f0ed2215f529
S: 11a25d98c18861376d69064b50ea6ec6e40fae8d 192.168.0.0:6384
slots: (0 slots) slave
replicates e58d7f988509663440af1c2cf69fcf9cf2211876
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@docker:/data#
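The same consistency check that ran at creation time can be repeated at any later point with the check subcommand, pointed at any live node (placeholder IP as above):

redis-cli --cluster check 192.168.0.0:6381
# prints each node's slots and replicas and verifies that all 16384 slots are covered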
root@docker:/data# redis-cli -p 6381
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> cluster info
cluster_state:ok //cluster state
cluster_slots_assigned:16384 //slots already assigned
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6 //known nodes
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:221
cluster_stats_messages_pong_sent:237
cluster_stats_messages_sent:458
cluster_stats_messages_ping_received:232
cluster_stats_messages_pong_received:221
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:458
total_cluster_links_buffer_limit_exceeded:0
127.0.0.1:6381> cluster nodes
8c90df02c76fb3740455ac3bf9a7aa9171112c7e 192.168.0.0:6386@16386 slave 2ce3e1eabd023d3a9e7206b6cb20b521741e4204 0 1695704760496 2 connected
2ce3e1eabd023d3a9e7206b6cb20b521741e4204 192.168.0.0:6382@16382 master - 0 1695704762554 2 connected 5461-10922
c4b154db9f9afaf5cbec7355a8c3f0ed2215f529 192.168.0.0:6381@16381 myself,master - 0 1695704761000 1 connected 0-5460
e58d7f988509663440af1c2cf69fcf9cf2211876 192.168.0.0:6383@16383 master - 0 1695704761537 3 connected 10923-16383
96565be50f438fd485c7073bd45a0e64d5f14227 192.168.0.0:6385@16385 slave c4b154db9f9afaf5cbec7355a8c3f0ed2215f529 0 1695704762000 1 connected
11a25d98c18861376d69064b50ea6ec6e40fae8d 192.168.0.0:6384@16384 slave e58d7f988509663440af1c2cf69fcf9cf2211876 0 1695704760000 3 connected
127.0.0.1:6381>
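Note that redis-cli was started above without -c, i.e. in single-node mode; in that mode, writing a key whose hash slot belongs to another master is rejected with a MOVED error. Passing -c enables cluster mode, so the client follows MOVED/ASK redirects automatically. A minimal sketch (the key name k1 is an arbitrary example):

redis-cli -p 6381 -c set k1 v1   # -c: follow the MOVED redirect to the master owning k1's slot
redis-cli -p 6381 -c get k1      # reads back the value through the same redirection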