Background notes:
1. Role of sharding:
a. Memory capacity can be scaled dynamically.
b. Data can be spread across multiple nodes for storage.
2. Role of sentinel:
Provides Redis high availability (HA for short).
Redis cluster implementation
Note: with the Redis cluster configuration, communication and leader election happen inside the cluster itself, which provides both shard scaling and Redis high availability.
Split-brain:
1 master + 2 slaves - the master goes down - the 2 slaves tie the vote three times - two masters appear (a single connection now faces multiple masters).
Because the election vote ends in a tie, more than one master appears (the service as a whole breaks down, since one request cannot be sent to two masters). This phenomenon is called split-brain.
Fix: increase the number of nodes so that a strict majority (more than half) can always be reached.
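As a quick side sketch (my own illustration, not from the original notes): with N voting nodes, a failover needs at least floor(N/2)+1 agreeing votes, which is why an odd node count avoids ties.
# Majority (quorum) needed for a failover vote with N voting nodes
for N in 3 5 7; do
  echo "nodes=$N  votes needed=$(( N / 2 + 1 ))"
done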
Cluster setup steps
Decide the cluster size
Small scale: 3 masters, 6 slaves
Ports: 7000-7008, 9 instances in total
First shut down the existing Redis services
[root@localhost sentinel]# ps -ef|grep redis
root 6362 1 0 15:45 ? 00:00:32 redis-server *:6380
root 6366 1 0 15:45 ? 00:00:43 redis-server *:6381
root 6601 1 0 16:26 ? 00:00:30 redis-server *:6379
root 8185 7688 0 20:15 pts/1 00:00:00 grep redis
[root@localhost sentinel]# kill -9 6362
[root@localhost sentinel]# kill -9 6366
[root@localhost sentinel]# kill -9 6601
[root@localhost sentinel]# ps -ef|grep redis
root 8189 7688 0 20:16 pts/1 00:00:00 grep redis
[root@localhost sentinel]# cd ..
[root@localhost redis-3.2.8]# mkdir cluster
[root@localhost redis-3.2.8]# ls
00-RELEASENOTES CONTRIBUTING dump.rdb MANIFESTO redis.conf~ runtest-sentinel shards utils
BUGS COPYING INSTALL README.md runtest sentinel src
cluster deps Makefile redis.conf runtest-cluster sentinel.conf tests
[root@localhost redis-3.2.8]# cd cluster/
[root@localhost cluster]# ls
[root@localhost cluster]# mkdir 7000
[root@localhost cluster]# ls
7000
[root@localhost cluster]# cd ../
1. Copy the redis.conf file into the 7000 directory
[root@localhost redis-3.2.8]# cp redis.conf cluster/7000
[root@localhost redis-3.2.8]# cd cluster/
[root@localhost cluster]# cd 7000/
[root@localhost 7000]# vim redis.conf
2. Disable AOF and use RDB mode (line 593)
appendonly no
3. Remove the IP binding: comment out line 61 with a leading #
# bind 127.0.0.1 ::1
4. Disable protected mode (line 80)
protected-mode no
5. Change the port number (line 84)
port 7000
6. Enable background (daemon) startup (line 128). If this is not enabled, the server stays in the foreground and prints the Redis logo as soon as it starts.
daemonize yes
7. Move the pid file into the directory you created; note that an absolute path is required (line 150).
Every service has a pid file that records the process ID it is currently using. With 9 instances running, the pid file names must not clash; for easier maintenance, put each one under its own directory (7000 here), using the absolute path.
Open another terminal window to the VM, and use pwd to get the path to copy:
[root@bogon 7000]# pwd
/usr/local/src/redis-3.2.8/cluster/7000
Then change the setting to:
pidfile /usr/local/src/redis-3.2.8/cluster/7000/redis_7000.pid
8. Change the persistence file path dir ./
to your own path (line 247); note: no trailing / after 7000
dir /usr/local/src/redis-3.2.8/cluster/7000
9. Change the memory policy: copy allkeys-lru from line 543
and overwrite # maxmemory-policy noeviction on line 560.
When memory is full, keys are evicted using the LRU algorithm.
maxmemory-policy allkeys-lru
10. Enable cluster mode: remove the leading #
(line 721).
Cluster mode is off by default.
cluster-enabled yes
11. Enable the cluster config file (line 729).
Each cluster node records itself in such a .conf file, so change the original # cluster-config-file nodes-6379.conf
to the following:
cluster-config-file nodes-7000.conf
12. Change the cluster election timeout (line 735); remove the leading #.
If a node cannot be reached within this timeout (15000 ms = 15 s by default), an election starts.
cluster-node-timeout 15000
Save and exit:
:wq
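For reference, after the edits above the changed lines in cluster/7000/redis.conf should look like this:
appendonly no
# bind 127.0.0.1 ::1
protected-mode no
port 7000
daemonize yes
pidfile /usr/local/src/redis-3.2.8/cluster/7000/redis_7000.pid
dir /usr/local/src/redis-3.2.8/cluster/7000
maxmemory-policy allkeys-lru
cluster-enabled yes
cluster-config-file nodes-7000.conf
cluster-node-timeout 15000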
[root@localhost 7000]# cd ..
[root@localhost cluster]# ls
7000
[root@localhost cluster]# cp -r 7000 7001
[root@localhost cluster]# cp -r 7000 7002
[root@localhost cluster]# cp -r 7000 7003
[root@localhost cluster]# cp -r 7000 7004
[root@localhost cluster]# cp -r 7000 7005
[root@localhost cluster]# cp -r 7000 7006
[root@localhost cluster]# cp -r 7000 7007
[root@localhost cluster]# cp -r 7000 7008
[root@localhost cluster]# ls
7000 7001 7002 7003 7004 7005 7006 7007 7008
[root@localhost cluster]# vim 7001/redis.conf
Replace every occurrence of 7000 with 7001; the % range covers the whole file
and the g flag replaces every match on each line:
:%s/7000/7001/g
[root@localhost cluster]# vim 7002/redis.conf
:%s/7000/7002/g
[root@localhost cluster]# vim 7003/redis.conf
:%s/7000/7003/g
[root@localhost cluster]# vim 7004/redis.conf
:%s/7000/7004/g
[root@localhost cluster]# vim 7005/redis.conf
:%s/7000/7005/g
[root@localhost cluster]# vim 7006/redis.conf
:%s/7000/7006/g
[root@localhost cluster]# vim 7007/redis.conf
:%s/7000/7007/g
[root@localhost cluster]# vim 7008/redis.conf
:%s/7000/7008/g
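Optional shortcut (a sketch of my own, not one of the original steps): the same replacement can be done for all eight copies with sed from the cluster directory, instead of opening vim for each file.
# Replace every 7000 with the copy's own port number in 7001-7008/redis.conf
for p in 7001 7002 7003 7004 7005 7006 7007 7008; do
  sed -i "s/7000/$p/g" "$p/redis.conf"
done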
Create and edit the shell startup script start.sh
[root@bogon cluster]# ls
7000 7001 7002 7003 7004 7005 7006 7007 7008
[root@bogon cluster]# vim start.sh
Keep the trailing & on every line, including the last one.
#!/bin/sh
redis-server 7000/redis.conf &
redis-server 7001/redis.conf &
redis-server 7002/redis.conf &
redis-server 7003/redis.conf &
redis-server 7004/redis.conf &
redis-server 7005/redis.conf &
redis-server 7006/redis.conf &
redis-server 7007/redis.conf &
redis-server 7008/redis.conf &
Save:
:wq
A shutdown script stop.sh could be written like this; it is not created here for now:
#!/bin/sh
redis-cli -p 7000 shutdown &
redis-cli -p 7001 shutdown &
redis-cli -p 7002 shutdown &
redis-cli -p 7003 shutdown &
redis-cli -p 7004 shutdown &
redis-cli -p 7005 shutdown &
redis-cli -p 7006 shutdown &
redis-cli -p 7007 shutdown &
redis-cli -p 7008 shutdown &
Take a look:
[root@bogon cluster]# ls
7000 7001 7002 7003 7004 7005 7006 7007 7008 start.sh
Start Redis:
[root@bogon cluster]# sh start.sh
Verify:
[root@bogon cluster]# ps -ef|grep redis
root 2538 1 0 08:25 ? 00:00:06 /usr/bin/gnome-terminal -x /bin/sh -c cd '/usr/local/src/redis-3.2.8' && exec $SHELL
root 9180 1 0 10:55 ? 00:00:00 redis-server *:7004 [cluster]
root 9181 1 0 10:55 ? 00:00:00 redis-server *:7005 [cluster]
root 9182 1 0 10:55 ? 00:00:00 redis-server *:7006 [cluster]
root 9183 1 0 10:55 ? 00:00:00 redis-server *:7002 [cluster]
root 9184 1 0 10:55 ? 00:00:00 redis-server *:7007 [cluster]
root 9185 1 0 10:55 ? 00:00:00 redis-server *:7003 [cluster]
root 9186 1 0 10:55 ? 00:00:00 redis-server *:7008 [cluster]
root 9187 1 0 10:55 ? 00:00:00 redis-server *:7001 [cluster]
root 9188 1 0 10:55 ? 00:00:00 redis-server *:7000 [cluster]
root 9208 6044 0 10:55 pts/2 00:00:00 grep redis
Next, create the Redis cluster with the redis-trib.rb script, which is written in Ruby.
Check that it is available: go into the Redis src directory and look for redis-trib.rb
[root@bogon src]# cd redis-3.2.8/
[root@bogon redis-3.2.8]# cd src
[root@bogon src]# ls
adlist.c bio.c crc64.h geo.o Makefile pubsub.c redis-check-aof.c rio.c sha1.c t_hash.o ziplist.o
adlist.h bio.h crc64.o help.h Makefile.dep pubsub.o redis-check-aof.o rio.h sha1.h t_list.c zipmap.c
adlist.o bio.o db.c hyperloglog.c memtest.c quicklist.c redis-check-rdb rio.o sha1.o t_list.o zipmap.h
ae.c bitops.c db.o hyperloglog.o memtest.o quicklist.h redis-check-rdb.c scripting.c slowlog.c t_set.c zipmap.o
ae_epoll.c bitops.o debug.c intset.c mkreleasehdr.sh quicklist.o redis-check-rdb.o scripting.o slowlog.h t_set.o zmalloc.c
ae_evport.c blocked.c debugmacro.h intset.h multi.c rand.c redis-cli sdsalloc.h slowlog.o t_string.c zmalloc.h
ae.h blocked.o debug.o intset.o multi.o rand.h redis-cli.c sds.c solarisfixes.h t_string.o zmalloc.o
ae_kqueue.c cluster.c dict.c latency.c networking.c rand.o redis-cli.o sds.h sort.c t_zset.c
ae.o cluster.h dict.h latency.h networking.o rdb.c redis-sentinel sds.o sort.o t_zset.o
ae_select.c cluster.o dict.o latency.o notify.c rdb.h redis-server sentinel.c sparkline.c util.c
anet.c config.c endianconv.c lzf_c.c notify.o rdb.o redis-trib.rb sentinel.o sparkline.h util.h
anet.h config.h endianconv.h lzf_c.o object.c redisassert.h release.c server.c sparkline.o util.o
anet.o config.o endianconv.o lzf_d.c object.o redis-benchmark release.h server.h syncio.c valgrind.sup
aof.c crc16.c fmacros.h lzf_d.o pqsort.c redis-benchmark.c release.o server.o syncio.o version.h
aof.o crc16.o geo.c lzf.h pqsort.h redis-benchmark.o replication.c setproctitle.c testhelp.h ziplist.c
asciilogo.h crc64.c geo.h lzfP.h pqsort.o redis-check-aof replication.o setproctitle.o t_hash.c ziplist.h
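redis-trib.rb is there. Since it is a Ruby script, it also needs a Ruby interpreter and the redis gem; a quick check could look like this (a sketch, not part of the original steps):
ruby -v              # confirm Ruby itself is installed
gem list redis       # the script needs the "redis" gem
# gem install redis  # install it if the list above comes back empty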
Adjust the cluster creation command:
remember to change the IP to your own. --replicas 2 means each master gets two slave nodes; with a group of nine instances, 9 / 3 = 3, so the data is split into 3 shards.
./src/redis-trib.rb create --replicas 2 192.168.216.200:7000 192.168.216.200:7001 192.168.216.200:7002 192.168.216.200:7003 192.168.216.200:7004 192.168.216.200:7005 192.168.216.200:7006 192.168.216.200:7007 192.168.216.200:7008
Run it from the Redis root directory, e.g.:
[root@bogon src]# cd ../
[root@bogon redis-3.2.8]# ./src/redis-trib.rb create --replicas 2 192.168.6.129:7000 192.168.6.129:7001 192.168.6.129:7002 192.168.6.129:7003 192.168.6.129:7004 192.168.6.129:7005 192.168.6.129:7006 192.168.6.129:7007 192.168.6.129:7008
If output like the following appears:
[root@bogon redis-3.2.8]# ./src/redis-trib.rb create --replicas 2 192.168.6.129:7000 192.168.6.129:7001 192.168.6.129:7002 192.168.6.129:7003 192.168.6.129:7004 192.168.6.129:7005 192.168.6.129:7006 192.168.6.129:7007 192.168.6.129:7008
>>> Creating cluster
>>> Performing hash slots allocation on 9 nodes...
Using 3 masters:
192.168.6.129:7000
192.168.6.129:7001
192.168.6.129:7002
Adding replica 192.168.6.129:7003 to 192.168.6.129:7000
Adding replica 192.168.6.129:7004 to 192.168.6.129:7000
Adding replica 192.168.6.129:7005 to 192.168.6.129:7001
Adding replica 192.168.6.129:7006 to 192.168.6.129:7001
Adding replica 192.168.6.129:7007 to 192.168.6.129:7002
Adding replica 192.168.6.129:7008 to 192.168.6.129:7002
M: 46af3be33b0e9a9651c474888f75071d4d58fa79 192.168.6.129:7000
slots:0-5460 (5461 slots) master
M: 4499d3a5cbc53b3492d9a9a15844eb24d51deecc 192.168.6.129:7001
slots:5461-10922 (5462 slots) master
M: 1f9318fa8a341037de40df3f6ea121f0f1985742 192.168.6.129:7002
slots:10923-16383 (5461 slots) master
S: daa56cd1877c1994db4ed1fdbf21d5ca19718937 192.168.6.129:7003
replicates 46af3be33b0e9a9651c474888f75071d4d58fa79
S: 661be9722d61f19055daa49b4ed5f323f69d0741 192.168.6.129:7004
replicates 46af3be33b0e9a9651c474888f75071d4d58fa79
S: 253be281ae69c364aeb5e76d6221373fcbf48010 192.168.6.129:7005
replicates 4499d3a5cbc53b3492d9a9a15844eb24d51deecc
S: b99f5fbffe2eaf6c9ec0dd249577ec8bff372da7 192.168.6.129:7006
replicates 4499d3a5cbc53b3492d9a9a15844eb24d51deecc
S: cda1feb626fbb871304b20138cbfc8f67bcf06ea 192.168.6.129:7007
replicates 1f9318fa8a341037de40df3f6ea121f0f1985742
S: 31e4e8ee920c77cbe331cc43b7b2356ef701b590 192.168.6.129:7008
replicates 1f9318fa8a341037de40df3f6ea121f0f1985742
Can I set the above configuration? (type 'yes' to accept):
When the prompt asks for yes, type yes and press Enter.
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join.....
>>> Performing Cluster Check (using node 192.168.6.129:7000)
M: 46af3be33b0e9a9651c474888f75071d4d58fa79 192.168.6.129:7000
slots:0-5460 (5461 slots) master
2 additional replica(s)
S: b99f5fbffe2eaf6c9ec0dd249577ec8bff372da7 192.168.6.129:7006
slots: (0 slots) slave
replicates 4499d3a5cbc53b3492d9a9a15844eb24d51deecc
S: 253be281ae69c364aeb5e76d6221373fcbf48010 192.168.6.129:7005
slots: (0 slots) slave
replicates 4499d3a5cbc53b3492d9a9a15844eb24d51deecc
M: 4499d3a5cbc53b3492d9a9a15844eb24d51deecc 192.168.6.129:7001
slots:5461-10922 (5462 slots) master
2 additional replica(s)
M: 1f9318fa8a341037de40df3f6ea121f0f1985742 192.168.6.129:7002
slots:10923-16383 (5461 slots) master
2 additional replica(s)
S: 31e4e8ee920c77cbe331cc43b7b2356ef701b590 192.168.6.129:7008
slots: (0 slots) slave
replicates 1f9318fa8a341037de40df3f6ea121f0f1985742
S: daa56cd1877c1994db4ed1fdbf21d5ca19718937 192.168.6.129:7003
slots: (0 slots) slave
replicates 46af3be33b0e9a9651c474888f75071d4d58fa79
S: cda1feb626fbb871304b20138cbfc8f67bcf06ea 192.168.6.129:7007
slots: (0 slots) slave
replicates 1f9318fa8a341037de40df3f6ea121f0f1985742
S: 661be9722d61f19055daa49b4ed5f323f69d0741 192.168.6.129:7004
slots: (0 slots) slave
replicates 46af3be33b0e9a9651c474888f75071d4d58fa79
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Seeing the two [OK] lines means the cluster has been built successfully.
Double-check:
[root@bogon redis-3.2.8]# redis-cli -p 7000
127.0.0.1:7000> info replication
# Replication
role:master
connected_slaves:2
slave0:ip=192.168.6.129,port=7004,state=online,offset=295,lag=0
slave1:ip=192.168.6.129,port=7003,state=online,offset=295,lag=0
master_repl_offset:295
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:294
127.0.0.1:7000> exit
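Two more checks worth running (my own sketch, not from the original notes): the cluster-level state and the per-node slot layout.
redis-cli -p 7000 cluster info    # expect cluster_state:ok and cluster_known_nodes:9
redis-cli -p 7000 cluster nodes   # one line per node with its role and slot range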
Internally, a Redis cluster partitions data by hash slots: 16384 slots means 16384 slot positions, and that is all the storage positions there are. The slots are owned by the 3 master nodes, each of which is in principle responsible for roughly one third of them.
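A key is mapped to a slot as CRC16(key) mod 16384. As a small illustration (the key name user:1001 is just an example of my own):
redis-cli -p 7000 cluster keyslot user:1001   # which of the 16384 slots this key hashes to
redis-cli -c -p 7000 set user:1001 tom        # -c makes redis-cli follow MOVED redirections to the owning master
redis-cli -c -p 7000 get user:1001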