git clone https://github.com/ucloud/redis-cluster-operator.git
yum install unzip
unzip redis-cluster-operator-master.zip   # 仅当下载的是 zip 包时需要解压;git clone 得到的是目录,无需解压
tree看下目录结构
CCE要在界面创建项目
[root@work-master1 operator]# kubectl create namespace redis-cluster
1、修改namespace
[root@work-master1 crds]# cat redis.kun_distributedredisclusters_crd.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: distributedredisclusters.redis.kun
  # NOTE: CRDs are cluster-scoped objects; metadata.namespace is ignored by the
  # API server, so setting it here has no effect. Kept only to mirror the
  # "modify namespace" step applied to the other manifests.
  namespace: redis-cluster
spec:
2、创建yaml文件
[root@work-master1 crds]# pwd
/root/zhs/redis/operator/redis-cluster-operator-master/deploy/crds
[root@work-master1 crds]# tree
.
├── redis.kun_distributedredisclusters_crd.yaml
└── redis.kun_redisclusterbackups_crd.yaml
kubectl apply -f deploy/crds/
[root@work-master1 cluster]# pwd
/root/zhs/redis/operator/redis-cluster-operator-master/deploy/cluster
[root@work-master1 cluster]# tree
.
├── cluster_role_binding.yaml
├── cluster_role.yaml
└── operator.yaml
1、修改命名空间
2、创建yaml文件
cd deploy
kubectl apply -f service_account.yaml
cd deploy/cluster/
kubectl apply -f cluster_role.yaml
kubectl apply -f cluster_role_binding.yaml
kubectl apply -f operator.yaml
kubectl get pod -n redis-cluster
cat redis-cluster.yaml
apiVersion: redis.kun/v1alpha1
kind: DistributedRedisCluster
metadata:
  annotations:
    # if your operator runs as cluster-scoped, this annotation is required
    redis.kun/scope: cluster-scoped
  name: example-distributedrediscluster
  namespace: redis-cluster
spec:
  image: redis:5.0.4-alpine
  imagePullPolicy: IfNotPresent
  masterSize: 3        # number of master nodes
  clusterReplicas: 1   # number of replicas per master node
  serviceName: redis-svc
  # resources config
  resources:
    limits:
      cpu: 300m
      memory: 200Mi
    requests:
      cpu: 200m
      memory: 150Mi
  # pv storage
  storage:
    type: persistent-claim
    size: 1Gi
    class: copaddon-zhanghsn-poc
    # WARNING: deleteClaim: true removes the PVCs (and with the default Delete
    # reclaim policy, the Redis data) when the cluster CR is deleted — this is
    # the data-loss behavior described in the troubleshooting notes below.
    # Set false, or patch the PVs' reclaim policy to Retain, to keep data.
    deleteClaim: true
kubectl apply -f redis-cluster.yaml
=============================================================
[root@work-master1 deploy]# kubectl get pod -n redis-cluster
NAME READY STATUS RESTARTS AGE
drc-example-distributedrediscluster-0-0 1/1 Running 0 41m
drc-example-distributedrediscluster-0-1 1/1 Running 0 41m
drc-example-distributedrediscluster-1-0 1/1 Running 0 41m
drc-example-distributedrediscluster-1-1 1/1 Running 0 40m
drc-example-distributedrediscluster-2-0 1/1 Running 0 41m
drc-example-distributedrediscluster-2-1 1/1 Running 0 41m
redis-cluster-operator-764f4f9bb7-5kmnt 1/1 Running 0 41m
[root@work-master1 deploy]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
ceph-csi-cephfs cephfs.csi.ceph.com Delete Immediate true 59d
copaddon-nfs copaddon-nfs Delete Immediate false 61d
copaddon-zhanghsn-poc copaddon-zhanghsn-poc Delete Immediate false 24h
csi-local everest-csi-provisioner Delete Immediate false 62d
csi-local-topology everest-csi-provisioner Delete WaitForFirstConsumer false 62d
[root@work-master1 deploy]# kubectl get pvc -n redis-cluster
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
redis-data-drc-example-distributedrediscluster-0-0 Bound pvc-50157068-c31c-4aad-b33e-08a085a186f4 2Gi RWO copaddon-zhanghsn-poc 71m
redis-data-drc-example-distributedrediscluster-0-1 Bound pvc-207d7a9a-380d-4089-9424-013aecaac497 2Gi RWO copaddon-zhanghsn-poc 70m
redis-data-drc-example-distributedrediscluster-1-0 Bound pvc-8f94c04a-60af-4858-b8a4-19e7bda2b020 2Gi RWO copaddon-zhanghsn-poc 71m
redis-data-drc-example-distributedrediscluster-1-1 Bound pvc-e7be9b40-4f78-4326-a33b-921ef202d13d 2Gi RWO copaddon-zhanghsn-poc 70m
redis-data-drc-example-distributedrediscluster-2-0 Bound pvc-cbf0966c-b335-46b2-a8c8-21ec8a4dca92 2Gi RWO copaddon-zhanghsn-poc 71m
redis-data-drc-example-distributedrediscluster-2-1 Bound pvc-851f2e41-a3e9-4da0-b4c1-98abb7efe0a4 2Gi RWO copaddon-zhanghsn-poc 70m
apiVersion: v1
data:
fix-ip.sh: |-
#!/bin/sh
CLUSTER_CONFIG="/data/nodes.conf"
if [ -f ${CLUSTER_CONFIG} ]; then
if [ -z "${POD_IP}" ]; then
echo "Unable to determine Pod IP address!"
exit 1
fi
echo "Updating my IP to ${POD_IP} in ${CLUSTER_CONFIG}"
sed -i.bak -e "/myself/ s/ .*:6379@16379/ ${POD_IP}:6379@16379/" ${CLUSTER_CONFIG}
fi
exec "$@"
redis.conf: |-
cluster-enabled yes
cluster-config-file /data/nodes.conf
cluster-node-timeout 10000
protected-mode no
daemonize no
pidfile /var/run/redis.pid
port 6379
tcp-backlog 511
bind 0.0.0.0
timeout 3600
tcp-keepalive 1
loglevel verbose
logfile /data/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
#requirepass yl123456
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
lua-time-limit 20000
slowlog-log-slower-than 10000
slowlog-max-len 128
#rename-command FLUSHALL ""
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
shutdown.sh: "#!/bin/sh\nCLUSTER_CONFIG=\"/data/nodes.conf\"\nfailover() {\n\techo
\"Do CLUSTER FAILOVER\"\n\tmasterID=$(cat ${CLUSTER_CONFIG} | grep \"myself\"
| awk '{print $1}')\n\techo \"Master: ${masterID}\"\n\tslave=$(cat ${CLUSTER_CONFIG}
| grep ${masterID} | grep \"slave\" | awk 'NR==1{print $2}' | sed 's/:6379@16379//')\n\techo
\"Slave: ${slave}\"\n\tpassword=$(cat /data/redis_password)\n\tif [[ -z \"${password}\"
]]; then\n\t\tredis-cli -h ${slave} CLUSTER FAILOVER\n\telse\n\t\tredis-cli -h
${slave} -a \"${password}\" CLUSTER FAILOVER\n\tfi\n\techo \"Wait for MASTER <->
SLAVE syncFinished\"\n\tsleep 20\n}\nif [ -f ${CLUSTER_CONFIG} ]; then\n\tcat
${CLUSTER_CONFIG} | grep \"myself\" | grep \"master\" && \\\n\tfailover\nfi"
kind: ConfigMap
metadata:
labels:
managed-by: redis-cluster-operator
redis.kun/name: example-distributedrediscluster
name: redis-cluster-example-distributedrediscluster-bak
namespace: redis-cluster
[root@work-master1 operator]# kubectl exec -it -n redis-cluster drc-example-distributedrediscluster-2-0 -- sh
/data # redis-cli -c -h redis-svc
redis-svc:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:5
cluster_my_epoch:0
cluster_stats_messages_ping_sent:2697
cluster_stats_messages_pong_sent:2797
cluster_stats_messages_meet_sent:1
cluster_stats_messages_sent:5495
cluster_stats_messages_ping_received:2797
cluster_stats_messages_pong_received:2694
cluster_stats_messages_received:5491
redis-svc:6379>
验证方式
1、 kubectl delete pod -n redis-cluster --all
等待pod重启进入pod看集群状态是否是ok
2、kubectl delete pod -n redis-cluster <master节点pod>
查看集群是否还是正常状态
3、先set数据,再kubectl delete pod -n redis-cluster --all
恢复看数据在不在
# Patch the reclaim policy of every PV bound to a PVC in the redis-cluster
# namespace to Retain, so the volumes (and the Redis data on them) survive
# PVC/PV deletion.
# - jsonpath reads .spec.volumeName directly instead of scraping table columns
#   with awk (robust against output-format changes).
# - PVs are cluster-scoped, so no -n flag on "kubectl patch pv".
for pv in $(kubectl get pvc -n redis-cluster -o jsonpath='{.items[*].spec.volumeName}'); do
  kubectl patch pv "$pv" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
done
1、平时不要delete --all
使用operator部署的redis集群,在删除集群时数据会被一并删除(包括dump.rdb文件),所以平时不要delete --all,或在删除前先进行数据备份
2、configmap问题
operator创建的cm在手动修改后会自动恢复为原来的yaml内容
#这里的骚操作
重新创建一个configmap,并修改sts中configmap的挂载指向新的configmap
问题描述
operator部署完redis后,测试时发现delete --all后pod重新起来,redis集群状态是ok的,但是数据没有了
问题定位
1、在用户进行delete --all时,operator可能默认认为用户不再使用该集群,于是连同pv一起删除数据;但这并不合理,pv上的数据不应被删除
2、查看redis的配置时发现configmap中的redis.conf是空文件;修改后又出现它会自动恢复为原样的现象,所以这也是个问题
问题解决
1、修改pv回收策略(persistentVolumeReclaimPolicy)为Retain
# Patch the reclaim policy of every PV bound to a PVC in the redis-cluster
# namespace to Retain, so the volumes (and the Redis data on them) survive
# PVC/PV deletion.
# - jsonpath reads .spec.volumeName directly instead of scraping table columns
#   with awk (robust against output-format changes).
# - PVs are cluster-scoped, so no -n flag on "kubectl patch pv".
for pv in $(kubectl get pvc -n redis-cluster -o jsonpath='{.items[*].spec.volumeName}'); do
  kubectl patch pv "$pv" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
done
2、重新创建一个configmap,修改sts中configmap的挂载为新创建的configmap
总结
经过上面操作后重新测试,delete --all后待pod变为Running,进入pod执行keys *,发现数据还在,问题解决