This utility builds on my protobuf serialization/deserialization helper class; see: https://mp.csdn.net/postedit/82427119
Techniques covered: generic return values, distributed locking with Redisson, Redis Sentinel deployment configuration, Redis cluster/replication deployment configuration, and more.
/**
* Copyright [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package framework.webapp.commons.utils;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.redisson.api.RBucket;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
/**
 * **************************************************
 * @description Redis utility class, backed by Redisson
 * @author karl
 * @version 1.0, 2018-08-17
 * @see HISTORY
 *
 * Date        Desc          Author  Operation
 * 2018-08-17  File created  karl    create
 *
 * @since 2017 Phyrose Science & Technology (Kunming) Co., Ltd.
 * **************************************************
 */
public class RedisUtil {
private static final Log log = LogFactory.getLog(RedisUtil.class);
private static final String DISTRIBUTED_LOCK_FLAG = "DistributedLock_";
private static volatile RedissonClient redissonManager;
    public static RedissonClient getRedissonManager() {
        if (redissonManager == null) {
            // Double-checked locking; synchronize on the class instead of an interned String constant
            synchronized (RedisUtil.class) {
                if (redissonManager == null) {
                    // Load the Redisson client from the XML configuration file
                    ApplicationContext applicationContext = new ClassPathXmlApplicationContext("classpath:spring-redisson.xml");
                    redissonManager = (RedissonClient) applicationContext.getBean("redissonManager");
                    // Alternatively, look the bean up from an existing Spring container:
                    //redissonManager = ApplicationContextUtil.getBean("redissonManager");
                }
            }
        }
        return redissonManager;
    }
    /**
     * Acquires the Redisson distributed lock for the given name, with a custom lease time.
     * The call blocks until the lock is available; availability is signalled via Redis pub/sub.
     *
     * <pre>{@code
     * try {
     *     RedisUtil.lock("myLock", 1, TimeUnit.MINUTES);
     *     // manipulate protected state
     * } finally {
     *     RedisUtil.unlock("myLock");
     * }
     * }</pre>
     *
     * If the lock is not available then the current thread becomes disabled for
     * thread scheduling purposes and lies dormant until the lock has been acquired.
     *
     * If the lock is acquired, it is held until {@code unlock} is invoked,
     * or until {@code leaseTime} has passed since the lock was granted,
     * whichever comes first.
     *
     * @param lockname  name of the lock
     * @param leaseTime the maximum time to hold the lock after granting it, before
     *                  automatically releasing it if it hasn't already been released by
     *                  invoking {@code unlock}. If leaseTime is -1, the lock is held
     *                  until explicitly unlocked.
     * @param unit      the time unit of the {@code leaseTime} argument
     */
    public static void lock(String lockname, long leaseTime, TimeUnit unit) {
        String key = DISTRIBUTED_LOCK_FLAG + lockname;
        RLock lock = getRedissonManager().getLock(key);
        // The lease time acts as a safety timeout: the lock is force-released when it expires, preventing deadlock
        lock.lock(leaseTime, unit);
    }
    /**
     * Releases the Redisson distributed lock for the given name.
     * Each call to lock should be paired with a call to unlock.
     *
     * <pre>{@code
     * try {
     *     RedisUtil.lock("myLock", 1, TimeUnit.MINUTES);
     *     // manipulate protected state
     * } finally {
     *     RedisUtil.unlock("myLock");
     * }
     * }</pre>
     *
     * @param lockname name of the lock
     */
    public static void unlock(String lockname) {
        String key = DISTRIBUTED_LOCK_FLAG + lockname;
        RLock lock = getRedissonManager().getLock(key);
        lock.unlock();
    }
    /**
     * Acquires the Redisson distributed lock for the given name. The call blocks until
     * the lock is available; availability is signalled via Redis pub/sub. The lock is
     * released automatically after a default lease time of 1 minute.
     *
     * <pre>{@code
     * try {
     *     RedisUtil.lock("myLock");
     *     // manipulate protected state
     * } finally {
     *     RedisUtil.unlock("myLock");
     * }
     * }</pre>
     *
     * @param lockname name of the lock
     */
    public static void lock(String lockname) {
        String key = DISTRIBUTED_LOCK_FLAG + lockname;
        RLock lock = getRedissonManager().getLock(key);
        // 1-minute lease time: the lock is force-released when it expires, preventing deadlock
        lock.lock(1, TimeUnit.MINUTES);
    }
    /**
     * Reads the value stored under {@code name} from Redis and deserializes it into an
     * instance of the given class. May throw a runtime exception if deserialization fails.
     *
     * @param <T>   target type
     * @param clazz class of the target type
     * @param name  Redis key
     * @return the deserialized value, or null if the key does not exist
     */
    public static <T> T get(Class<T> clazz, String name) {
        RBucket<byte[]> keyObject = getRedissonManager().getBucket(name);
        byte[] bytes = keyObject.get();
        if (bytes == null) {
            return null;
        }
        return SerializationUtil.deserialize(clazz, bytes);
    }
    /**
     * Deletes the value stored under {@code name}.
     *
     * @param name Redis key
     */
    public static void remove(String name) {
        getRedissonManager().getBucket(name).delete();
    }
    /**
     * Caches a value in Redis with no expiration.
     *
     * @param name  Redis key
     * @param value value to cache (serialized with SerializationUtil)
     */
    public static void put(String name, Object value) {
        RBucket<byte[]> keyObject = getRedissonManager().getBucket(name);
        keyObject.set(SerializationUtil.serialize(value));
    }
    /**
     * Caches a value in Redis with an expiration.
     *
     * @param name       Redis key
     * @param value      value to cache (serialized with SerializationUtil)
     * @param timeToLive time to live before the key expires
     * @param timeUnit   time unit of {@code timeToLive}
     */
    public static void put(String name, Object value, long timeToLive, TimeUnit timeUnit) {
        RBucket<byte[]> keyObject = getRedissonManager().getBucket(name);
        keyObject.set(SerializationUtil.serialize(value), timeToLive, timeUnit);
    }
    /**
     * Caches a value in Redis with the expiration given in minutes.
     *
     * @param name       Redis key
     * @param value      value to cache (serialized with SerializationUtil)
     * @param timeToLive time to live, in minutes
     */
    public static void putCacheWithMinutes(String name, Object value, long timeToLive) {
        RBucket<byte[]> keyObject = getRedissonManager().getBucket(name);
        keyObject.set(SerializationUtil.serialize(value), timeToLive, TimeUnit.MINUTES);
    }
    public static void main(String[] args) {
        ApplicationContext applicationContext = new ClassPathXmlApplicationContext("classpath:spring-redisson.xml");
        RedissonClient redisson = (RedissonClient) applicationContext.getBean("redissonManager");
        // First fetch the key-value object from Redis; it is fine if the key does not exist yet
        // System.out.println(get(UserEntity.class, DISTRIBUTED_LOCK_FLAG));
        put("keyBytes", "keyBytes");
        System.out.println(get(String.class, "keyBytes"));
        //RBucket<byte[]> keyObject = redisson.getBucket("keyBytes");
        //System.out.println("ok:" + keyObject.get());
        // set() overwrites the value if the key already exists, and creates it otherwise
        // keyObject.set("value".getBytes());
        redisson.shutdown();
    }
}
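A minimal usage sketch (not from the original tool) tying the pieces together: a cache-aside read protected by the distributed lock so that only one node rebuilds a missing entry. The key naming and the stand-in computation are illustrative assumptions; only the RedisUtil methods shown above are used.
import java.util.concurrent.TimeUnit;
public class RedisUtilUsageExample {
    public static String getReport(String reportId) {
        String cacheKey = "report_" + reportId;
        String cached = RedisUtil.get(String.class, cacheKey);
        if (cached != null) {
            return cached;
        }
        // Take the distributed lock so only one node rebuilds the cache entry at a time
        RedisUtil.lock(cacheKey, 1, TimeUnit.MINUTES);
        try {
            cached = RedisUtil.get(String.class, cacheKey); // re-check after acquiring the lock
            if (cached == null) {
                cached = "report-content-" + reportId;      // stand-in for an expensive computation
                RedisUtil.putCacheWithMinutes(cacheKey, cached, 30);
            }
            return cached;
        } finally {
            RedisUtil.unlock(cacheKey);
        }
    }
    public static void main(String[] args) {
        System.out.println(getReport("42"));
    }
}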
Maven dependencies: Spring MVC, Apache Commons Logging, and Redisson.
Configuration file:
spring-redisson.xml (the opening <beans> element with the Redisson namespace; the redissonManager bean is defined inside it):
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xmlns:redisson="http://redisson.org/schema/redisson"
       xsi:schemaLocation="
           http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans.xsd
           http://www.springframework.org/schema/context
           http://www.springframework.org/schema/context/spring-context.xsd
           http://redisson.org/schema/redisson
           http://redisson.org/schema/redisson/redisson.xsd">

    <!-- redissonManager bean definition goes here -->

</beans>
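If you would rather not rely on the Spring XML namespace, the same redissonManager can be built programmatically. A minimal sketch, assuming a single local Redis node and the Redisson 3.x address format with the redis:// prefix; swap the server block for the Sentinel or master/slave variants shown later:
import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
public class RedissonBootstrap {
    public static RedissonClient createClient() {
        Config config = new Config();
        // Single-node setup; the address is an assumption, adjust to your environment
        config.useSingleServer().setAddress("redis://127.0.0.1:6379");
        return Redisson.create(config);
    }
}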
Redis Sentinel configuration example, sentinel-26379.conf:
## port used for communication between Sentinel instances
daemonize yes
port 26379
#redis-master
sentinel myid 9028ea0dc586c123e58dca922f4ce406e7666cb6
sentinel monitor redis-master 127.0.0.1 6379 1
sentinel down-after-milliseconds redis-master 5000
sentinel failover-timeout redis-master 900000
#sentinel auth-pass redis-master 123456
logfile "./sentinel.log"
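With three such Sentinel instances (ports 26379/26380/26381, as started by the utility class at the end of this post) monitoring the master named redis-master, the redissonManager can be pointed at them along these lines; a sketch only, with addresses mirroring the local test setup:
import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
public class RedissonSentinelBootstrap {
    public static RedissonClient createClient() {
        Config config = new Config();
        config.useSentinelServers()
              .setMasterName("redis-master") // must match the name used in "sentinel monitor"
              .addSentinelAddress("redis://127.0.0.1:26379",
                                  "redis://127.0.0.1:26380",
                                  "redis://127.0.0.1:26381");
        return Redisson.create(config);
    }
}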
Redis cluster slave (replica) configuration example, redis16379-slave.conf (it replicates the master on port 6379 via the slaveof directive):
bind 127.0.0.1
protected-mode yes
port 16379
tcp-backlog 511
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output.
logfile ""
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
dir ./
slaveof 127.0.0.1 6379
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
repl-disable-tcp-nodelay no
slave-priority 100
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# If Redis is to be used as an in-memory-only cache without any kind of
# persistence, then the fork() mechanism used by the background AOF/RDB
# persistence is unnecessary. As an optimization, all persistence can be
# turned off in the Windows version of Redis. This will redirect heap
# allocations to the system heap allocator, and disable commands that would
# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF.
# This flag may not be combined with any of the other flags that configure
# AOF and RDB operations.
# persistence-available [(yes)|no]
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# WARNING: not setting maxmemory will cause Redis to terminate with an
# out-of-memory exception if the heap limit is reached.
#
# NOTE: since Redis uses the system paging file to allocate the heap memory,
# the Working Set memory usage showed by the Windows Task Manager or by other
# tools such as ProcessExplorer will not always be accurate. For example, right
# after a background save of the RDB or the AOF files, the working set value
# may drop significantly. In order to check the correct amount of memory used
# by the redis-server to store the data, use the INFO client command. The INFO
# command shows only the memory used to store the redis data, not the extra
# memory used by the Windows process for its own requirements. The extra amount
# of memory not reported by the INFO command can be calculated subtracting the
# Peak Working Set reported by the Windows Task Manager and the used_memory_peak
# reported by the INFO command.
#
# maxmemory <bytes>
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
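For the three-master/three-slave layout started by the utility class below, Redisson can also be configured directly against a master and its replica. A rough sketch, assuming the 6379/16379 pair from the configuration above:
import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
public class RedissonMasterSlaveBootstrap {
    public static RedissonClient createClient() {
        Config config = new Config();
        config.useMasterSlaveServers()
              .setMasterAddress("redis://127.0.0.1:6379")  // master from redis6379.conf
              .addSlaveAddress("redis://127.0.0.1:16379"); // replica from redis16379-slave.conf
        return Redisson.create(config);
    }
}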
Appendix:
To make it easier to test the Sentinel and cluster deployments locally, here is a one-click startup utility class. Use the configuration files shown above, copying and adjusting the ports as needed (3 copies for Sentinel mode, 6 copies for cluster mode).
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Created by dufy on 2017/3/28.
 *
 * cmd /c dir        runs dir and then closes the command window.
 * cmd /k dir        runs dir and keeps the command window open.
 * cmd /c start dir  opens a new window, runs dir in it, and closes the original window.
 * cmd /k start dir  opens a new window, runs dir in it, and keeps the original window open.
 *
 * redis-cli.exe -h 127.0.0.1 -p <port>
 *   info replication -- show master/slave replication status
 *   info sentinel    -- show Sentinel status
 *
 * Setting up Redis Sentinel mode locally on Windows: http://blog.csdn.net/liuchuanhong1/article/details/53206028
 *
 * Utility class for starting the Redis servers
 */
public class StartRedisServer {
private final static String redisRootPath = "E:/Redis-x64-3.2.100";
public static void main(String[] args) {
        List<String> cmds = new ArrayList<>();
        // three-master / three-slave cluster mode
String cmdRedis6379 = "cmd /k start redis-server.exe redis6379.conf ";//redis-server.exe master redis.conf
String cmdRedis16379 = "cmd /k start redis-server.exe redis16379-slave.conf ";//redis-server.exe slave redis.conf
String cmdRedis6380 = "cmd /k start redis-server.exe redis6380.conf ";//redis-server.exe master redis.conf
String cmdRedis16380 = "cmd /k start redis-server.exe redis16380-slave.conf ";//redis-server.exe slave redis.conf
String cmdRedis6381 = "cmd /k start redis-server.exe redis6381.conf ";//redis-server.exe master redis.conf
String cmdRedis16381 = "cmd /k start redis-server.exe redis16381-slave.conf ";//redis-server.exe slave redis.conf
        // cmds.add(cmdRedis6379);
        // cmds.add(cmdRedis16379);
        // cmds.add(cmdRedis6380);
        // cmds.add(cmdRedis16380);
        // cmds.add(cmdRedis6381);
        // cmds.add(cmdRedis16381);
        // Sentinel mode
String cmdRedis26379 = "cmd /k start redis-server.exe sentinel-26379.conf --sentinel";//redis-server.exe sentinel26479.conf --sentinel
String cmdRedis26479 = "cmd /k start redis-server.exe sentinel-26380.conf --sentinel";//redis-server.exe sentinel26479.conf --sentinel
String cmdRedis26579 = "cmd /k start redis-server.exe sentinel-26381.conf --sentinel";//redis-server.exe sentinel26479.conf --sentinel
cmds.add(cmdRedis26379);
cmds.add(cmdRedis26479);
cmds.add(cmdRedis26579);
initRedisServer(cmds);
}
    public static void initRedisServer(List<String> cmdStr) {
if (cmdStr != null && cmdStr.size() > 0) {
for (String cmd : cmdStr) {
try {
                    Runtime.getRuntime().exec(cmd, null, new File(redisRootPath));
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    System.out.println("Thread interrupted: " + e.getMessage());
e.printStackTrace();
} catch (IOException e) {
System.out.println("cmd command error" + e.getMessage());
e.printStackTrace();
}
}
}
}
}