使用lettuce和redisTemplate操作redis cluster踩坑日记

环境:虚拟机3主3从

1、关闭slave是对读写都没有影响的

2、关键就是关闭master,读写都会失败

虽说可以设置lettuce的拓扑自动更新,但是redis的slave变成master是需要时间的
在这段时间内的读写都无法进行,异常-->【connection refused】
等到slave变成了master,读写才会恢复,但是依然会报警告,无法连接到xxx,哎...

找了半天发现有个方法可以避免读的失败,就是优先从slave读
    StatefulRedisClusterConnection对象之
    setReadFrom(ReadFrom.REPLICA_PREFERRED);
通过测试发现只有REPLICA_PREFERRED有效,其他无效
这个选项意思是优先从副本读取,master宕机或者slave宕机都不会对读造成任何影响
NICE啊。。。

3、使用lettuce操作cluster

    |--工具类

/**
 * lettuce获取cluster连接
 */
public class ClusterUtil {
    // Node URIs parsed from the address list below.
    private static final List<RedisURI> redisURIList = new ArrayList<>();
    private static final String address =
                    "192.168.8.131:6379," +
                    "192.168.8.129:6379," +
                    "192.168.8.130:6379," +
                    "192.168.8.132:6379," +
                    "192.168.8.134:6379," +
                    "192.168.8.133:6379";
    private static final String auth = "******";
    private static final RedisClusterClient client;
    // One connection per thread; must be released via close() to avoid leaks.
    private static final ThreadLocal<StatefulRedisClusterConnection<String, String>> box = new ThreadLocal<>();
    static {
        for (String x : address.split(",")) {
            redisURIList.add(RedisURI.create("redis://" + auth + "@" + x));
        }
        client = RedisClusterClient.create(redisURIList);
        // Topology refresh: pick up master/replica changes automatically.
        ClusterTopologyRefreshOptions.Builder topoBuilder = ClusterTopologyRefreshOptions.builder();
        topoBuilder.enableAllAdaptiveRefreshTriggers();
        topoBuilder.enablePeriodicRefresh(true);
        topoBuilder.dynamicRefreshSources(true);
        topoBuilder.closeStaleConnections(true);
        // Client options; set before any connection is opened so they take effect.
        ClusterClientOptions.Builder clientOptionsBuilder = ClusterClientOptions.builder();
        clientOptionsBuilder.maxRedirects(5);
        clientOptionsBuilder.topologyRefreshOptions(topoBuilder.build());
        client.setOptions(clientOptionsBuilder.build());
    }

    /**
     * Returns this thread's cluster connection, creating it on first use.
     * The connection prefers reading from replicas so that a master failure
     * does not break reads (see ReadFrom.REPLICA_PREFERRED).
     */
    public static StatefulRedisClusterConnection<String, String> getConnection() {
        StatefulRedisClusterConnection<String, String> connect = box.get();
        if (connect == null) {
            connect = client.connect();
            // Key setting: read from replicas first; fall back to master.
            connect.setReadFrom(ReadFrom.REPLICA_PREFERRED);
            box.set(connect);
        }
        return connect;
    }

    /** Closes and forgets this thread's connection, if any. */
    public static void close() {
        StatefulRedisClusterConnection<String, String> connect = box.get();
        if (connect == null) return;
        connect.close();
        box.remove();
    }
}

    |--操作类

/**
 * redis集群  添加与获取KEY
 */
/**
 * Redis cluster operations via Lettuce: set and get string keys.
 * Failures are swallowed and signalled to callers by a null return.
 */
public class ClusterService {
    /**
     * Stores key=value. Returns the server reply ("OK") on success, null on failure.
     */
    public String set(String key, String value) {
        String result = null;
        try {
            StatefulRedisClusterConnection<String, String> connection = ClusterUtil.getConnection();
            RedisAdvancedClusterCommands<String, String> commands = connection.sync();
            result = commands.set(key, value);
        } catch (Exception e) {
            // instanceof (not class equality) so RedisException subclasses are matched too.
            if (e instanceof io.lettuce.core.RedisException) {
                // Master is likely down; caller sees null and can retry or queue the write.
            }
        } finally {
            ClusterUtil.close();
        }
        // Returning here (not inside finally) avoids silently discarding pending Throwables.
        return result;
    }

    /**
     * Reads the value for key. Returns null when absent or on failure.
     */
    public String get(String key) {
        String value = null;
        try {
            StatefulRedisClusterConnection<String, String> connection = ClusterUtil.getConnection();
            RedisAdvancedClusterCommands<String, String> commands = connection.sync();
            value = commands.get(key);
        } catch (Exception e) {
            if (e instanceof io.lettuce.core.RedisException) {
                // Reads should rarely land here thanks to REPLICA_PREFERRED.
            }
        } finally {
            ClusterUtil.close();
        }
        return value;
    }
}

虽说lettuce这个【先从副本读】很牛逼,但是项目里也要与spring整合才行对么
不跟spring搞一起的框架不是好框架,况且spring可以方便的进行序列化

4、使用spring-data-redis操作cluster

    |--配置类

@Configuration
@PropertySource(value = {"classpath:redis/redis.properties"},encoding = "UTF-8")
public class RedisConfig {
    /**
     * Cluster node list, redirect limit and password, read from redis.properties.
     */
    @Bean
    public RedisClusterConfiguration getRedisClusterConfiguration(
            @Value("${redis.cluster.nodes}") String nodes,
            @Value("${redis.cluster.max-redirect}") int maxRedirect,
            @Value("${redis.cluster.auth}") String password
    ){
        RedisClusterConfiguration redisConfiguration = new RedisClusterConfiguration();
        Set<RedisNode> redisNodes = new HashSet<>();
        for (String s : nodes.split(",")) {
            if (s == null || s.isEmpty()) continue;
            // Each entry is "ip:port"; split once and reuse.
            String[] parts = s.split(":");
            redisNodes.add(new RedisNode(parts[0], Integer.parseInt(parts[1])));
        }
        redisConfiguration.setClusterNodes(redisNodes);
        redisConfiguration.setMaxRedirects(maxRedirect);
        redisConfiguration.setPassword(password);
        return redisConfiguration;
    }

    /**
     * Connection pool sizing for the Lettuce pooling client configuration.
     */
    @Bean
    public GenericObjectPoolConfig getGenericObjectPoolConfig(
            @Value("${redis.pool.minIdle}") int minIdle,
            @Value("${redis.pool.maxIdle}") int maxIdle,
            @Value("${redis.pool.maxTotal}") int maxTotal,
            @Value("${redis.pool.maxWaitMills}") long maxWaitMillis,
            @Value("${redis.pool.testOnBorrow}") boolean testOnBorrow
    ){
        GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
        genericObjectPoolConfig.setMinIdle(minIdle);
        genericObjectPoolConfig.setMaxIdle(maxIdle);
        genericObjectPoolConfig.setMaxTotal(maxTotal);
        genericObjectPoolConfig.setMaxWaitMillis(maxWaitMillis);
        genericObjectPoolConfig.setTestOnBorrow(testOnBorrow);
        return genericObjectPoolConfig;
    }

    /**
     * Lettuce client configuration: pooling plus cluster topology refresh.
     * NOTE(review): autoReconnect(false) avoids the blocking-reconnect issue
     * described in the article, but means a dropped connection is never
     * re-established by the client itself — confirm this trade-off is intended.
     */
    @Bean
    public LettucePoolingClientConfiguration getLettucePoolingClientConfiguration(
            @Autowired GenericObjectPoolConfig poolConfig
    ){
        LettucePoolingClientConfiguration.LettucePoolingClientConfigurationBuilder configurationBuilder = LettucePoolingClientConfiguration.builder();
        configurationBuilder.poolConfig(poolConfig);
        // Topology refresh: pick up master/replica changes automatically.
        ClusterTopologyRefreshOptions.Builder topoBuilder = ClusterTopologyRefreshOptions.builder();
        topoBuilder.enableAllAdaptiveRefreshTriggers();
        topoBuilder.enablePeriodicRefresh(true);
        topoBuilder.dynamicRefreshSources(true);
        topoBuilder.closeStaleConnections(true);
        // Client options.
        ClusterClientOptions.Builder clientOptionsBuilder = ClusterClientOptions.builder();
        clientOptionsBuilder.autoReconnect(false);
        clientOptionsBuilder.topologyRefreshOptions(topoBuilder.build());
        configurationBuilder.clientOptions(clientOptionsBuilder.build());
        return configurationBuilder.build();
    }

    @Bean
    public LettuceConnectionFactory getLettuceConnectionFactory(
            @Autowired RedisClusterConfiguration redisClusterConfiguration,
            @Autowired LettucePoolingClientConfiguration lettucePoolingClientConfiguration
    ){
        return new LettuceConnectionFactory(redisClusterConfiguration,lettucePoolingClientConfiguration);
    }

    /**
     * JSON-serialized template with transaction support enabled.
     * NOTE(review): Redis Cluster only supports MULTI/EXEC on keys in the same
     * slot; transaction support on a cluster template may fail at runtime —
     * confirm this is actually exercised.
     * @param redisConnectionFactory the shared Lettuce connection factory
     * @return template with String keys and JSON-serialized values
     */
    @Bean(name = "jsonSyncRedisTemplate")
    public RedisTemplate<String, Object> getJsonSyncRedisTemplate(
        @Autowired RedisConnectionFactory redisConnectionFactory
    ){
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(redisConnectionFactory);
        redisTemplate.setEnableTransactionSupport(true);
        redisTemplate.setKeySerializer(RedisSerializer.string());
        redisTemplate.setValueSerializer(RedisSerializer.json());
        redisTemplate.setHashKeySerializer(RedisSerializer.string());
        redisTemplate.setHashValueSerializer(RedisSerializer.json());
        return redisTemplate;
    }

    /**
     * JSON-serialized template without transaction support.
     * @param redisConnectionFactory the shared Lettuce connection factory
     * @return template with String keys and JSON-serialized values
     */
    @Bean(name = "jsonAsyncRedisTemplate")
    public RedisTemplate<String, Object> getJsonAsyncRedisTemplate(
            @Autowired RedisConnectionFactory redisConnectionFactory
    ){
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(redisConnectionFactory);
        redisTemplate.setKeySerializer(RedisSerializer.string());
        redisTemplate.setValueSerializer(RedisSerializer.json());
        redisTemplate.setHashKeySerializer(RedisSerializer.string());
        redisTemplate.setHashValueSerializer(RedisSerializer.json());
        return redisTemplate;
    }
}

    |--操作类

@Service
public class ClusterService {
    @Autowired
    @Qualifier("jsonAsyncRedisTemplate")
    private RedisTemplate<String, Object> jsonAsyncRedisTemplate;

    /**
     * Stores key=value. Failures (e.g. master down with autoReconnect off)
     * are deliberately swallowed; callers get no signal on failure —
     * consider logging or queueing failed writes as the article suggests.
     */
    public void set(String key, Object value) {
        try {
            jsonAsyncRedisTemplate.opsForValue().set(key, value);
        } catch (Exception e) {
            // Intentionally best-effort: cluster failover makes writes fail briefly.
        }
    }

    /**
     * Reads the value for key. Returns null when absent or on failure.
     */
    public Object get(String key) {
        Object value = null;
        try {
            value = jsonAsyncRedisTemplate.opsForValue().get(key);
        } catch (Exception e) {
            // Intentionally best-effort; null signals "not available right now".
        }
        // Returning here (not inside finally) avoids silently discarding pending Throwables.
        return value;
    }
}

坑1)发现关闭master后,spring-data-redis一直在重连,导致方法阻塞
    slave变成了master依然阻塞
    把原来的master启动后还是阻塞
    设置拓扑刷新间隔为1秒还是阻塞
    心累。。。只好设置clientOptionsBuilder.autoReconnect(false);
    终于OK了,但是读写失败无法避免

坑2)想着lettuce的【先从副本读】能否在spring配置类里面也怼一个呢?
    看了半天,不行。。。只能配置LettucePoolingClientConfiguration里面的东西
    但是从副本读需要配置connection对象
    这个connection是springTemplate自己调用方法的时候,用connectionFactory生成的,咱们拿不到它
    算了。。。破罐破摔呗
    master挂掉,读写都会失败,只能捕获异常,自定义处理。
    或者把这些失败的记录怼到一个消息队列中?

3)开始测试
    100个线程,每个线程200ms进行一次读写,连续读写100次
    于是总共10000次读和10000次写,master挂掉后读写失败率差不多13%
    虚拟机性能差;换用云服务器后失败率大幅下降(粗略估计约0.00000013%,几乎可忽略——注意:1万次操作的最小非零失败率是0.01%,该数字应是长时间外推的估算值)

你可能感兴趣的:(使用lettuce和redisTemplate操作redis cluster踩坑日记)