When you first start using Redis as a cache server, a single instance is usually enough. As the project grows and the volume of cached data increases, queries and inserts slow down, and that is when a Redis cluster solution becomes worth considering.
To use Redis in a distributed setup, the data should be spread evenly across every machine. The first idea is to shard the data. Since Redis is a key-value store, the natural approach is hash sharding: hash the key to get a long value, take that value modulo the number of nodes, and the remainder gives the node that owns the key. Every read and write can then be routed straight to that node, so performance naturally improves.
However, modulo-based hashing has a problem. It works fine as long as the number of nodes never changes, but as soon as a machine is added or one goes down, the node count changes, the computed mappings shift, and most keys can no longer be read from the node they were originally written to.
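To make the problem concrete, here is a minimal, illustrative sketch (the node counts and keys are made up; the routing rule is the plain hash-modulo scheme described above):

import java.util.Arrays;
import java.util.List;

public class ModuloShardingDemo {
    // Route a key to a node index by hashing it and taking the modulus.
    static int route(String key, int nodeCount) {
        // Math.abs guards against negative hashCode values.
        return Math.abs(key.hashCode()) % nodeCount;
    }

    public static void main(String[] args) {
        List<String> keys = Arrays.asList("user:1", "user:2", "order:42", "cart:7", "session:abc");
        int moved = 0;
        for (String key : keys) {
            int before = route(key, 3); // 3 nodes in the cluster
            int after = route(key, 4);  // one node added
            if (before != after) {
                moved++;
            }
            System.out.println(key + ": node " + before + " -> node " + after);
        }
        // With modulo sharding, most keys land on a different node after a resize.
        System.out.println(moved + " of " + keys.size() + " keys were remapped");
    }
}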
This problem is what motivates the consistent hashing algorithm. A good consistent hash has the following properties:
1. Balance: the hash results are spread as evenly as possible across all nodes, so the resources of every node are used effectively.
2. Monotonicity: when the number of nodes changes, the hash should preserve existing assignments as far as possible, so that already-cached content is not reassigned to different nodes.
The implementation below consists of three parts: a Node class describing a Redis node, a MurmurHash implementation used as the hash function, and a ConsistentHash class that builds the hash ring.
public class Node {
// node name
private String name;
// node IP address
private String ip;
// node port
private int port;
// node password
private String password;
public Node(String name, String ip, int port, String password) {
this.name = name;
this.ip = ip;
this.port = port;
this.password = password;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
@Override
public String toString() {
return "Node [name=" + name + ", ip=" + ip + ", port=" + port
+ ", password=" + password + "]";
}
}
public final class MurmurHash {
public MurmurHash() {
}
private byte[] toBytesWithoutEncoding(String str) {
int len = str.length();
int pos = 0;
byte[] buf = new byte[len << 1];
for (int i = 0; i < len; i++) {
char c = str.charAt(i);
buf[pos++] = (byte) (c & 0xFF);
buf[pos++] = (byte) (c >> 8);
}
return buf;
}
public int hashcode(String str) {
byte[] bytes = toBytesWithoutEncoding(str);
return hash32(bytes, bytes.length);
}
/**
* Generates a 32 bit hash from a byte array of the given length and seed.
* @param data byte array to hash
* @param length length of the array to hash
* @param seed initial seed value
* @return 32 bit hash of the given array
*/
public int hash32(final byte[] data, int length, int seed) {
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
final int m = 0x5bd1e995;
final int r = 24;
// Initialize the hash to a random value
int h = seed ^ length;
int length4 = length / 4;
for (int i = 0; i < length4; i++) {
final int i4 = i * 4;
int k = (data[i4 + 0] & 0xff) + ((data[i4 + 1] & 0xff) << 8)
+ ((data[i4 + 2] & 0xff) << 16)
+ ((data[i4 + 3] & 0xff) << 24);
k *= m;
k ^= k >>> r;
k *= m;
h *= m;
h ^= k;
}
// Handle the last few bytes of the input array
switch (length % 4) {
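// the cases below intentionally fall through so that all remaining bytes are mixed in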
case 3:
h ^= (data[(length & ~3) + 2] & 0xff) << 16;
case 2:
h ^= (data[(length & ~3) + 1] & 0xff) << 8;
case 1:
h ^= (data[length & ~3] & 0xff);
h *= m;
}
h ^= h >>> 13;
h *= m;
h ^= h >>> 15;
return h;
}
/**
* Generates a 32 bit hash from a byte array with a default seed value.
* @param data byte array to hash
* @param length length of the array to hash
* @return 32 bit hash of the given array
*/
public int hash32(final byte[] data, int length) {
return hash32(data, length, 0x9747b28c);
}
public int hash32(final String data) {
byte[] bytes = toBytesWithoutEncoding(data);
return hash32(bytes, bytes.length, 0x9747b28c);
}
/**
* Generates a 64 bit hash from a byte array of the given length and seed.
* @param data byte array to hash
* @param length length of the array to hash
* @param seed initial seed value
* @return 64 bit hash of the given array
*/
public long hash64(final byte[] data, int length, int seed) {
final long m = 0xc6a4a7935bd1e995L;
final int r = 47;
long h = (seed & 0xffffffffL) ^ (length * m);
int length8 = length / 8;
for (int i = 0; i < length8; i++) {
final int i8 = i * 8;
long k = ((long) data[i8 + 0] & 0xff)
+ (((long) data[i8 + 1] & 0xff) << 8)
+ (((long) data[i8 + 2] & 0xff) << 16)
+ (((long) data[i8 + 3] & 0xff) << 24)
+ (((long) data[i8 + 4] & 0xff) << 32)
+ (((long) data[i8 + 5] & 0xff) << 40)
+ (((long) data[i8 + 6] & 0xff) << 48)
+ (((long) data[i8 + 7] & 0xff) << 56);
k *= m;
k ^= k >>> r;
k *= m;
h ^= k;
h *= m;
}
switch (length % 8) {
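// the cases below intentionally fall through so that all remaining bytes are mixed in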
case 7:
h ^= (long) (data[(length & ~7) + 6] & 0xff) << 48;
case 6:
h ^= (long) (data[(length & ~7) + 5] & 0xff) << 40;
case 5:
h ^= (long) (data[(length & ~7) + 4] & 0xff) << 32;
case 4:
h ^= (long) (data[(length & ~7) + 3] & 0xff) << 24;
case 3:
h ^= (long) (data[(length & ~7) + 2] & 0xff) << 16;
case 2:
h ^= (long) (data[(length & ~7) + 1] & 0xff) << 8;
case 1:
h ^= (long) (data[length & ~7] & 0xff);
h *= m;
}
h ^= h >>> r;
h *= m;
h ^= h >>> r;
return h;
}
/**
* Generates a 64 bit hash from a byte array with a default seed value.
* @param data byte array to hash
* @param length length of the array to hash
* @return 64 bit hash of the given array
*/
public long hash64(final byte[] data, int length) {
return hash64(data, length, 0xe17a1465);
}
public long hash64(final String data) {
byte[] bytes = toBytesWithoutEncoding(data);
return hash64(bytes, bytes.length);
}
}
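A quick sanity check of the MurmurHash class above (the keys are only examples): the hash is deterministic, so every client computes the same value for a given key, which is what makes routing reproducible.

public class MurmurHashDemo {
    public static void main(String[] args) {
        MurmurHash hasher = new MurmurHash();
        // The same key always hashes to the same value...
        System.out.println(hasher.hash64("user:1"));
        System.out.println(hasher.hash64("user:1"));
        // ...while different keys are spread across the 64 bit range.
        System.out.println(hasher.hash64("user:2"));
        System.out.println(hasher.hash64("order:42"));
    }
}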
import java.util.Collection;
import java.util.SortedMap;
import java.util.TreeMap;
public class ConsistentHash {
/**
* Number of virtual nodes: each real node is replicated this many times on the ring to balance load
*/
private final int virtualNodeNum;
// the hash ring: a sorted map holding the virtual nodes in hash order
private SortedMap<Long, Node> circleMap = new TreeMap<Long, Node>();
/**
* Constructor
* @param virtualNodeNum number of virtual nodes per real node; more replicas give a more even distribution
* @param nodes the real nodes to add
*/
public ConsistentHash(int virtualNodeNum, Collection<Node> nodes) {
this.virtualNodeNum = virtualNodeNum;
for(Node node:nodes){
addNode(node);
}
}
/**
* Constructor
* @param virtualNodeNum number of virtual nodes per real node; more replicas give a more even distribution
* @param node a single node to add
*/
public ConsistentHash(int virtualNodeNum, Node node) {
this.virtualNodeNum = virtualNodeNum;
addNode(node);
}
/**
* Constructor
* @param virtualNodeNum number of virtual nodes per real node
*/
public ConsistentHash(int virtualNodeNum){
this.virtualNodeNum = virtualNodeNum;
}
/**
* Adds a node.
* Each call places virtualNodeNum virtual nodes on the ring, all pointing to the same real Node.
* For example, with a replica count of 2, one call to this method adds two virtual nodes.
* The hash is computed from node.toString() plus the replica index, so nodes are deduplicated by toString().
* @param node the node to add
*/
public void addNode(Node node) {
for (int i = 0; i < virtualNodeNum; i++) {
circleMap.put(HashUtils.murMurHash(node.toString() + i), node);
}
}
/**
* Removes a node along with all of its virtual nodes
* @param node the node to remove
*/
public void remove(Node node) {
for (int i = 0; i < virtualNodeNum; i++) {
circleMap.remove(HashUtils.murMurHash(node.toString() + i));
}
}
/**
* Finds the nearest node clockwise from the key's position
* @param key the key is hashed, and the closest virtual node in the clockwise direction determines the real node
* @return the node responsible for the key
*/
public Node get(Object key) {
if (circleMap.isEmpty()) {
return null;
}
long hash = HashUtils.murMurHash(RText.toString(key));
if (!circleMap.containsKey(hash)) {
SortedMap<Long, Node> tailMap = circleMap.tailMap(hash); // view of the ring whose keys are >= hash
hash = tailMap.isEmpty() ? circleMap.firstKey() : tailMap.firstKey();
}
// either an exact hit or the first virtual node clockwise from the key
return circleMap.get(hash);
}
}
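A short usage sketch of the ConsistentHash class. Note that HashUtils.murMurHash and RText.toString are helper utilities not shown in this post; HashUtils presumably wraps the MurmurHash class above, and RText.toString is assumed to be a null-safe toString. The node addresses and the virtual-node count below are illustrative:

import java.util.Arrays;

public class ConsistentHashDemo {
    public static void main(String[] args) {
        Node node1 = new Node("redis1", "192.168.1.101", 6379, null);
        Node node2 = new Node("redis2", "192.168.1.102", 6379, null);
        Node node3 = new Node("redis3", "192.168.1.103", 6379, null);

        // 160 virtual nodes per real node is a common choice for an even spread
        ConsistentHash ring = new ConsistentHash(160, Arrays.asList(node1, node2, node3));

        // every key is routed to exactly one node on the ring
        System.out.println("user:1   -> " + ring.get("user:1").getName());
        System.out.println("order:42 -> " + ring.get("order:42").getName());

        // removing a node only remaps the keys that lived on that node;
        // keys on the remaining nodes keep their mapping
        ring.remove(node2);
        System.out.println("user:1   -> " + ring.get("user:1").getName());
    }
}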
JedisShardInfo jedisShardInfo1 = new JedisShardInfo(
bundle.getString("redis1.ip"), Integer.valueOf(bundle.getString("redis.port")));
JedisShardInfo jedisShardInfo2 = new JedisShardInfo(
bundle.getString("redis2.ip"), Integer.valueOf(bundle.getString("redis.port")));
List<JedisShardInfo> list = new LinkedList<JedisShardInfo>();
list.add(jedisShardInfo1);
list.add(jedisShardInfo2);
Initialize a ShardedJedisPool in place of a JedisPool:
ShardedJedisPool pool = new ShardedJedisPool(config, list);
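For completeness, `bundle` and `config` are not defined in the snippet above. A self-contained version might look like the following sketch, assuming a redis.properties file on the classpath with redis1.ip, redis2.ip and redis.port entries (the property keys follow the snippet; everything else is illustrative):

import java.util.LinkedList;
import java.util.List;
import java.util.ResourceBundle;

import redis.clients.jedis.JedisPoolConfig;
import redis.clients.jedis.JedisShardInfo;
import redis.clients.jedis.ShardedJedisPool;

public class ShardedPoolFactory {
    public static ShardedJedisPool createPool() {
        // assumed: redis.properties on the classpath holds the host/port settings
        ResourceBundle bundle = ResourceBundle.getBundle("redis");
        int port = Integer.valueOf(bundle.getString("redis.port"));

        List<JedisShardInfo> shards = new LinkedList<JedisShardInfo>();
        shards.add(new JedisShardInfo(bundle.getString("redis1.ip"), port));
        shards.add(new JedisShardInfo(bundle.getString("redis2.ip"), port));

        // pool settings: the defaults are fine for a demo
        JedisPoolConfig config = new JedisPoolConfig();

        // ShardedJedisPool spreads keys across the shards for us
        return new ShardedJedisPool(config, shards);
    }
}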
Test:
public void test() {
// get a ShardedJedis connection from the pool
ShardedJedis jedis = pool.getResource();
String keys = "name";
String value = "snowolf";
// delete any existing value
jedis.del(keys);
// store the value
jedis.set(keys, value);
// read it back
String v = jedis.get(keys);
System.out.println(v);
// return the connection to the pool
pool.returnResource(jedis);
assertEquals(value, v);
}