Spring Boot + Distributed Snowflake Source Code Analysis

Our project uses Snowflake to generate primary-key IDs, so while I have some spare time, let's dig into how it is implemented.

The starter in use is snowflake-spring-boot-starter:

        
<dependency>
    <groupId>wiki.xsx</groupId>
    <artifactId>snowflake-spring-boot-starter</artifactId>
    <version>RELEASE</version>
</dependency>
        

The service registry is Nacos:

        
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
        

When the Spring Boot application starts, SnowflakeAutoConfiguration auto-wires a singleton Snowflake bean, which is then used to generate IDs:

@Resource
private Snowflake snowflake;
Long id = snowflake.nextId();
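
For context, here is a minimal sketch of how the injected bean might be used inside an application service (OrderService and its method are hypothetical examples, not part of the starter):

import javax.annotation.Resource;
import org.springframework.stereotype.Service;
import wiki.xsx.core.snowflake.config.Snowflake;

// Hypothetical service using the auto-configured Snowflake bean as a primary-key generator.
@Service
public class OrderService {

    @Resource
    private Snowflake snowflake;

    public long createOrder() {
        // nextId() returns a 64-bit id that is unique across all instances of the service
        long orderId = snowflake.nextId();
        // ... persist the order with this id ...
        return orderId;
    }
}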

SnowflakeAutoConfiguration:

@Configuration
@ConditionalOnClass({DiscoveryClient.class})
@Import({RestTemplate.class, LoadBalancerAutoConfiguration.class, SnowflakeApi.class, ConsulDiscoveryConfig.class, EurekaDiscoveryConfig.class, NacosDiscoveryConfig.class, ZookeeperDiscoveryConfig.class})
public class SnowflakeAutoConfiguration {
    private static final Logger log = LoggerFactory.getLogger(SnowflakeAutoConfiguration.class);
    private static final long MAX_WORKER_ID = 31L;
    private static final long MAX_DATA_CENTER_ID = 31L;

    public SnowflakeAutoConfiguration() {
    }

    @Bean
    Snowflake snowflake(RestTemplate restTemplate, DiscoveryConfig config, DiscoveryClient discoveryClient) {
        Snowflake snowflake = this.initSnowflake(restTemplate, config, discoveryClient);
        log.info("【当前机器码id为:{},数据中心id为:{}】", snowflake.getWorkerId(), snowflake.getDataCenterId());
        return snowflake;
    }

    private Snowflake initAvailableSnowflake(List activeSnowflakeList) {
        boolean flag = true;

        for(long i = 0L; i < 31L; ++i) {
            for(long j = 0L; j < 31L; ++j) {
                Iterator var7 = activeSnowflakeList.iterator();

                while(var7.hasNext()) {
                    Snowflake active = (Snowflake)var7.next();
                    if (active.getWorkerId() == i && active.getDataCenterId() == j) {
                        flag = false;
                        break;
                    }
                }

                if (flag) {
                    return new Snowflake(i, j);
                }

                flag = true;
            }
        }

        throw new RuntimeException("no available snowflake");
    }

    private Snowflake initSnowflake(RestTemplate restTemplate, DiscoveryConfig config, DiscoveryClient discoveryClient) {
        boolean flag = false;
        Snowflake usedSnowflake = null;
        long workerId = ThreadLocalRandom.current().nextLong(0L, 31L);
        long dataCenterId = ThreadLocalRandom.current().nextLong(0L, 31L);
        List activeSnowflakeList = new ArrayList(Long.valueOf(31L).intValue());
        List serviceInstances = discoveryClient.getInstances(config.getServiceName());
        Iterator var12 = serviceInstances.iterator();

        while(var12.hasNext()) {
            ServiceInstance serviceInstance = (ServiceInstance)var12.next();

            try {
                String used = (String)restTemplate.postForObject(String.format("%s%s", serviceInstance.getUri(), "/snowflake/used"), (Object)null, String.class, new Object[0]);
                if (used != null) {
                    String[] wdId = used.split("-");
                    usedSnowflake = new Snowflake(Long.parseLong(wdId[0]), Long.parseLong(wdId[1]));
                }
            } catch (Exception var16) {
                log.debug("snowflake服务【{}】调用失败,将跳过当前失败服务", serviceInstance.getInstanceId());
                continue;
            }

            if (usedSnowflake != null) {
                if (workerId == usedSnowflake.getWorkerId() && dataCenterId == usedSnowflake.getDataCenterId()) {
                    flag = true;
                }

                activeSnowflakeList.add(usedSnowflake);
            }
        }

        Snowflake availableSnowflake;
        if (flag) {
            availableSnowflake = this.initAvailableSnowflake(activeSnowflakeList);
        } else {
            availableSnowflake = new Snowflake(workerId, dataCenterId);
        }

        return availableSnowflake;
    }
}

Snowflake snowflake = this.initSnowflake(restTemplate, config, discoveryClient);

The Snowflake bean is produced by initSnowflake. Let's walk through its implementation:

private Snowflake initSnowflake(RestTemplate restTemplate, DiscoveryConfig config, DiscoveryClient discoveryClient) {
    // flag: whether the locally generated worker id / data center id collides with another service
    boolean flag = false;
    // holds the Snowflake reported by each remote service
    Snowflake usedSnowflake = null;
    // thread-safe random worker id in [0, 31)
    long workerId = ThreadLocalRandom.current().nextLong(0L, 31L);
    // thread-safe random data center id in [0, 31)
    long dataCenterId = ThreadLocalRandom.current().nextLong(0L, 31L);
    // empty list (initial capacity 31) to collect each service's Snowflake
    List activeSnowflakeList = new ArrayList(Long.valueOf(31L).intValue());
    // fetch all instances of this service registered in the registry
    List serviceInstances = discoveryClient.getInstances(config.getServiceName());
    // iterator over the instance list
    Iterator var12 = serviceInstances.iterator();

    // loop over the service instances
    while(var12.hasNext()) {
        // current service instance
        ServiceInstance serviceInstance = (ServiceInstance)var12.next();

        try {
            // remote call to each instance's /snowflake/used endpoint
            // (one of the SnowflakeApi endpoints; it reports that instance's worker id and data center id)
            String used = (String)restTemplate.postForObject(String.format("%s%s", serviceInstance.getUri(), "/snowflake/used"), (Object)null, String.class, new Object[0]);
            if (used != null) {
                // the endpoint returns: this.snowflake.getWorkerId() + "-" + this.snowflake.getDataCenterId();
                String[] wdId = used.split("-");
                // build a Snowflake representing that instance's ids
                usedSnowflake = new Snowflake(Long.parseLong(wdId[0]), Long.parseLong(wdId[1]));
            }
        } catch (Exception var16) {
            log.debug("snowflake服务【{}】调用失败,将跳过当前失败服务", serviceInstance.getInstanceId());
            continue;
        }

        if (usedSnowflake != null) {
            // mark a collision when the local worker id and data center id match another service's
            if (workerId == usedSnowflake.getWorkerId() && dataCenterId == usedSnowflake.getDataCenterId()) {
                flag = true;
            }

            // remember this instance's Snowflake
            activeSnowflakeList.add(usedSnowflake);
        }
    }

    Snowflake availableSnowflake;
    // if the local ids collide with another service
    if (flag) {
        // pick a worker id / data center id pair that no running service is using
        availableSnowflake = this.initAvailableSnowflake(activeSnowflakeList);
    } else {
        // no collision: use the randomly generated pair
        availableSnowflake = new Snowflake(workerId, dataCenterId);
    }

    return availableSnowflake;
}

The service registry can be Consul, Eureka, Nacos, or Zookeeper.

SnowflakeApi:

package wiki.xsx.core.snowflake.api;

import javax.annotation.Resource;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import wiki.xsx.core.snowflake.config.Snowflake;

@RestController
@RequestMapping({"/snowflake"})
public class SnowflakeApi {
    @Resource(
        type = Snowflake.class
    )
    private Snowflake snowflake;

    public SnowflakeApi() {
    }

    @GetMapping({"/long"})
    public Long getLong() {
        return this.snowflake.nextId();
    }

    @GetMapping({"/string"})
    public String getString() {
        return this.snowflake.nextIdStr();
    }

    @GetMapping({"/timestamp"})
    public Long getTimestamp(Long id) {
        return this.snowflake.getTimestamp(id);
    }

    @PostMapping({"/used"})
    public String getUsed() {
        return this.snowflake.getWorkerId() + "-" + this.snowflake.getDataCenterId();
    }
}

This way, no two service instances in the cluster end up with the same worker ID / data center ID combination for their Snowflake instances.
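
To make the collision-avoidance idea concrete, here is a simplified sketch of what initAvailableSnowflake does: collect every worker id / data center id pair already in use, then hand out the first free pair. This is an illustration of the approach, not the library's code:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified illustration of the scan in initAvailableSnowflake:
// encode each occupied (workerId, dataCenterId) pair, then return the first pair nobody reports.
public class WorkerIdAllocator {

    private static final long MAX_WORKER_ID = 31L;
    private static final long MAX_DATA_CENTER_ID = 31L;

    // usedPairs holds {workerId, dataCenterId} arrays reported by the running services.
    public static long[] firstFreePair(List<long[]> usedPairs) {
        Set<Long> used = new HashSet<>();
        for (long[] pair : usedPairs) {
            // encode the pair into a single key: workerId * 32 + dataCenterId
            used.add(pair[0] * (MAX_DATA_CENTER_ID + 1) + pair[1]);
        }
        for (long workerId = 0; workerId <= MAX_WORKER_ID; workerId++) {
            for (long dataCenterId = 0; dataCenterId <= MAX_DATA_CENTER_ID; dataCenterId++) {
                if (!used.contains(workerId * (MAX_DATA_CENTER_ID + 1) + dataCenterId)) {
                    return new long[]{workerId, dataCenterId};
                }
            }
        }
        throw new IllegalStateException("no available snowflake");
    }
}

One detail worth noting in the decompiled code itself: the loops in initAvailableSnowflake run with i < 31L and j < 31L, so even though MAX_WORKER_ID and MAX_DATA_CENTER_ID are 31, the value 31 is never actually handed out (the random generation with nextLong(0L, 31L) excludes it as well).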

Next, let's look at how the ID itself is generated:

   public synchronized long nextId() {
        long timestamp = this.genTime();
        if (timestamp < this.lastTimestamp) {
            throw new IllegalStateException(String.format("Clock moved backwards. Refusing to generate id for %sms", this.lastTimestamp - timestamp));
        } else {
            if (this.lastTimestamp == timestamp) {
                long sequenceMask = 4095L;
                this.sequence = this.sequence + 1L & sequenceMask;
                if (this.sequence == 0L) {
                    timestamp = this.tilNextMillis(this.lastTimestamp);
                }
            } else {
                this.sequence = 0L;
            }

            this.lastTimestamp = timestamp;
            this.getClass();
            return timestamp - 946656000000L << 22 | this.dataCenterId << 17 | this.workerId << 12 | this.sequence;
        }
    }

The snowflake algorithm combines timestamp, dataCenterId, workerId (the worker/machine id), and sequence (a counter between 0 and 4095 that increments within the same millisecond) to build IDs with the following properties (a decoding sketch of the bit layout follows the list):

1. Globally unique
2. Increasing over time (trend-ordered)
3. Highly available
4. Suitable for high concurrency
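
As a sketch of the layout implied by nextId's return expression, the same shifts can be used to split an id back into its four parts. The epoch constant 946656000000L is taken from the return statement above; the decoder itself is illustrative and not part of the starter:

// Illustrative decoder for the bit layout produced by nextId():
// | timestamp - 946656000000L | dataCenterId (5 bits) | workerId (5 bits) | sequence (12 bits) |
public class SnowflakeIdDecoder {

    private static final long TWEPOCH = 946656000000L; // epoch constant from the return statement

    public static void decode(long id) {
        long sequence = id & 0xFFFL;              // lowest 12 bits
        long workerId = (id >> 12) & 0x1FL;       // next 5 bits
        long dataCenterId = (id >> 17) & 0x1FL;   // next 5 bits
        long timestamp = (id >> 22) + TWEPOCH;    // remaining bits, shifted back to epoch milliseconds
        System.out.printf("timestamp=%d, dataCenterId=%d, workerId=%d, sequence=%d%n",
                timestamp, dataCenterId, workerId, sequence);
    }
}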

Drawback:

nextId obtains the timestamp with System.currentTimeMillis(), which is not optimal for performance.

Reason:

Calling System.currentTimeMillis() is considerably more expensive than allocating an ordinary object (I have not measured the exact ratio myself; some people put it at roughly 100x). It is slow because every call has to cross into the operating system.

Looking at the Sequence source in mybatis-plus, the timestamp is not read from System.currentTimeMillis() on every call. Instead, an initial timestamp is cached and a single background thread refreshes it on a fixed schedule (roughly once per millisecond), so each ID generation reads the cached value rather than making a system call.
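
A minimal sketch of such a cached clock, assuming a single daemon thread that refreshes an AtomicLong on a fixed period (this illustrates the idea; it is not mybatis-plus's actual SystemClock source):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

// Cached clock: one daemon thread refreshes the timestamp periodically,
// so callers read an in-memory value instead of making a system call on every id.
public class CachedClock {

    private static final CachedClock INSTANCE = new CachedClock(1L);

    private final AtomicLong now = new AtomicLong(System.currentTimeMillis());

    private CachedClock(long periodMillis) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(runnable -> {
            Thread thread = new Thread(runnable, "cached-clock");
            thread.setDaemon(true);
            return thread;
        });
        scheduler.scheduleAtFixedRate(
                () -> now.set(System.currentTimeMillis()), periodMillis, periodMillis, TimeUnit.MILLISECONDS);
    }

    public static long now() {
        return INSTANCE.now.get();
    }
}

The trade-off is that the cached value can lag behind the real clock by up to one refresh period; with a period of one millisecond that is no worse than the millisecond granularity the algorithm already relies on.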

This is my first source-code analysis write-up, so I picked a simple library to practice on.
