项目如何生成全局唯一的id主键

一、新建自增序列表方式

适用于 MySQL:新建一个只有 id 一列(自动递增)的表,每次需要 id 时,先往该自增序列表插入一行,然后读取生成的自增 id。

-- Sequence table: contains only an auto-increment primary key.
-- Obtain a new id by inserting a row (e.g. INSERT INTO `org_dept` VALUES (NULL))
-- and then reading LAST_INSERT_ID() on the same connection.
CREATE TABLE `org_dept`  (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  PRIMARY KEY (`id`) USING BTREE
); 

缺点:

1.不适用于分库场景

2.不适用于特高并发场景

优点:

1.利用mysql数据库本身,实现简单

二、新建序列方式

适用于 Oracle、SQL Server 等支持序列(sequence)的数据库

-- For database number 1: start the sequence at a distinct large offset so that
-- ids generated by different databases can never collide.
create sequence seq_id increment 1 start 1000000000000 ;

-- For database number 2: a different, non-overlapping offset.
create sequence seq_id increment 1 start 2000000000000 ;

-- Table DDL used in each database.
-- NOTE(review): `increment 1 start ...` and `default nextval('seq_id')` are
-- PostgreSQL syntax; Oracle would use `increment by 1 start with ...` and
-- `seq_id.nextval`, SQL Server `next value for seq_id` — confirm the target DB.
create table t_table_name(
  n_id bigint not null default nextval('seq_id'), -- internal id, auto-generated
-- other columns
primary key(n_id)
);

优点:

1.适用分库分表

三、UUID方式

通过UUID产生

优点:

1.不依赖于第三方

缺点:

1.不连续

四、雪花算法

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import lombok.ToString;

/**   
* Copyright: Copyright (c) 2019 
* 
* @ClassName: IdWorker.java
* @Description: 

SnowFlake 算法,是 Twitter 开源的分布式 id 生成算法。 * 其核心思想就是:使用一个 64 bit 的 long 型的数字作为全局唯一 id。 * 这 64 个 bit 中,其中 1 个 bit 是不用的,然后用其中的 41 bit 作为毫秒数, * 用 10 bit 作为工作机器 id,12 bit 作为序列号 *

* @version: v1.0.0 * @author: BianPeng * @date: 2019年4月11日 下午3:13:41 * * Modification History: * Date Author Version Description *---------------------------------------------------------------* * 2019年4月11日 BianPeng v1.0.0 initialize */ @ToString public class SnowflakeIdFactory { static Logger log = LoggerFactory.getLogger(SnowflakeIdFactory.class); private final long twepoch = 1288834974657L; private final long workerIdBits = 5L; private final long datacenterIdBits = 5L; private final long maxWorkerId = -1L ^ (-1L << workerIdBits); private final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits); private final long sequenceBits = 12L; private final long workerIdShift = sequenceBits; private final long datacenterIdShift = sequenceBits + workerIdBits; private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits; private final long sequenceMask = -1L ^ (-1L << sequenceBits); private long workerId; private long datacenterId; private long sequence = 0L; private long lastTimestamp = -1L; public SnowflakeIdFactory(long workerId, long datacenterId) { if (workerId > maxWorkerId || workerId < 0) { throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId)); } if (datacenterId > maxDatacenterId || datacenterId < 0) { throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId)); } this.workerId = workerId; this.datacenterId = datacenterId; } public synchronized long nextId() { long timestamp = timeGen(); if (timestamp < lastTimestamp) { //服务器时钟被调整了,ID生成器停止服务. throw new RuntimeException(String.format("Clock moved backwards. 
Refusing to generate id for %d milliseconds", lastTimestamp - timestamp)); } if (lastTimestamp == timestamp) { sequence = (sequence + 1) & sequenceMask; if (sequence == 0) { timestamp = tilNextMillis(lastTimestamp); } } else { sequence = 0L; } lastTimestamp = timestamp; return ((timestamp - twepoch) << timestampLeftShift) | (datacenterId << datacenterIdShift) | (workerId << workerIdShift) | sequence; } protected long tilNextMillis(long lastTimestamp) { long timestamp = timeGen(); while (timestamp <= lastTimestamp) { timestamp = timeGen(); } return timestamp; } protected long timeGen() { return System.currentTimeMillis(); } public static void testProductIdByMoreThread(int dataCenterId, int workerId, int n) throws InterruptedException { List tlist = new ArrayList<>(); Set setAll = new HashSet<>(); CountDownLatch cdLatch = new CountDownLatch(10); long start = System.currentTimeMillis(); int threadNo = dataCenterId; Map idFactories = new HashMap<>(); for(int i=0;i<10;i++){ //用线程名称做map key. idFactories.put("snowflake"+i,new SnowflakeIdFactory(workerId, threadNo++)); } for(int i=0;i<10;i++){ Thread temp =new Thread(new Runnable() { @Override public void run() { Set setId = new HashSet<>(); SnowflakeIdFactory idWorker = idFactories.get(Thread.currentThread().getName()); for(int j=0;j setOne = new HashSet<>(); Set setTow = new HashSet<>(); long start = System.currentTimeMillis(); for (int i = 0; i < n; i++) { setOne.add(idWorker.nextId());//加入set } long end1 = System.currentTimeMillis() - start; log.info("第一批ID预计生成{}个,实际生成{}个<<<<*>>>>共耗时:{}",n,setOne.size(),end1); for (int i = 0; i < n; i++) { setTow.add(idWorker2.nextId());//加入set } long end2 = System.currentTimeMillis() - start; log.info("第二批ID预计生成{}个,实际生成{}个<<<<*>>>>共耗时:{}",n,setTow.size(),end2); setOne.addAll(setTow); log.info("合并总计生成ID个数:{}",setOne.size()); } public static void testPerSecondProductIdNums(){ SnowflakeIdFactory idWorker = new SnowflakeIdFactory(1, 2); long start = System.currentTimeMillis(); int count = 
0; for (int i = 0; System.currentTimeMillis()-start<1000; i++,count=i) { /** 测试方法一: 此用法纯粹的生产ID,每秒生产ID个数为400w+ */ //idWorker.nextId(); /** 测试方法二: 在log中打印,同时获取ID,此用法生产ID的能力受限于log.error()的吞吐能力. * 每秒徘徊在10万左右. */ log.info(""+idWorker.nextId()); } long end = System.currentTimeMillis()-start; System.out.println(end); System.out.println(count); } public static void main(String[] args) { /** case1: 测试每秒生产id个数? * 结论: 每秒生产id个数400w+ */ //testPerSecondProductIdNums(); /** case2: 单线程-测试多个生产者同时生产N个id,验证id是否有重复? * 结论: 验证通过,没有重复. */ //testProductId(1,2,10000);//验证通过! //testProductId(1,2,20000);//验证通过! /** case3: 多线程-测试多个生产者同时生产N个id, 全部id在全局范围内是否会重复? * 结论: 验证通过,没有重复. */ try { testProductIdByMoreThread(1,2,100000);//单机测试此场景,性能损失至少折半! } catch (InterruptedException e) { e.printStackTrace(); } } }

优点:

1.不依赖第三方

2.实现简单

3.适用分库分表

你可能感兴趣的:(java开发)