Background of distributed transactions and distributed locks:
As a monolithic application grows with the business, it becomes tightly coupled and hard to extend, so it gets split into distributed services to keep each module independent. In such a distributed system, databases and tables are split as well, and data operations that cross module boundaries must remain atomic, which is what distributed transaction solutions are for. As the user base grows, core modules are deployed in primary/standby mode to reduce the load on any single service; the same method can then be invoked concurrently from several instances, and data safety has to be guaranteed, which is what distributed lock solutions are for.
How it works:
Two-phase commit based on the XA protocol:
XA is a distributed transaction protocol proposed by Tuxedo. It has two main parts: the transaction manager and the local resource managers. The local resource managers are usually the databases themselves; commercial databases such as Oracle and DB2 implement the XA interface. The transaction manager acts as the global coordinator and is responsible for committing or rolling back each local resource. XA implements a distributed transaction in two phases: in the prepare phase the transaction manager asks every resource manager whether it is ready to commit; in the commit phase it orders all of them to commit only if every participant voted yes, otherwise it orders all of them to roll back.
Overall, the XA protocol is fairly simple, and once a commercial database implements it, the cost of using distributed transactions is low. XA has a serious weakness, though: performance. On order-placement paths, where concurrency is typically very high, XA cannot keep up. Commercial databases support XA well, but MySQL's support is weaker: its XA implementation does not log the prepare phase, so a primary/standby switchover can leave the primary and the standby inconsistent. Many NoSQL stores do not support XA at all, which makes XA's applicable scenarios rather narrow.
Implementation steps for jta-atomikos (based on the XA protocol):
1. Configure multiple data sources
a. Multi-datasource configuration in yml
spring:
  jta:
    # unique identifier of the transaction manager
    transaction-manager-id: txManager
    log-dir: transaction-logs
    atomikos:
      datasource:
        borrow-connection-timeout: 10000
        min-pool-size: 5
        max-pool-size: 10
      properties:
        # transaction timeout: 300 000 ms (default 10 000 ms)
        default-jta-timeout: 300000
        max-actives: 50
        max-timeout: 300000
        enable-logging: true
        logBaseDir: transaction-logs
  datasource:
    type: com.alibaba.druid.pool.xa.DruidXADataSource
    druid:
      master:
        name: master
        url: jdbc:mysql://localhost:3306/demo?useSSL=false&autoReconnect=true&useUnicode=true&characterEncoding=UTF-8&noDatetimeStringSync=true&zeroDateTimeBehavior=CONVERT_TO_NULL&serverTimezone=Asia/Shanghai
        username: root
        # to use an encrypted druid password, configure both connection-properties and filters: config
        password: 123456
        # connection-properties: config.decrypt=true;config.decrypt.key=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAI+7x/MxFWgNSk2saE3iSoBwdpTbjozCtnvhh/Fk4UF/1tG7S11/uBR7kGnQqfo27ytkb1wJqsmtZ4ImQqzNVosCAwEAAQ==
        initialSize: 10
        minIdle: 10
        maxActive: 100
        maxWait: 60000
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
        timeBetweenEvictionRunsMillis: 60000
        minEvictableIdleTimeMillis: 300000
        validationQuery: SELECT 1 FROM DUAL
        validationQueryTimeout: 10000
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        statViewServlet:
          enabled: true
          urlPattern: /druid/*
          #login-username: admin
          #login-password: admin
        # when the password is encrypted, filters must include "config", otherwise the connection fails
        filters: config,stat,wall,log4j2
      second:
        name: second
        url: jdbc:mysql://localhost:3306/test?useSSL=false&autoReconnect=true&useUnicode=true&characterEncoding=UTF-8&noDatetimeStringSync=true&zeroDateTimeBehavior=CONVERT_TO_NULL&serverTimezone=Asia/Shanghai
        username: root
        password: 123456
        initialSize: 10
        minIdle: 10
        maxActive: 100
        maxWait: 60000
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
        timeBetweenEvictionRunsMillis: 60000
        minEvictableIdleTimeMillis: 300000
        validationQuery: SELECT 1 FROM DUAL
        validationQueryTimeout: 10000
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        statViewServlet:
          enabled: true
          urlPattern: /druid/*
          #login-username: admin
          #login-password: admin
        filters: stat,wall,log4j2
b. Property configuration classes
@Data
public class DataSourceProperties {
    private String name;
    private String url;
    private String username;
    private String password;
    private Integer initialSize;
    private Integer maxActive;
    private Integer minIdle;
    private Integer maxWait;
    private Boolean poolPreparedStatements;
    private Integer maxPoolPreparedStatementPerConnectionSize;
    private Integer timeBetweenEvictionRunsMillis;
    private Integer minEvictableIdleTimeMillis;
    private String validationQuery;
    private Integer validationQueryTimeout;
    private Boolean testWhileIdle;
    private Boolean testOnBorrow;
    private Boolean testOnReturn;
    private String filters;
    // private String connectionProperties;
}
@Data
@EqualsAndHashCode(callSuper = true)
@Validated
@Component
@ConfigurationProperties(prefix = "spring.datasource.druid.master")
public class DataSourceMasterProperties extends DataSourceProperties {
}
@Data
@EqualsAndHashCode(callSuper = true)
@Validated
@Component
@ConfigurationProperties(prefix = "spring.datasource.druid.second")
public class DataSourceSecondProperties extends DataSourceProperties {
}
c. A thread-safe holder for the current DataSourceType
import java.sql.Connection;
import java.util.concurrent.ConcurrentHashMap;
import com.navinfo.entity.Constans;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Thread-safe holder for the DataSourceType of the current thread
 * @author Administrator
 *
 */
public class DataSourceContextHolder {

    private static Logger logger = LoggerFactory.getLogger(DataSourceContextHolder.class);

    private static final ThreadLocal<String> CONTEXT_HOLDER = new ThreadLocal<String>();

    public static ConcurrentHashMap<String, Connection> connectionMap = new ConcurrentHashMap<>();

    /**
     * Set the data source for the current thread
     */
    public static void setDateSoureType(String dataSourceType) {
        logger.info("Switching data source to: " + dataSourceType);
        CONTEXT_HOLDER.set(dataSourceType);
    }

    /**
     * Get the data source of the current thread
     */
    public static String getDateSoureType() {
        String dsType = CONTEXT_HOLDER.get();
        if (dsType == null) {
            logger.info("No data source set for the current thread, falling back to the default");
            // no data source set for the current thread, use the default (master)
            setDateSoureType(Constans.MASTER);
        }
        return CONTEXT_HOLDER.get();
    }

    /**
     * Clear the data source of the current thread
     */
    public static void clearDateSoureType() {
        CONTEXT_HOLDER.remove();
    }

    /**
     * Choose which data source the current thread should use
     *
     * @param dataSourceType
     */
    public static void chooseDataSource(String dataSourceType) {
        switch (dataSourceType) {
            case Constans.SECOND:
                setDateSoureType(Constans.SECOND);
                break;
            default:
                setDateSoureType(Constans.MASTER);
                break;
        }
    }
}
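To make the routing concrete, here is a minimal sketch of how a caller could switch data sources around a query. The service, mapper, and entity names are hypothetical; only DataSourceContextHolder and Constans come from the code above.

import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class ReportService {

    @Autowired
    private OrderMapper orderMapper; // hypothetical mapper interface

    public List<Order> loadFromSecond() {
        // route the current thread to the "second" data source before the query runs
        DataSourceContextHolder.chooseDataSource(Constans.SECOND);
        try {
            return orderMapper.selectAll(); // hypothetical query method
        } finally {
            // always clear the ThreadLocal so pooled threads fall back to the default source
            DataSourceContextHolder.clearDateSoureType();
        }
    }
}

Clearing the ThreadLocal in finally matters because web containers reuse threads; a stale value would silently route the next request to the wrong database.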
d. Dynamic data source configuration
/**
 * Dynamic data source (must extend AbstractRoutingDataSource).
 * It uses DataSourceContextHolder to look up the DataSourceType of the current thread.
 * @author kzx
 *
 */
public class DynamicDataSource extends AbstractRoutingDataSource {

    @Override
    protected Object determineCurrentLookupKey() {
        return DataSourceContextHolder.getDateSoureType();
    }
}
e. Multi-datasource configuration
/**
 * @DependsOn({}): a class annotated with @DependsOn("xxx") is guaranteed to be instantiated after xxx.
 * Note: only the routing data source below is marked @Primary, so the individual pools are always injected by name.
 * @param properties
 * @return
 */
@Bean(name = "masterDataSource")
public DataSource masterDataSource(DataSourceMasterProperties properties) {
    return build(properties);
}

@Bean(name = "secondDataSource")
public DataSource secondDataSource(DataSourceSecondProperties properties) {
    return build(properties);
}

@SneakyThrows
private DataSource build(DataSourceProperties properties) {
    DruidXADataSource druid = new DruidXADataSource();
    // String connectionProperties = properties.getConnectionProperties();
    // if (StringUtils.isNotBlank(connectionProperties)) {
    //     // not blank means the password is encrypted; this must be set, otherwise the password cannot be decrypted
    //     druid.setConnectionProperties(connectionProperties);
    // }
    druid.setName(properties.getName());
    druid.setUrl(properties.getUrl());
    druid.setUsername(properties.getUsername());
    druid.setPassword(properties.getPassword());
    druid.setInitialSize(properties.getInitialSize());
    druid.setMinIdle(properties.getMinIdle());
    druid.setMaxActive(properties.getMaxActive());
    druid.setMaxWait(properties.getMaxWait());
    druid.setPoolPreparedStatements(properties.getPoolPreparedStatements());
    druid.setMaxPoolPreparedStatementPerConnectionSize(properties.getMaxPoolPreparedStatementPerConnectionSize());
    druid.setTimeBetweenEvictionRunsMillis(properties.getTimeBetweenEvictionRunsMillis());
    druid.setMinEvictableIdleTimeMillis(properties.getMinEvictableIdleTimeMillis());
    druid.setValidationQuery(properties.getValidationQuery());
    druid.setValidationQueryTimeout(properties.getValidationQueryTimeout());
    druid.setTestWhileIdle(properties.getTestWhileIdle());
    druid.setTestOnBorrow(properties.getTestOnBorrow());
    druid.setTestOnReturn(properties.getTestOnReturn());
    druid.setFilters(properties.getFilters());

    // The DataSource cannot be the plain DruidDataSource: the DruidXADataSource has to be wrapped
    // in an AtomikosDataSourceBean so that it participates in XA transactions.
    // see https://juejin.im/post/5e186601e51d4530591783ec
    AtomikosDataSourceBean atomikosDataSourceBean = new AtomikosDataSourceBean();
    atomikosDataSourceBean.setXaDataSource(druid);
    // atomikosDataSourceBean.setXaProperties();
    // setXaDataSourceClassName is not required when the XA data source instance is set directly
    atomikosDataSourceBean.setUniqueResourceName(properties.getName());
    atomikosDataSourceBean.setPoolSize(10);
    atomikosDataSourceBean.setMinPoolSize(5);
    atomikosDataSourceBean.setMaxPoolSize(10);
    // return the Atomikos wrapper, not the raw Druid pool, otherwise XA is bypassed
    return atomikosDataSourceBean;
}
/**
 * Dynamic (routing) data source
 * @param masterDataSource
 * @param secondDataSource
 * @return
 */
@Bean(name = "dynamicDataSource")
@Primary
public DataSource dataSource(@Qualifier("masterDataSource") DataSource masterDataSource,
                             @Qualifier("secondDataSource") DataSource secondDataSource) {
    Map<Object, Object> targetDataSource = new HashMap<>();
    targetDataSource.put(Constans.MASTER, masterDataSource);
    targetDataSource.put(Constans.SECOND, secondDataSource);
    DynamicDataSource dynamicDataSource = new DynamicDataSource();
    dynamicDataSource.setTargetDataSources(targetDataSource);
    dynamicDataSource.setDefaultTargetDataSource(masterDataSource);
    return dynamicDataSource;
}
f. Configure the SqlSessionFactory
@Primary
@Bean("SqlSessionFactory")
public SqlSessionFactory SqlSessionFactory(@Qualifier("dynamicDataSource") DataSource dataSource) throws Exception {
    MybatisSqlSessionFactoryBean factoryBean = new MybatisSqlSessionFactoryBean();
    factoryBean.setDataSource(dataSource);
    MybatisConfiguration configuration = new MybatisConfiguration();
    configuration.setDefaultScriptingLanguage(MybatisXMLLanguageDriver.class);
    // map underscore column names to camelCase properties
    configuration.setMapUnderscoreToCamelCase(true);
    configuration.setJdbcTypeForNull(JdbcType.NULL);
    factoryBean.setConfiguration(configuration);
    factoryBean.setTransactionFactory(new DynamicTransactionsFactory());
    // location of the mapper xml files
    factoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath*:com.xx.mapper/*.xml"));
    factoryBean.setPlugins(
            // pagination plugin
            new PaginationInterceptor(),
            // optimistic lock plugin
            new OptimisticLockerInterceptor()
    );
    return factoryBean.getObject();
}
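One piece not shown in the snippet above is the mapper scan that ties the mapper interfaces to this SqlSessionFactory. A minimal sketch, assuming the mapper interfaces live under com.xx.mapper (the package is an assumption mirroring the mapperLocations above):

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Configuration;

@Configuration
@MapperScan(basePackages = "com.xx.mapper", sqlSessionFactoryRef = "SqlSessionFactory")
public class MapperScanConfig {
    // mybatis-spring creates the mapper beans and binds them
    // to the SqlSessionFactory bean named "SqlSessionFactory" above
}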
2. Distributed transaction configuration
/**
 * Distributed transaction configuration
 * @return
 * @throws Throwable
 */
@Bean(name = "userTransaction")
public UserTransaction userTransaction() throws Throwable {
    UserTransactionImp userTransactionImp = new UserTransactionImp();
    // transaction timeout, in seconds
    userTransactionImp.setTransactionTimeout(10000);
    return userTransactionImp;
}

@Bean(name = "atomikosTransactionManager")
public TransactionManager atomikosTransactionManager() {
    UserTransactionManager userTransactionManager = new UserTransactionManager();
    userTransactionManager.setForceShutdown(false);
    return userTransactionManager;
}

/**
 * Transaction manager configuration
 * @return
 * @throws Throwable
 */
@Bean(name = "txManager")
@DependsOn({ "userTransaction", "atomikosTransactionManager" })
public PlatformTransactionManager transactionManager() throws Throwable {
    UserTransaction userTransaction = userTransaction();
    TransactionManager atomikosTransactionManager = atomikosTransactionManager();
    return new JtaTransactionManager(userTransaction, atomikosTransactionManager);
}
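With the txManager bean in place, a JTA transaction spanning both data sources can be driven by a plain @Transactional annotation. The sketch below is illustrative only: the service, mappers, and entities are hypothetical, and it assumes the DynamicTransactionsFactory above lets each statement obtain its connection through the routing data source; only the transaction manager name and the data source keys come from the configuration shown earlier.

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class TransferService {

    @Autowired
    private OrderMapper orderMapper;     // hypothetical mapper writing to the master database
    @Autowired
    private AccountMapper accountMapper; // hypothetical mapper writing to the second database

    /**
     * Both inserts enlist in the same JTA transaction coordinated by Atomikos;
     * if either statement fails, both XA branches are rolled back.
     */
    @Transactional(transactionManager = "txManager", rollbackFor = Exception.class)
    public void createOrderAndDebit(Order order, AccountDebit debit) {
        DataSourceContextHolder.chooseDataSource(Constans.MASTER);
        orderMapper.insert(order);

        DataSourceContextHolder.chooseDataSource(Constans.SECOND);
        accountMapper.insert(debit);
    }
}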
How the Redis lock works:
The Redis lock in this project relies on the semantics of the Redis SETNX command (SETNX sets the key to the given value only if the key does not exist; it returns 1 when the value was set and 0 when it was not).
Sample code:
@SpringBootTest
class RedislockApplicationTests {

    @Autowired
    private StringRedisTemplate redisTemplate;

    /**
     * shared lock key
     */
    private final String LOCK_KEY = "lock";
    private final String VALUE = "value";

    @Test
    public void redisLockTest() {
        // acquire the lock with an expiry so a crashed client cannot cause a deadlock
        Boolean locked = false;
        try {
            locked = redisTemplate.opsForValue().setIfAbsent(LOCK_KEY, VALUE, Duration.ofSeconds(10));
            if (Boolean.TRUE.equals(locked)) {
                // lock acquired
                // run the business logic
            } else {
                // failed to acquire the lock
                // fail fast and respond to the client
            }
        } finally {
            // release the lock, but only if we acquired it
            if (Boolean.TRUE.equals(locked)) {
                redisTemplate.delete(LOCK_KEY);
            }
        }
    }
}
1. What if the program crashes and never releases the lock?
The sample handles this by giving the key an expiry, so when the program fails without releasing the lock the key expires on its own. The downside is that it is hard to pick an expiry time that is always reasonable.
2. What goes wrong when the business logic runs longer than the lock's expiry, and how can it be handled?
a. If a request's business logic takes longer than the lock's lifetime, the lock expires mid-execution and another request acquires it. When the first request finishes and simply deletes the key, it ends up deleting a lock created by another request.
Solution:
Introduce a random value when creating the lock and check it before deleting the lock, as shown below (a Lua-based atomic variant of this check-and-delete follows the code).
The code:
@SpringBootTest
class RedislockApplicationTests {

    @Autowired
    private StringRedisTemplate redisTemplate;

    /**
     * shared lock key
     */
    private final String LOCK_KEY = "lock";

    @Test
    public void redisLockTest() {
        // acquire the lock with an expiry to avoid deadlocks, and store a random value
        // so we can tell our lock apart from one created by another request
        UUID uid = UUID.randomUUID();
        String str = uid.toString();
        try {
            Boolean b = redisTemplate.opsForValue().setIfAbsent(LOCK_KEY, str, Duration.ofSeconds(10));
            if (Boolean.TRUE.equals(b)) {
                // lock acquired
                // run the business logic
            } else {
                // failed to acquire the lock
                // fail fast and respond to the client
            }
        } finally {
            // release the lock only if it still holds our random value
            if (str.equals(redisTemplate.opsForValue().get(LOCK_KEY))) {
                redisTemplate.delete(LOCK_KEY);
            }
        }
    }
}
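One caveat about the release above: the GET and the DEL are two separate commands, so the key could expire and be re-acquired by another client in between them. A Lua script makes the compare-and-delete atomic, which is also the approach Redisson takes (see below). A minimal sketch using the same StringRedisTemplate as in the test class above:

// imports needed: org.springframework.data.redis.core.script.DefaultRedisScript, java.util.Collections

// compare the stored token with ours and delete only on a match, in a single atomic step
private static final DefaultRedisScript<Long> UNLOCK_SCRIPT = new DefaultRedisScript<>(
        "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end",
        Long.class);

private boolean unlock(String lockKey, String token) {
    Long deleted = redisTemplate.execute(UNLOCK_SCRIPT, Collections.singletonList(lockKey), token);
    return deleted != null && deleted == 1L;
}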
b. If the lock expires early, more than one request can end up holding it, so the concurrency problem is still there.
Solution:
a. Implement a watchdog by hand: once a client acquires the lock, start a scheduled task that runs every 10 seconds (ideally configurable) to check whether the business has finished. The check is simply whether the lock key still exists; if it does, extend its expiry. (A minimal sketch follows below.)
b. Alternatively, start a new thread that loops every 10 seconds, queries whether the key still exists, and renews it if so.
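A minimal sketch of the hand-rolled watchdog described in option a. The 10-second check interval and the 30-second renewal are illustrative values, not taken from the original text; redisTemplate is the StringRedisTemplate from the earlier samples.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// shared scheduler for lock renewals
private final ScheduledExecutorService renewalPool = Executors.newSingleThreadScheduledExecutor();

/** Start renewing after the lock is acquired; cancel the returned future once the business logic is done. */
private ScheduledFuture<?> startWatchdog(String lockKey, String token) {
    return renewalPool.scheduleAtFixedRate(() -> {
        // renew only if the key still exists and still holds our token
        if (token.equals(redisTemplate.opsForValue().get(lockKey))) {
            redisTemplate.expire(lockKey, 30, TimeUnit.SECONDS);
        }
    }, 10, 10, TimeUnit.SECONDS);
}

// usage:
// ScheduledFuture<?> watchdog = startWatchdog(LOCK_KEY, str);
// try { /* business logic */ } finally { watchdog.cancel(true); /* then release the lock as above */ }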
3. If 1,000 requests arrive at the same time, only one of them acquires the lock and runs the business logic. What about the requests that fail to get the lock?
Option 1: As in the sample, fail fast: a request that does not get the lock immediately responds with something like "too many people in the queue, please try again later". The user experience is poor.
Option 2: Requests that do not get the lock keep polling for it, which adds CPU pressure. (A bounded-retry sketch follows below.)
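A middle ground between the two options is a bounded retry with a short sleep: the request waits briefly for the lock a few times and only then fails fast. A minimal sketch (attempt count, sleep, and expiry are illustrative values; redisTemplate is the StringRedisTemplate from the earlier samples):

/** Try to acquire the lock a few times before giving up, rather than failing instantly or spinning forever. */
private boolean tryLockWithRetry(String lockKey, String token, int maxAttempts, long sleepMillis)
        throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        Boolean acquired = redisTemplate.opsForValue().setIfAbsent(lockKey, token, Duration.ofSeconds(10));
        if (Boolean.TRUE.equals(acquired)) {
            return true;
        }
        Thread.sleep(sleepMillis); // back off briefly before the next attempt
    }
    return false; // attempts exhausted, fail fast as in option 1
}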
How the Redisson lock works:
a. Redisson executes all of its lock commands as Lua scripts, which keeps each operation atomic.
b. Redisson ships a watchdog; the watchdog logic ensures that locks do not turn into deadlocks.
Sample code:
@GetMapping("testLock")
public String testRedisLock() throws InterruptedException {
    RLock test = redisInvokeService.lock("test");
    try {
        // wait up to 2 seconds for the lock, hold it for at most 2 seconds
        if (test.tryLock(2, 2, TimeUnit.SECONDS)) {
            Object num = redisService.get("num1");
            if (Integer.valueOf(num.toString()) > 0) {
                redisService.decr("num1", 1L);
                Thread.sleep(1000);
                System.out.println("tickets remaining: " + redisService.get("num1"));
            }
        }
    } catch (IllegalMonitorStateException e) {
        System.out.println("tried to unlock, but the current thread does not hold the lock");
    } finally {
        // only unlock if this thread actually holds the lock
        if (test.isHeldByCurrentThread()) {
            test.unlock();
        }
    }
    return "success";
}
For the lock-expires-before-the-business-finishes problem described above, Redisson implements the watchdog pattern internally, so there is no need to check and renew by hand. Redisson's timer is built on the HashedWheelTimer from the netty-common package.
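One detail worth noting, based on Redisson's documented behavior rather than on the text above: the watchdog only renews a lock when no explicit leaseTime is given. The tryLock(2, 2, TimeUnit.SECONDS) call above passes a 2-second leaseTime, so that lock simply expires after 2 seconds. To let the watchdog keep extending the lock, acquire it without a leaseTime; in this fragment redissonClient stands for an injected RedissonClient (an assumption about the surrounding code), and tryLock throws InterruptedException:

RLock lock = redissonClient.getLock("test");
// wait up to 2 seconds for the lock, but leave the expiry to the watchdog
// (default lockWatchdogTimeout is 30 seconds, renewed periodically while the thread holds the lock)
if (lock.tryLock(2, TimeUnit.SECONDS)) {
    try {
        // business logic that may run longer than expected
    } finally {
        if (lock.isHeldByCurrentThread()) {
            lock.unlock();
        }
    }
}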
How the Zookeeper (Curator) lock works:
Sample code:
/**
 * @author KangZhiXing
 * @date 2022/5/12
 */
@Configuration
@ConfigurationProperties(prefix = "zookeeper.curator")
@Data
public class ZookeeperConfig {

    /**
     * cluster address
     */
    private String ip;

    /**
     * connection timeout
     */
    private Integer connectionTimeoutMs;

    /**
     * session timeout
     */
    private Integer sessionTimeOut;

    /**
     * sleep time between retries
     */
    private Integer sleepMsBetweenRetry;

    /**
     * maximum number of retries
     */
    private Integer maxRetries;

    /**
     * namespace (name of the parent node)
     */
    private String namespace;

    /**
     * Session reconnection policies:
     * - RetryPolicy retryPolicy = new RetryOneTime(3000);
     *   reconnect once, three seconds later
     * - RetryPolicy retryPolicy = new RetryNTimes(3, 3000);
     *   reconnect every three seconds, at most three times
     * - RetryPolicy retryPolicy = new RetryUntilElapsed(10000, 3000);
     *   reconnect every three seconds, giving up once the total elapsed time exceeds 10 seconds
     * - RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
     *   the retry interval grows over time, following
     *   baseSleepTimeMs * Math.max(1, random.nextInt(1 << (retryCount + 1)))
     *   with baseSleepTimeMs = 1000 and maxRetries = 3 in this example
     *
     * @return
     * @throws Exception
     */
    @Bean(value = "curatorClient")
    @Conditional(CustomCondition.class)
    public CuratorFramework curatorClient() throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.builder()
                // connection address; separate cluster nodes with commas
                .connectString(ip)
                .connectionTimeoutMs(connectionTimeoutMs)
                // session timeout
                .sessionTimeoutMs(sessionTimeOut)
                // retry policy
                .retryPolicy(new ExponentialBackoffRetry(sleepMsBetweenRetry, maxRetries))
                // namespace: nodes operated on through this client are created under this parent node
                .namespace(namespace)
                .build();
        client.start();
        // register watchers
        ZookeeperWatches watches = new ZookeeperWatches(client);
        watches.zNodeWatcher();
        watches.zNodeChildrenWatcher();
        return client;
    }
}
@ApiOperation(value = "reentrant lock", notes = "the same thread can acquire the lock again")
@GetMapping("testLock")
public String testRedisLock() throws Exception {
    InterProcessMutex lock = new InterProcessMutex(client, "/lock");
    try {
        if (lock.acquire(2, TimeUnit.SECONDS)) {
            System.out.println(Thread.currentThread().getName() + " acquired reentrant lock 1");
            // the same thread may acquire the lock again, as long as it releases it the same number of times:
            // if (lock.acquire(1, TimeUnit.SECONDS)) {
            //     System.out.println(Thread.currentThread().getName() + " acquired reentrant lock 2");
            // }
            Object num = redisService.get("num");
            if (Integer.valueOf(num.toString()) > 0) {
                redisService.decr("num", 1L);
                Thread.sleep(1000);
                System.out.println("tickets remaining: " + redisService.get("num"));
            }
            lock.release();
            // lock.release();
        }
    } catch (IllegalMonitorStateException e) {
        System.out.println(Thread.currentThread().getName() + " exception while releasing the lock: " + e.getMessage());
    }
    return "success";
}
@ApiOperation(value = "non-reentrant lock", notes = "the same thread cannot acquire the lock twice")
@GetMapping("testLock1")
public String testRedisLock1() throws Exception {
    InterProcessSemaphoreMutex lock = new InterProcessSemaphoreMutex(client, "/lock1");
    try {
        lock.acquire();
        System.out.println(Thread.currentThread().getName() + ": acquired lock 1");
        // InterProcessSemaphoreMutex is not reentrant, so the second acquire below blocks
        lock.acquire();
        System.out.println(Thread.currentThread().getName() + ": acquired lock 2");
        // if (lock.acquire(1, TimeUnit.SECONDS)) {
        //     System.out.println(Thread.currentThread().getName() + " acquired lock 2");
        // }
        Object num = redisService.get("num");
        if (Integer.valueOf(num.toString()) > 0) {
            redisService.decr("num", 1L);
            Thread.sleep(1000);
            System.out.println("tickets remaining: " + redisService.get("num"));
        }
        lock.release();
        lock.release();
    } catch (IllegalMonitorStateException e) {
        System.out.println(Thread.currentThread().getName() + " exception while releasing the lock: " + e.getMessage());
    } catch (Exception e) {
        System.out.println(Thread.currentThread().getName() + " exception: " + e.getMessage());
    }
    return "success";
}
These are some solutions from my own hands-on work, and the code snippets are excerpts from real projects. What matters most is understanding the underlying logic and building on it. The solutions are not necessarily optimal and more will be added later. Corrections are welcome, and if you have better approaches, feel free to share them in a private message or in the comments.