Spring Boot 2.x + Druid + MyBatis database read/write splitting, with transactions that actually work

[Please credit this post if you repost it] [Source code is on GitHub]

This came out of a project requirement: I needed read/write splitting, but with many of the implementations found online, transactions stop working once the data source is switched.

The code here keeps transactions working after switching data sources. One thing to remember, though: a single transaction can only operate on one database at a time.

If you need a transaction that spans several data sources at once, please refer to other articles; I remember one that covers that case in detail.

I won't go into a detailed write-up here (still working overtime today; I'll tidy this up when I have time), so let's go straight to the code.

Full source code: https://github.com/q258523454/spring-boot-mybatis-write-read

Walking straight through the flow, starting with the test table schemas:



CREATE TABLE `teacher_transaction` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `username` varchar(255) DEFAULT NULL,
  `password` varchar(11) DEFAULT NULL,
  `regTime` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=224 DEFAULT CHARSET=utf8;

CREATE TABLE `student_transaction` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `username` varchar(255) DEFAULT NULL,
  `password` varchar(11) DEFAULT NULL,
  `regTime` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=275 DEFAULT CHARSET=utf8;
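
To make the rest of the walkthrough concrete, here is a minimal sketch of an entity and a MyBatis mapper for student_transaction. The package names follow the application.yml shown below (type-aliases-package: com.entity, mapper XML under classpath:mapper/), but the class and method names (StudentTransaction, StudentTransactionMapper, insert) are illustrative assumptions, not necessarily what the repository uses.

package com.entity;

public class StudentTransaction {
    private Integer id;
    private String username;
    private String password;
    private String regTime;
    // getters and setters omitted for brevity
}

package com.mapper;

import com.entity.StudentTransaction;
import org.apache.ibatis.annotations.Mapper;

@Mapper
public interface StudentTransactionMapper {
    // backed by an <insert> statement in one of the XML files under classpath:mapper/
    int insert(StudentTransaction record);
}

Next comes the custom annotation used to select the data source: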
package com.datasource.annotation;

import com.datasource.entity.DataSourceEnum;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Custom annotation for selecting the data source
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface DataSourceAnnotation {

    DataSourceEnum value() default DataSourceEnum.MASTER;    // defaults to the master data source

}
package com.datasource.aop;

import com.datasource.annotation.DataSourceAnnotation;
import com.datasource.util.DataSourceContextHolder;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import java.lang.reflect.Method;

/**
 * Aspect that sets the data source context based on the annotation
 */
@Aspect
@Order(1)    // Switch the data source before the database transaction starts: the aspect must run before the transaction advice, otherwise the transaction only applies to the default data source (smaller value = higher precedence)
@Component
public class DataSourceAspect {

    private Logger log = LoggerFactory.getLogger(this.getClass());

    // Pointcut; note that it targets the service layer
    @Pointcut("execution(* com.service..*.*(..))")
    public void aspect() {
    }

    @Before("aspect()")
    public void before(JoinPoint point) {
        Object target = point.getTarget();
        String method = point.getSignature().getName();
        Class<?> clazz = target.getClass();
        Class<?>[] parameterTypes = ((MethodSignature) point.getSignature()).getMethod().getParameterTypes();
        try {
            Method m = clazz.getMethod(method, parameterTypes);
            if (m.isAnnotationPresent(DataSourceAnnotation.class)) {
                DataSourceAnnotation data = m.getAnnotation(DataSourceAnnotation.class);
                DataSourceContextHolder.putDataSource(data.value().getName());
                log.info("----------- switching data source, value to set -----: {}", data.value().getName());
                log.info("----------- switching data source, value now held in context -----: {}", DataSourceContextHolder.getCurrentDataSource());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // When the advised method finishes, clear the thread-local so later calls fall back to the default data source
    @After("aspect()")
    public void after(JoinPoint joinPoint) {
        DataSourceContextHolder.removeCurrentDataSource();
        log.info("Reset data source: restore DataSource to [{}] in Method [{}]", DataSourceContextHolder.getCurrentDataSource(), joinPoint.getSignature());
    }
}
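
Usage then looks like the sketch below: annotate a service method with @DataSourceAnnotation and @Transactional, and because the aspect above runs at @Order(1) the routing key is already set by the time the transaction manager opens its connection. StudentService and the StudentTransactionMapper used here are the illustrative names from the sketch above, not necessarily what the repository ships.

package com.service;

import com.datasource.annotation.DataSourceAnnotation;
import com.datasource.entity.DataSourceEnum;
import com.entity.StudentTransaction;
import com.mapper.StudentTransactionMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class StudentService {

    @Autowired
    private StudentTransactionMapper studentTransactionMapper;

    // Routed to the slave data source. Because the aspect runs at @Order(1), the
    // ThreadLocal key is already set when the transaction opens its connection,
    // so the insert -- and any rollback -- happens on the slave.
    @DataSourceAnnotation(DataSourceEnum.SLAVE)
    @Transactional(rollbackFor = Exception.class)
    public void addStudent(StudentTransaction student) {
        studentTransactionMapper.insert(student);
    }

    // Same routing, but the exception thrown after the insert forces a rollback,
    // which is how you can check that the transaction really applies to the slave.
    @DataSourceAnnotation(DataSourceEnum.SLAVE)
    @Transactional(rollbackFor = Exception.class)
    public void addStudentThenFail(StudentTransaction student) {
        studentTransactionMapper.insert(student);
        throw new RuntimeException("force rollback");
    }
}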

 

 

package com.datasource.config;

import com.alibaba.druid.pool.DruidDataSource;
import com.datasource.router.DataSourceRouter;
import com.datasource.entity.DataSourceEnum;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.PlatformTransactionManager;

import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;

/**
 * Data source configuration
 */
@Configuration
public class DataSourceConfig {

    private Logger log = LoggerFactory.getLogger(this.getClass());

    public final static String masterTransactionManager = "masterTransactionManager";

    public final static String slaveTransactionManager = "slaveTransactionManager";

    /***
     * Note that the Druid connection pool is used here
     */
    @Bean(name = "dbMaster")
    @ConfigurationProperties(prefix = "spring.datasource.master")
    public DataSource dbMaster() {
        log.info("creating master data source");
        return new DruidDataSource();
    }

    @Bean(name = "dbSlave")
    @ConfigurationProperties(prefix = "spring.datasource.slave")
    public DataSource dbSlave() {
        log.info("creating slave data source");
        return new DruidDataSource();
    }


    /***
     * @Primary: when several beans of the same type exist, the one annotated with @Primary is injected by default.
     * @Qualifier: narrows injection down to the bean with the given name.
     */
    @Primary
    @Bean(name = "dataSourceRouter") // the routing data source (see DataSourceRouter below)
    public DataSource dataSourceRouter(@Qualifier("dbMaster") DataSource master, @Qualifier("dbSlave") DataSource slave) {
        DataSourceRouter dataSourceRouter = new DataSourceRouter();
        log.info(" ---------------------- Druid configuration BEGIN ----------------------");
        DruidDataSource druidDataSourceMaster = (DruidDataSource) master;
        DruidDataSource druidDataSourceSlave = (DruidDataSource) slave;
        log.info("master: ");
        log.info("validation query: " + druidDataSourceMaster.getValidationQuery());
        log.info("min idle connections: " + druidDataSourceMaster.getMinIdle());
        log.info("removeAbandoned enabled: " + druidDataSourceMaster.isRemoveAbandoned());
        log.info("removeAbandoned timeout (seconds): " + druidDataSourceMaster.getRemoveAbandonedTimeout());
        log.info("slave: ");
        log.info("validation query: " + druidDataSourceSlave.getValidationQuery());
        log.info("min idle connections: " + druidDataSourceSlave.getMinIdle());
        log.info("removeAbandoned enabled: " + druidDataSourceSlave.isRemoveAbandoned());
        log.info("removeAbandoned timeout (seconds): " + druidDataSourceSlave.getRemoveAbandonedTimeout());
        log.info(" ---------------------- Druid configuration END ----------------------");

        // register the target data sources
        Map<Object, Object> map = new HashMap<>(5);
        map.put(DataSourceEnum.MASTER.getName(), master);    // the key must match the value stored in the ThreadLocal
        map.put(DataSourceEnum.SLAVE.getName(), slave);
        // master is the default data source
        dataSourceRouter.setDefaultTargetDataSource(master);
        dataSourceRouter.setTargetDataSources(map);
        return dataSourceRouter;
    }

    // DataSourceTransactionManager bound to the routing data source for transaction management (rollback only ever applies within a single data source)
    @Bean(name = "transactionManager")
    public PlatformTransactionManager transactionManager(@Qualifier("dataSourceRouter") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }
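
    // NOTE: the MyBatis-related imports above suggest the project also wires MyBatis to the
    // routing data source. With mybatis-spring-boot-starter the SqlSessionFactory is
    // auto-configured from the @Primary data source (the router), so an explicit bean is
    // optional; if wired by hand it might look like this sketch (the bean names and the
    // manual wiring itself are assumptions; mapper location and alias package are taken
    // from application.yml below):
    @Bean(name = "sqlSessionFactory")
    public SqlSessionFactory sqlSessionFactory(@Qualifier("dataSourceRouter") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
        // every MyBatis session borrows its connection from the routing data source,
        // so each statement goes to whichever database the ThreadLocal key points at
        factoryBean.setDataSource(dataSource);
        factoryBean.setTypeAliasesPackage("com.entity");
        factoryBean.setMapperLocations(
                new PathMatchingResourcePatternResolver().getResources("classpath:mapper/*.xml"));
        return factoryBean.getObject();
    }

    @Bean
    public SqlSessionTemplate sqlSessionTemplate(@Qualifier("sqlSessionFactory") SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }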
}

 

package com.datasource.entity;

public enum DataSourceEnum {

	// master database
	MASTER("master"),
	// slave database
	SLAVE("slave");

	private String name;

	private DataSourceEnum(String name) {
		this.name = name;
	}

	public String getName() {
		return name;
	}

	public void setName(String name) {
		this.name = name;
	}
}
package com.datasource.router;

import com.datasource.util.DataSourceContextHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

import javax.sql.DataSource;

/***
 * AbstractRoutingDataSource is the key to switching data sources dynamically via AOP:
 *   1. AbstractRoutingDataSource#determineTargetDataSource() looks up the data source with
 *        Object lookupKey = determineCurrentLookupKey();
 *        DataSource dataSource = this.resolvedDataSources.get(lookupKey);
 *      determineCurrentLookupKey() is abstract, so the application decides which key to return.
 *   2. resolvedDataSources is built from targetDataSources in afterPropertiesSet()
 *      (which runs after the @Bean method has finished):
 *        Map.Entry entry : this.targetDataSources.entrySet()
 *   3. In XML or in a @Bean method, set defaultTargetDataSource (the default) and
 *      targetDataSources (the candidate data sources) on the router.
 *   4. A custom annotation plus an AOP aspect set the ThreadLocal value dynamically.
 *   5. When the DAO layer opens a database connection, the data source is looked up by the ThreadLocal key.
 */

// determineCurrentLookupKey() is invoked before each database access to obtain the key of the data source to use
public class DataSourceRouter extends AbstractRoutingDataSource {

    private Logger log = LoggerFactory.getLogger(this.getClass());

    @Override
    protected Object determineCurrentLookupKey() {
        log.info("current data source: " + DataSourceContextHolder.getCurrentDataSource());
        return DataSourceContextHolder.getCurrentDataSource();
    }


}
package com.datasource.util;

/**
 * ThreadLocal context holding the data source key for the current thread (the dynamic data source context)
 */
public class DataSourceContextHolder {
    private final static ThreadLocal<String> local = new ThreadLocal<>();

    public static void putDataSource(String name) {
        local.set(name);
    }

    public static String getCurrentDataSource() {
        return local.get();
    }

    public static void removeCurrentDataSource() {
        local.remove();
    }

}

The configuration file (application.yml) is as follows:

server:
  port: 8089

##mybatis
mybatis:
  mapper-locations: classpath:mapper/*.xml
  type-aliases-package: com.entity
  check-config-location: true
  configuration:
    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl



spring:
  datasource: # multiple data sources
    # type: com.alibaba.druid.pool.DruidDataSource
    # master database
    master:
      type: com.alibaba.druid.pool.DruidDataSource
      url: jdbc:mysql://localhost:3306/test1?useSSL=false&useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&serverTimezone=GMT%2B8
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
      # initial pool sizing
      initial-size: "${global.druid.db.initial-size}"
      min-idle: "${global.druid.db.min-idle}"
      max-active: "${global.druid.db.max-active}"
      # max wait time when acquiring a connection
      max-wait: "${global.druid.db.max-wait}"
      # how often the eviction thread checks for idle connections to close (ms)
      time-between-eviction-runs-millis: "${global.druid.db.time-between-eviction-runs-millis}"
      # minimum time a connection must stay idle before it can be evicted (ms)
      min-evictable-idle-time-millis: "${global.druid.db.min-evictable-idle-time-millis}"
      validation-query: "${global.druid.db.validation-query}"
      test-while-idle: "${global.druid.db.test-while-idle}"
      test-on-borrow: "${global.druid.db.test-on-borrow}"
      test-on-return: "${global.druid.db.test-on-return}"
      remove-abandoned: "${global.druid.db.remove-abandoned}"
      remove-abandoned-timeout: "${global.druid.db.remove-abandoned-timeout}"
      log-abandoned: "${global.druid.db.log-abandoned}"
    # slave database
    slave:
      type: com.alibaba.druid.pool.DruidDataSource
      # note: with Druid the property is 'url', not 'jdbcUrl'
      url: jdbc:mysql://localhost:3306/test2?useSSL=false&useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&serverTimezone=GMT%2B8
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
      # initial pool sizing
      initial-size: "${global.druid.db.initial-size}"
      min-idle: "${global.druid.db.min-idle}"
      max-active: "${global.druid.db.max-active}"
      # max wait time when acquiring a connection
      max-wait: "${global.druid.db.max-wait}"
      # how often the eviction thread checks for idle connections to close (ms)
      time-between-eviction-runs-millis: "${global.druid.db.time-between-eviction-runs-millis}"
      # minimum time a connection must stay idle before it can be evicted (ms)
      min-evictable-idle-time-millis: "${global.druid.db.min-evictable-idle-time-millis}"
      validation-query: "${global.druid.db.validation-query}"
      test-while-idle: "${global.druid.db.test-while-idle}"
      test-on-borrow: "${global.druid.db.test-on-borrow}"
      test-on-return: "${global.druid.db.test-on-return}"
      remove-abandoned: "${global.druid.db.remove-abandoned}"
      remove-abandoned-timeout: "${global.druid.db.remove-abandoned-timeout}"
      log-abandoned: "${global.druid.db.log-abandoned}"
# PageHelper pagination plugin
pagehelper:
  helper-dialect: mysql # supports Oracle, MySQL, MariaDB, SQLite, Hsqldb, PostgreSQL, etc.
  reasonable: true # page sanitizing; defaults to false, true clamps pageNum <= 0 and pageNum > pages
  support-methods-arguments: true
  params: count=countSql
  auto-runtime-dialect: true # with multiple data sources, use the pagination dialect matching whichever data source is currently in use
  close-conn: false # defaults to true (auto-close the connection obtained for dialect detection); set to false here so it is not closed
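
Finally, a quick way to verify that the transaction really follows the switched data source. The test below is only a sketch (it assumes JUnit 4 and the hypothetical StudentService/StudentTransaction sketched earlier, not necessarily what the repository ships): a committed insert should show up in test2.student_transaction (the slave), and an insert followed by an exception should be rolled back there.

package com.service;

import com.entity.StudentTransaction;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

@RunWith(SpringRunner.class)
@SpringBootTest
public class StudentServiceTest {

    @Autowired
    private StudentService studentService;

    @Test
    public void insertCommitsOnSlave() {
        StudentTransaction s = new StudentTransaction();
        s.setUsername("alice");
        studentService.addStudent(s);
        // a new row appears in test2.student_transaction (the slave), not in test1
    }

    @Test
    public void rollbackHappensOnSlave() {
        StudentTransaction s = new StudentTransaction();
        s.setUsername("bob");
        try {
            studentService.addStudentThenFail(s);
        } catch (RuntimeException expected) {
            // the insert is rolled back on the slave -- no new row in test2.student_transaction
        }
    }
}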





 
