数据库读写分离-springboot事务配置篇

spring事务配置见:https://blog.csdn.net/andyzhaojianhui/article/details/74357100?locationNum=9&fps=1

根据这篇文章做了一些修改以适用于springboot项目,可能还有一些未知问题,目前使用中尚未发现,欢迎指正,不胜感激

注意:我们约定

配置文件中的写库的连接信息spring.datasource开头,例如spring.datasource.url=

通过spring.read.datasource.name这一配置项来确定有多少个读库,多个读库名之间以英文逗号分隔

例如spring.read.datasource.name=read1,read2

读库连接信息以spring. + 上面的name对应的读库名 + .datasource开头,

例如

spring.read1.datasource.url=

spring.read2.datasource.url=

1.DataSourceConfiguration,实例化数据源,事务管理等

import com.github.pagehelper.PageInterceptor;
import org.apache.ibatis.plugin.Interceptor;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.tomcat.jdbc.pool.PoolProperties;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.interceptor.TransactionInterceptor;
import org.springframework.util.StringUtils;

import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Initializes the data sources, transaction manager and MyBatis session factory.
 *
 * Convention (see the article text above):
 * - write DB connection settings start with "spring.datasource"
 * - "spring.read.datasource.name" lists the read DB names, comma separated
 * - each read DB's connection settings start with "spring.&lt;name&gt;.datasource"
 * - pool tuning settings under "spring.datasource.tomcat.*" are shared by all pools
 */
@Configuration
public class DataSourceConfiguration {

    @Autowired
    private Environment env;

    @Autowired
    private TransactionInterceptor txAdvice;

    /**
     * Read/write splitting data source: writes go to the write DB,
     * reads are round-robined over the read DBs.
     */
    @Bean
    public DataSource dataSource() {
        ReadWriteDataSource dataSource = new ReadWriteDataSource();
        dataSource.setWriteDataSource(writeDataSource());
        dataSource.setReadDataSourceMap(readDataSourceMap());
        return dataSource;
    }

    /**
     * Processor that rewrites the propagation behavior of read-only transaction
     * attributes and records the read-only method name patterns.
     * postProcessAfterInitialization is invoked manually here on txAdvice's
     * transaction attribute source.
     */
    @Bean
    public ReadWriteDataSourceProcessor readWriteDataSourceTransactionProcessor() {
        ReadWriteDataSourceProcessor processor = new ReadWriteDataSourceProcessor();
        processor.setForceChoiceReadWhenWrite(true);
        processor.postProcessAfterInitialization(txAdvice.getTransactionAttributeSource(), null);
        return processor;
    }

    // Builds the write data source; connection info comes from spring.datasource.*
    public DataSource writeDataSource() {
        return new org.apache.tomcat.jdbc.pool.DataSource(poolProperties("spring.datasource"));
    }

    // Builds one read data source per name listed in spring.read.datasource.name
    // (comma separated); connection info comes from spring.<name>.datasource.*.
    // Returns an empty map when no read DB is configured — ReadWriteDataSource
    // rejects an empty map in afterPropertiesSet, exactly as it rejected the
    // former null return.
    public Map<String, DataSource> readDataSourceMap() {
        Map<String, DataSource> dMap = new HashMap<>();
        String readNames = env.getProperty("spring.read.datasource.name");
        if (!StringUtils.isEmpty(readNames)) {
            for (String name : readNames.split(",")) {
                dMap.put(name, new org.apache.tomcat.jdbc.pool.DataSource(
                        poolProperties("spring." + name + ".datasource")));
            }
        }
        return dMap;
    }

    /**
     * Common pool configuration, shared by the write and read data sources.
     * Connection settings (url/username/password) are read from the given
     * prefix; pool tuning settings are shared and read from
     * spring.datasource.tomcat.*. All of these keys are required: a missing
     * numeric key makes Integer.valueOf throw NumberFormatException at startup.
     */
    private PoolProperties poolProperties(String connPrefix) {
        PoolProperties properties = new PoolProperties();
        properties.setUrl(env.getProperty(connPrefix + ".url"));
        properties.setUsername(env.getProperty(connPrefix + ".username"));
        properties.setPassword(env.getProperty(connPrefix + ".password"));
        properties.setDriverClassName(env.getProperty("spring.datasource.jdbc.driver"));
        properties.setInitialSize(Integer.valueOf(env.getProperty("spring.datasource.tomcat.initial-size")));
        properties.setMinIdle(Integer.valueOf(env.getProperty("spring.datasource.tomcat.min-idle")));
        properties.setMaxActive(Integer.valueOf(env.getProperty("spring.datasource.tomcat.max-active")));
        properties.setMaxIdle(Integer.valueOf(env.getProperty("spring.datasource.tomcat.max-idle")));
        properties.setMaxWait(Integer.valueOf(env.getProperty("spring.datasource.tomcat.max-wait")));
        properties.setValidationQuery(env.getProperty("spring.datasource.tomcat.validation-query"));
        properties.setTestWhileIdle(Boolean.valueOf(env.getProperty("spring.datasource.tomcat.test-while-idle")));
        properties.setTimeBetweenEvictionRunsMillis(Integer.valueOf(env.getProperty("spring.datasource.tomcat.time-between-eviction-runs-millis")));
        return properties;
    }

    @Bean
    public PlatformTransactionManager transactionManager() {
        return new DataSourceTransactionManager(dataSource());
    }

    /**
     * MyBatis session factory bound to the read/write splitting data source.
     * Mapper XML locations come from the mybatis.mapper-locations property.
     */
    @Bean
    public SqlSessionFactory sqlSessionFactoryBean() throws Exception {
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(dataSource());
        PathMatchingResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
        sqlSessionFactoryBean.setMapperLocations(resolver.getResources(env.getProperty("mybatis.mapper-locations")));
        // MyBatis pagination plugin
        sqlSessionFactoryBean.setPlugins(mybatisPlugins());
        // extra MyBatis settings
        sqlSessionFactoryBean.setConfiguration(myConfiguration());
        return sqlSessionFactoryBean.getObject();
    }

    // PageHelper pagination interceptor configured for MySQL
    private Interceptor[] mybatisPlugins() {
        PageInterceptor interceptor = new PageInterceptor();
        Properties pageHelperProps = new Properties();
        pageHelperProps.setProperty("helperDialect", "mysql");
        pageHelperProps.setProperty("offsetAsPageNum", "true");
        pageHelperProps.setProperty("pageSizeZero", "true");
        pageHelperProps.setProperty("rowBoundsWithCount", "true");
        interceptor.setProperties(pageHelperProps);
        return new Interceptor[]{interceptor};
    }

    private org.apache.ibatis.session.Configuration myConfiguration() {
        org.apache.ibatis.session.Configuration conf = new org.apache.ibatis.session.Configuration();
        // map DB column a_column to camelCase Java properties (default: false)
        conf.setMapUnderscoreToCamelCase(true);
        return conf;
    }
}

2.ReadWriteDataSource

借用本文开头的文章链接中的ReadWriteDataSource类

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.jdbc.datasource.AbstractDataSource;
import org.springframework.util.CollectionUtils;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Read/write dynamic routing DataSource.
 *
 * Implemented features: one write DB with multiple read DBs, round-robin
 * load balancing over the read DBs, and — driven by
 * ReadWriteDataSourceDecision's per-thread marker — routing reads to the
 * write DB while a write transaction is active, or forcing them to a read DB.
 */
public class ReadWriteDataSource extends AbstractDataSource implements InitializingBean {

    private static final Logger log = LoggerFactory.getLogger(ReadWriteDataSource.class);

    private DataSource writeDataSource;
    private Map<String, DataSource> readDataSourceMap;

    // derived from readDataSourceMap in afterPropertiesSet()
    private String[] readDataSourceNames;
    private DataSource[] readDataSources;
    private int readDataSourceCount;

    // round-robin counter; overflow is handled in determineReadDataSource()
    private final AtomicInteger counter = new AtomicInteger(1);

    /**
     * Sets the read DBs as (name, DataSource) pairs.
     */
    public void setReadDataSourceMap(Map<String, DataSource> dMap) {
        this.readDataSourceMap = dMap;
    }

    /**
     * Sets the write DB.
     */
    public void setWriteDataSource(DataSource dataSource) {
        this.writeDataSource = dataSource;
    }

    // An explicit write marker, or no marker at all, selects the write DB;
    // only an explicit read marker routes to a read DB.
    private DataSource determineDataSource() {
        if (ReadWriteDataSourceDecision.isChoiceWrite()) {
            return writeDataSource;
        }
        if (ReadWriteDataSourceDecision.isChoiceNone()) {
            return writeDataSource;
        }
        return determineReadDataSource();
    }

    // Round-robin over the read DBs. Math.abs keeps the index in range after
    // the counter overflows: |x % n| < n for n >= 1, so the negation can
    // never itself overflow.
    private DataSource determineReadDataSource() {
        int index = Math.abs(counter.incrementAndGet() % readDataSourceCount);
        return readDataSources[index];
    }

    @Override
    public Connection getConnection() throws SQLException {
        return determineDataSource().getConnection();
    }

    @Override
    public Connection getConnection(String username, String password) throws SQLException {
        return determineDataSource().getConnection(username, password);
    }

    /**
     * Validates the configuration and flattens the read DB map into parallel
     * arrays for cheap indexed access.
     *
     * @throws IllegalArgumentException when the write DB or the read DB map is missing/empty
     */
    @Override
    public void afterPropertiesSet() throws Exception {
        if (writeDataSource == null) {
            throw new IllegalArgumentException("property 'writeDataSource' is required");
        }
        if (CollectionUtils.isEmpty(readDataSourceMap)) {
            throw new IllegalArgumentException("property 'readDataSourceMap' is required");
        }
        readDataSourceCount = readDataSourceMap.size();

        readDataSources = new DataSource[readDataSourceCount];
        readDataSourceNames = new String[readDataSourceCount];

        int i = 0;
        for (Entry<String, DataSource> e : readDataSourceMap.entrySet()) {
            readDataSources[i] = e.getValue();
            readDataSourceNames[i] = e.getKey();
            i++;
        }
    }
}

3.ReadWriteDataSourceDecision

借用本文开头的文章链接中的ReadWriteDataSourceDecision类

import org.springframework.context.annotation.Configuration;

/**
 * Decision holder for read/write data source routing.
 *
 * The choice (write or read) is bound to the current thread via a
 * ThreadLocal; ReadWriteDataSource consults it when picking a connection.
 * NOTE(review): registered as a Spring bean via @Configuration although all
 * members are static — kept as-is for compatibility with the original setup.
 */
@Configuration
public class ReadWriteDataSourceDecision {

    public enum DataSourceType {
        write, read;
    }

    private static final ThreadLocal<DataSourceType> holder = new ThreadLocal<>();

    /** Marks the current thread as performing a write operation. */
    public static void markWrite() {
        holder.set(DataSourceType.write);
    }

    /** Marks the current thread as performing a read operation. */
    public static void markRead() {
        holder.set(DataSourceType.read);
    }

    /**
     * Clears the marker. Uses remove() rather than set(null) so the
     * ThreadLocal entry does not linger on pooled (reused) threads.
     */
    public static void reset() {
        holder.remove();
    }

    public static boolean isChoiceNone() {
        return null == holder.get();
    }

    public static boolean isChoiceWrite() {
        return DataSourceType.write == holder.get();
    }

    public static boolean isChoiceRead() {
        return DataSourceType.read == holder.get();
    }

}

4.ReadWriteDataSourceProcessor

借用本文开头的文章链接中的ReadWriteDataSourceProcessor类

import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.core.NestedRuntimeException;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.interceptor.NameMatchTransactionAttributeSource;
import org.springframework.transaction.interceptor.RuleBasedTransactionAttribute;
import org.springframework.transaction.interceptor.TransactionAttribute;
import org.springframework.util.PatternMatchUtils;
import org.springframework.util.ReflectionUtils;

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;

/**
 * 此类实现了两个职责(为了减少类的数量将两个功能合并到一起了):
 * 读/写动态数据库选择处理器
 * 通过AOP切面实现读/写选择
 * 

* ★★读/写动态数据库选择处理器★★ * 1、首先读取事务属性配置 *

* 2、对于所有读方法设置 read-only="true" 表示读取操作(以此来判断是选择读还是写库),其他操作都是走写库 * 如 *

* 3、 forceChoiceReadOnWrite用于确定在如果目前是写(即开启了事务),下一步如果是读, * 是直接参与到写库进行读,还是强制从读库读
* forceChoiceReadOnWrite:true 表示目前是写,下一步如果是读,强制参与到写事务(即从写库读) * 这样可以避免写的时候从读库读不到数据 *

* 通过设置事务传播行为:SUPPORTS实现 *

* forceChoiceReadOnWrite:false 表示不管当前事务是写/读,都强制从读库获取数据 * 通过设置事务传播行为:NOT_SUPPORTS实现(连接是尽快释放) * 『此处借助了 NOT_SUPPORTS会挂起之前的事务进行操作 然后再恢复之前事务完成的』 * 4、配置方式 * * * *

* 5、目前只适用于情况 * 支持@Transactional注解事务 *

* ★★通过AOP切面实现读/写库选择★★ *

* 1、首先将当前方法 与 根据之前【读/写动态数据库选择处理器】 提取的读库方法 进行匹配 *

* 2、如果匹配,说明是读取数据: * 2.1、如果forceChoiceReadOnWrite:true,即强制走读库 * 2.2、如果之前是写操作且forceChoiceReadOnWrite:false,将从写库进行读取 * 2.3、否则,到读库进行读取数据 *

* 3、如果不匹配,说明默认将使用写库进行操作 *

* 4、配置方式 * * * * 4.1、此处order = Integer.MIN_VALUE 即最高的优先级(请参考http://jinnianshilongnian.iteye.com/blog/1423489) * 4.2、切入点:txPointcut 和 实施事务的切入点一样 * 4.3、determineReadOrWriteDB方法用于决策是走读/写库的,请参考 * @see cn.javass.common.datasource.ReadWriteDataSourceDecision * @see cn.javass.common.datasource.ReadWriteDataSource */ @Aspect public class ReadWriteDataSourceProcessor implements BeanPostProcessor { private static final Logger log = LoggerFactory.getLogger(ReadWriteDataSourceProcessor.class); @Pointcut(TxAdviceInterceptor.AOP_POINTCUT_EXPRESSION) public void txPointcut() { } private boolean forceChoiceReadWhenWrite = false; private Map readMethodMap = new HashMap<>(); /** * 当之前操作是写的时候,是否强制从从库读 默认(false) 当之前操作是写,默认强制从写库读 */ public void setForceChoiceReadWhenWrite(boolean forceChoiceReadWhenWrite) { this.forceChoiceReadWhenWrite = forceChoiceReadWhenWrite; } @Override public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof NameMatchTransactionAttributeSource)) { return bean; } try { NameMatchTransactionAttributeSource transactionAttributeSource = (NameMatchTransactionAttributeSource) bean; Field nameMapField = ReflectionUtils.findField(NameMatchTransactionAttributeSource.class, "nameMap"); nameMapField.setAccessible(true); @SuppressWarnings("unchecked") Map nameMap = (Map) nameMapField.get(transactionAttributeSource); for (Entry entry : nameMap.entrySet()) { RuleBasedTransactionAttribute attr = (RuleBasedTransactionAttribute) entry.getValue(); // 仅对read-only的处理 if (!attr.isReadOnly()) { continue; } String methodName = entry.getKey(); Boolean isForceChoiceRead = Boolean.FALSE; if (forceChoiceReadWhenWrite) { // 不管之前操作是写,默认强制从读库读 (设置为NOT_SUPPORTED即可) // NOT_SUPPORTED会挂起之前的事务 attr.setPropagationBehavior(Propagation.NOT_SUPPORTED .value()); isForceChoiceRead = Boolean.TRUE; } else { // 否则 设置为SUPPORTS(这样可以参与到写事务) attr.setPropagationBehavior(Propagation.SUPPORTS.value()); } readMethodMap.put(methodName, 
isForceChoiceRead); } } catch (Exception e) { throw new ReadWriteDataSourceTransactionException( "process read/write transaction error", e); } return bean; } @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { return bean; } private class ReadWriteDataSourceTransactionException extends NestedRuntimeException { private static final long serialVersionUID = 7537763615924915804L; public ReadWriteDataSourceTransactionException(String message, Throwable cause) { super(message, cause); } } /** * 确定选择哪个数据源(读库还是写库) * * @param pjp * @return * @throws Throwable */ @Around("txPointcut()") public Object determineReadOrWriteDB(ProceedingJoinPoint pjp) throws Throwable { if (isChoiceReadDB(pjp.getSignature().getName())) { ReadWriteDataSourceDecision.markRead(); } else { ReadWriteDataSourceDecision.markWrite(); } try { return pjp.proceed(); } finally { ReadWriteDataSourceDecision.reset(); } } /** * 根据方法名确定是否选择 读库 * * @param methodName 方法名 * @return */ private boolean isChoiceReadDB(String methodName) { String bestNameMatch = null; for (String mappedName : this.readMethodMap.keySet()) { if (isMatch(methodName, mappedName)) { bestNameMatch = mappedName; break; } } Boolean isForceChoiceRead = readMethodMap.get(bestNameMatch); // 表示强制选择 读 库 if (Objects.equals(isForceChoiceRead, Boolean.TRUE)) { return true; } // 如果之前选择了写库 现在还选择 写库 if (ReadWriteDataSourceDecision.isChoiceWrite()) { return false; } // 表示应该选择读库 if (isForceChoiceRead != null) { return true; } // 默认选择 写库 return false; } protected boolean isMatch(String methodName, String mappedName) { return PatternMatchUtils.simpleMatch(mappedName, methodName); } }

5.TxAdviceInterceptor

import org.springframework.aop.Advisor;
import org.springframework.aop.aspectj.AspectJExpressionPointcut;
import org.springframework.aop.support.DefaultPointcutAdvisor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.interceptor.*;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * Transaction advice configuration.
 * requiredTx patterns are write transactions and go to the write DB;
 * readOnlyTx patterns are read transactions and go to a read DB;
 * a transaction that both reads and writes goes to the write DB.
 */
@Configuration
public class TxAdviceInterceptor {

    // NOTE(review): Spring transaction timeouts are in SECONDS, so 3000 is
    // ~50 minutes — 3000 ms may have been intended; confirm before changing.
    private static final int TX_METHOD_TIMEOUT = 3000;

    public static final String AOP_POINTCUT_EXPRESSION = "execution (* com.iclassmate.abel.service.*.*(..))";

    @Autowired
    private PlatformTransactionManager transactionManager;

    /**
     * Builds the interceptor mapping service method-name patterns to
     * transaction attributes. ReadWriteDataSourceProcessor later rewrites the
     * propagation behavior of the read-only entries (see DataSourceConfiguration).
     */
    @Bean
    public TransactionInterceptor txAdvice() {
        NameMatchTransactionAttributeSource source = new NameMatchTransactionAttributeSource();
        /* read-only transaction: performs no updates */
        RuleBasedTransactionAttribute readOnlyTx = new RuleBasedTransactionAttribute();
        readOnlyTx.setReadOnly(true);
        readOnlyTx.setPropagationBehavior(TransactionDefinition.PROPAGATION_NOT_SUPPORTED);
        /* joins the current transaction, or starts a new one if none exists;
           rolls back on any Exception, not only RuntimeException */
        RuleBasedTransactionAttribute requiredTx = new RuleBasedTransactionAttribute();
        requiredTx.setRollbackRules(
                Collections.singletonList(new RollbackRuleAttribute(Exception.class)));
        requiredTx.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
        requiredTx.setTimeout(TX_METHOD_TIMEOUT);

        Map<String, TransactionAttribute> txMap = new HashMap<>();
        // write patterns
        txMap.put("save*", requiredTx);
        txMap.put("add*", requiredTx);
        txMap.put("create*", requiredTx);
        txMap.put("insert*", requiredTx);
        txMap.put("update*", requiredTx);
        txMap.put("delete*", requiredTx);
        txMap.put("merge*", requiredTx);
        txMap.put("remove*", requiredTx);
        txMap.put("put*", requiredTx);
        txMap.put("drop*", requiredTx);
        txMap.put("sync*", requiredTx);

        // read patterns
        txMap.put("get*", readOnlyTx);
        txMap.put("query*", readOnlyTx);
        txMap.put("count*", readOnlyTx);
        txMap.put("exist*", readOnlyTx);
        txMap.put("find*", readOnlyTx);
        txMap.put("list*", readOnlyTx);
        txMap.put("translate*", readOnlyTx);
        txMap.put("select*", readOnlyTx);

        // everything else defaults to a write transaction
        txMap.put("*", requiredTx);
        source.setNameMap(txMap);
        return new TransactionInterceptor(transactionManager, source);
    }

    /**
     * Advisor binding the transaction advice to the service-layer pointcut.
     */
    @Bean
    public Advisor txAdviceAdvisor() {
        AspectJExpressionPointcut pointcut = new AspectJExpressionPointcut();
        pointcut.setExpression(AOP_POINTCUT_EXPRESSION);
        return new DefaultPointcutAdvisor(pointcut, txAdvice());
    }
}


你可能感兴趣的:(工作纪要,学习记录)