In Spring Boot, read/write splitting for MySQL relies on MySQL master-slave replication: data is written to the master, the slave copies it via the master's binlog, and read queries are served from the slave. For setting up the replication itself, see Mysql架构篇–Mysql(M-S) 主从同步.
MySQL read/write splitting is a database architecture strategy that routes read operations and write operations to different database instances in order to improve performance and scalability. The basic idea is to send read requests (SELECT statements) to the read library (the slave) and write requests (INSERT, UPDATE, DELETE statements) to the write library (the master). This makes full use of the resources of multiple database instances and relieves the load on the master.
There are two main ways to implement MySQL read/write splitting:
Proxy middleware: a proxy (such as MySQL Proxy, MaxScale, or Mycat) sits between the application and the databases and forwards read and write requests to the appropriate instance according to pre-configured routing rules. This approach is transparent to the application, which needs no code changes.
Application layer: the application itself distinguishes between the read library and the write library in code. Multiple data sources are configured, and the application chooses the appropriate one for each read or write operation. This requires explicitly switching database connections, usually with a connection pool managing the connections for each database.
Read/write splitting improves concurrency and availability, relieves the read/write pressure on a single database, and makes the system more scalable and fault tolerant. Note, however, that it can introduce consistency problems, because the slave is not guaranteed to be fully in sync with the master. The application therefore has to handle consistency appropriately, for example by relying on master-slave replication or another synchronization strategy.
This example implements MySQL read/write splitting at the application layer.
The idea: configure multiple data sources and define a marker annotation that an AOP aspect intercepts. When a method carries the annotation, data is read from the slave; otherwise the master handles both reads and writes.
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-openfeign</artifactId>
        <optional>true</optional>
        <version>3.1.6</version>
    </dependency>
    <dependency>
        <groupId>com.baomidou</groupId>
        <artifactId>mybatis-plus-boot-starter</artifactId>
        <version>3.5.2</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>8.0.21</version>
    </dependency>
    <dependency>
        <groupId>net.sf.jsqlparser</groupId>
        <artifactId>jsqlparser</artifactId>
        <version>0.8.0</version>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid-spring-boot-starter</artifactId>
        <version>1.1.9</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-aop</artifactId>
    </dependency>
    <dependency>
        <groupId>org.aspectj</groupId>
        <artifactId>aspectjtools</artifactId>
        <version>1.8.13</version>
    </dependency>
    <dependency>
        <groupId>io.swagger</groupId>
        <artifactId>swagger-annotations</artifactId>
        <version>1.6.2</version>
    </dependency>
</dependencies>
package com.example.mybatisreadwrite.config;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.MybatisConfiguration;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.ibatis.plugin.Interceptor;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.type.JdbcType;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;
@Configuration
// Enable transaction support; service methods that access the database can then simply be annotated with @Transactional
@EnableTransactionManagement
// Scan the mapper interfaces and bind them to the SqlSessionFactory defined below
@MapperScan(basePackages = {"com.example.mybatisreadwrite.mapper"}, sqlSessionFactoryRef = "sqlSessionFactory")
public class DataSourceConfig {
@Autowired
private HikariBaseConfig hikariBaseConfig;
/**
 * Write data source.
 * @Primary marks this bean as the preferred candidate when several beans of the same type exist.
 * With multiple data sources, one of them must be the primary data source, marked with @Primary.
 */
@Primary
@Bean
@ConfigurationProperties(prefix = "spring.datasource.write")
public DataSource writeDataSource() {
HikariDataSource hikariDataSource = DataSourceBuilder.create().type(HikariDataSource.class).build();
return hikariBaseConfig.getDataSource(hikariDataSource);
//
// DataSource ds = DataSourceBuilder.create().type(DruidDataSource.class).build();
// return ds;
}
@Bean
@ConfigurationProperties(prefix = "spring.datasource.read1")
public DataSource readDataSource_1() {
HikariDataSource hikariDataSource = DataSourceBuilder.create().type(HikariDataSource.class).build();
return hikariBaseConfig.getDataSource(hikariDataSource);
}
/**
 * Data source routing: determineCurrentLookupKey() in RoutingDataSource decides which data source is used for the current call.
 */
@Bean
public DataSource routingDataSource() {
RoutingDataSource proxy = new RoutingDataSource();
Map<Object, Object> targetDataSources = new HashMap<>();
targetDataSources.put(DbContextHolder.WRITE, writeDataSource());
targetDataSources.put(DbContextHolder.READ + "1", readDataSource_1());
// Default data source
proxy.setDefaultTargetDataSource(writeDataSource());
// Candidate data sources that can be routed to
proxy.setTargetDataSources(targetDataSources);
return proxy;
}
/**
 * Since the Spring container now holds several data sources, the transaction manager and MyBatis must be given an explicit one: the routing data source.
 */
@Bean
public SqlSessionFactory sqlSessionFactory() throws Exception {
MybatisSqlSessionFactoryBean sqlSessionFactory = new MybatisSqlSessionFactoryBean();
sqlSessionFactory.setDataSource(routingDataSource());
MybatisConfiguration configuration = new MybatisConfiguration();
configuration.setJdbcTypeForNull(JdbcType.NULL);
configuration.setMapUnderscoreToCamelCase(true);
configuration.setCacheEnabled(false);
sqlSessionFactory.setConfiguration(configuration);
// Pagination plugin
Interceptor[] plugins = {mybatisPlusInterceptor()};
sqlSessionFactory.setPlugins(plugins);
return sqlSessionFactory.getObject();
}
@Bean
public DataSourceTransactionManager transactionManager() {
return new DataSourceTransactionManager(routingDataSource());
}
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));
return interceptor;
}
}
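The routing key for reads is DbContextHolder.READ plus a number, so additional read replicas can be registered the same way. A minimal sketch, assuming a hypothetical spring.datasource.read2 property prefix and mysql.datasource.readNum raised to 2 (neither is part of the original project):

// Hypothetical second read replica; mirrors readDataSource_1()
@Bean
@ConfigurationProperties(prefix = "spring.datasource.read2")
public DataSource readDataSource_2() {
    HikariDataSource hikariDataSource = DataSourceBuilder.create().type(HikariDataSource.class).build();
    return hikariBaseConfig.getDataSource(hikariDataSource);
}

// and in routingDataSource():
// targetDataSources.put(DbContextHolder.READ + "2", readDataSource_2());
// With mysql.datasource.readNum=2, RoutingDataSource will then pick read1 or read2 at random.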
package com.example.mybatisreadwrite.config;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Configuration;
@Configuration
public class HikariBaseConfig {
@Value("${spring.datasource.hikari.pool-name}")
private String poolName;
@Value("${spring.datasource.hikari.maximum-pool-size}")
private Integer maximumPoolSize;
@Value("${spring.datasource.hikari.connection-timeout}")
private Long connectionTimeout;
@Value("${spring.datasource.hikari.minimum-idle}")
private Integer minimumIdle;
@Value("${spring.datasource.hikari.max-lifetime}")
private Long maxLifetime;
@Value("${spring.datasource.hikari.connection-test-query}")
private String connectionTestQuery;
public HikariDataSource getDataSource(String driverClassName, String url, String username, String password) {
HikariDataSource hikariDataSource = DataSourceBuilder.create().type(HikariDataSource.class).driverClassName(driverClassName).username(username).url(url).password(password).build();
hikariDataSource.setConnectionTestQuery(connectionTestQuery);
hikariDataSource.setMaxLifetime(maxLifetime);
hikariDataSource.setMinimumIdle(minimumIdle);
hikariDataSource.setConnectionTimeout(connectionTimeout);
hikariDataSource.setPoolName(poolName);
hikariDataSource.setMaximumPoolSize(maximumPoolSize);
return hikariDataSource;
}
public HikariDataSource getDataSource(HikariDataSource hikariDataSource) {
hikariDataSource.setConnectionTestQuery(connectionTestQuery);
hikariDataSource.setMaxLifetime(maxLifetime);
hikariDataSource.setMinimumIdle(minimumIdle);
hikariDataSource.setConnectionTimeout(connectionTimeout);
hikariDataSource.setPoolName(poolName);
hikariDataSource.setMaximumPoolSize(maximumPoolSize);
return hikariDataSource;
}
}
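The property file is not shown in this article, but the @ConfigurationProperties prefixes (spring.datasource.write, spring.datasource.read1) and the spring.datasource.hikari.* values injected above imply configuration roughly like the following sketch. Host names, the database name, credentials and pool values are placeholders, not values from the original project; note that HikariCP expects jdbc-url rather than url when the properties are bound directly to a HikariDataSource:

spring:
  datasource:
    write:
      jdbc-url: jdbc:mysql://master-host:3306/demo?useSSL=false&serverTimezone=UTC
      driver-class-name: com.mysql.cj.jdbc.Driver
      username: root
      password: your-password
    read1:
      jdbc-url: jdbc:mysql://slave-host:3306/demo?useSSL=false&serverTimezone=UTC
      driver-class-name: com.mysql.cj.jdbc.Driver
      username: root
      password: your-password
    hikari:
      pool-name: rw-pool
      maximum-pool-size: 10
      minimum-idle: 2
      connection-timeout: 30000
      max-lifetime: 1800000
      connection-test-query: SELECT 1
mysql:
  datasource:
    readNum: 1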
package com.example.mybatisreadwrite.config;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
public class RoutingDataSource extends AbstractRoutingDataSource {
@Value("${mysql.datasource.readNum:1}")
private int num;
@Override
protected Object determineCurrentLookupKey() {
String typeKey = DbContextHolder.getDbType();
if(typeKey.equals(DbContextHolder.WRITE)) {
return typeKey;
}
// Use a random number to decide which read library to use:
// generate an integer in [1, num], where num is the number of read replicas (default 1)
int random = (int) (Math.random() * num) + 1;
return DbContextHolder.READ + random;
}
}
package com.example.mybatisreadwrite.config;
public class DbContextHolder {
public static final String WRITE = "write";
public static final String READ = "read";
private static ThreadLocal<String> contextHolder = new ThreadLocal<>();
public static void setDbType(String dbType) {
if(dbType == null) {
throw new NullPointerException();
}
contextHolder.set(dbType);
}
public static String getDbType() {
return contextHolder.get() == null ? WRITE : contextHolder.get();
}
public static void clearDbType() {
contextHolder.remove();
}
}
package com.example.mybatisreadwrite.aop;
import com.example.mybatisreadwrite.config.DbContextHolder;
import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.core.Ordered;
import org.springframework.stereotype.Component;
@Aspect
@Component
@Slf4j
public class DataSourceAop implements Ordered {
@Around("@annotation(readOnly)")
public Object setRead(ProceedingJoinPoint joinPoint, ReadOnly readOnly) throws Throwable {
try {
DbContextHolder.setDbType(DbContextHolder.READ);
return joinPoint.proceed();
} finally {
// Clearing the DbType avoids leaking the ThreadLocal and, more importantly, prevents it from affecting later operations executed on the same (pooled) thread
DbContextHolder.clearDbType();
log.info("cleared ThreadLocal db type");
}
}
@Override
public int getOrder() {
return 0;
}
}
package com.example.mybatisreadwrite.aop;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
public @interface ReadOnly {
}
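With the annotation and the aspect in place, marking a read method with @ReadOnly routes its queries to the slave, while unannotated methods keep using the master. A minimal usage sketch; UserService, UserMapper and User are illustrative names, not classes from this article:

package com.example.mybatisreadwrite.service;

import com.example.mybatisreadwrite.aop.ReadOnly;
import org.springframework.stereotype.Service;
import java.util.List;

@Service
public class UserService {
    // UserMapper is a hypothetical MyBatis-Plus mapper in the scanned package,
    // e.g. public interface UserMapper extends BaseMapper<User> { }
    private final UserMapper userMapper;

    public UserService(UserMapper userMapper) {
        this.userMapper = userMapper;
    }

    // Intercepted by DataSourceAop: queries go to the read (slave) data source
    @ReadOnly
    public List<User> listUsers() {
        return userMapper.selectList(null);
    }

    // No annotation: uses the default write (master) data source
    public void addUser(User user) {
        userMapper.insert(user);
    }
}

Note that the aspect only sees calls that go through the Spring proxy, so @ReadOnly should be placed on public methods invoked from outside the bean itself.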
This article uses an AOP aspect to switch the data source dynamically when a method is invoked, implementing MySQL read/write splitting at the application level.