目录
1:ShardingSphere实现Spring的读写分离分库分表
2:什么是ShardingSphere
3:ShardingJDBC实现代码案例
3.1:pom准备
3.2:读写分离
3.3:垂直分库分表
3.4:水平单表不分库分片
3.5:水平单表分库分片
3.6:水平多表分库分表
4:ShardingProxy实现代码案例
4.1:下载解压配置ShardingProxy
4.2:配置conf下边的server.yaml
4.3:配置垂直拆分conf下的config-sharding.yaml
4.4:Proxy代码测试
参见ShardingSphere官网
Apache ShardingSphere
本项目基于jdk17、springboot3.1.2、sharding5.3.2、mysql8.0.15
版本问题很严重,在测试过程中存在很多冲突,一定要找到ShardingSphere的具体版本来进行测试
Apache ShardingSphere 是一款分布式的数据库生态系统, 可以将任意数据库转换为分布式数据库,并通过数据分片、弹性伸缩、加密等能力对原有数据库进行增强。
可以实现读写分离和分库分表
Apache ShardingSphere 由 ShardingSphere-JDBC 和 ShardingSphere-Proxy 这 2 款既能够独立部署,又支持混合部署配合使用的产品组成。 它们均提供标准化的基于数据库作为存储节点的增量功能,可适用于如 Java 同构、异构语言、云原生等各种多样化的应用场景,他们都能实现分库分表。
ShardingSphere-JDBC:ShardingSphere-JDBC 定位为轻量级 Java 框架,在 Java 的 JDBC 层提供的额外服务。
ShardingSphere-Proxy:ShardingSphere-Proxy 定位为透明化的数据库代理端,通过实现数据库二进制协议,对异构语言提供支持。 目前提供 MySQL 和 PostgreSQL 协议,透明化数据库操作,对 DBA 更加友好。
org.springframework.boot
spring-boot-starter-web
org.springframework.boot
spring-boot-devtools
runtime
true
org.projectlombok
lombok
true
org.springframework.boot
spring-boot-starter-test
test
org.apache.shardingsphere
shardingsphere-jdbc-core
5.3.2
com.baomidou
mybatis-plus-boot-starter
3.5.3.1
mysql
mysql-connector-java
8.0.33
业务场景复现:三个数据库中分别有相同的表,分为1主2从,写数据去主库,查数据到从库
1:配置application.properties,通过切换yml实现不同的功能
spring.application.name=shardingJdbc_demo1
# active profile: development
spring.profiles.active=dev
# Run mode. Options: Memory, Standalone, Cluster
spring.shardingsphere.mode.type=Memory
# DataSource driver: ShardingSphere's own JDBC driver
spring.datasource.driver-class-name=org.apache.shardingsphere.driver.ShardingSphereDriver
# YAML rule file on the classpath; swap the file name to switch scenarios
spring.datasource.url=jdbc:shardingsphere:classpath:sharding_读写分离.yaml
2:配置sharding_读写分离.yaml
#读写分离测试配置 主数据配置:一个主库 二个从库
#查询走从库新增主库
#在这三个数据库中 都有表TestUser
#Read/write splitting test configuration: one primary (write) database and two replicas (read).
#SELECTs are routed to the replicas, writes go to the primary.
#All three databases contain the table TestUser.
dataSources:
  # primary data source (writes)
  master:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    # Connector/J 8.x class name; com.mysql.jdbc.Driver is deprecated since 8.0
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  slave1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1R1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  slave2:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1R2?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
#rule configuration
rules:
- !READWRITE_SPLITTING
  dataSources:
    readwrite_ds:
      staticStrategy:
        # write data source name
        writeDataSourceName: master
        # read data source names (one or more)
        readDataSourceNames:
          - slave1
          - slave2
        loadBalancerName: random
  # load balancer configuration
  loadBalancers:
    # the key must match loadBalancerName above; NOTE: the key is named
    # "random" but the type below is ROUND_ROBIN — the key is only a label
    random:
      #ROUND_ROBIN = round robin, RANDOM = random, WEIGHT = weighted
      type: ROUND_ROBIN
#      props:
#        slave1: 1
#        slave2: 1
# other properties
props:
  sql-show: true
3:实体entity(在这三个数据库中创建TestUser表)
/**
 * Entity mapped to the TestUser table (MyBatis-Plus).
 *
 * @TableName TestUser
 */
@TableName(value ="TestUser")
@Data
public class Testuser implements Serializable {
// auto-increment primary key: the database generates the value on insert
@TableId(type = IdType.AUTO)
private Integer id;
private String name;
private String address;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
4:测试类
@SpringBootTest
class 读写分离Test {
@Autowired
TestuserMapper testuserMapper;
/**
 * Read path: the four identical SELECTs are distributed over slave1 and
 * slave2 by the configured load balancer (type ROUND_ROBIN in the yaml).
 */
@Test
void select() {
Testuser testUser2 = testuserMapper.selectById(1);
System.out.println(testUser2);
Testuser testUser3 = testuserMapper.selectById(1);
System.out.println(testUser3);
Testuser testUser22 = testuserMapper.selectById(1);
System.out.println(testUser22);
Testuser testUser33 = testuserMapper.selectById(1);
System.out.println(testUser33);
}
/**
 * Read path: selectList is likewise routed to the read replicas.
 */
@Test
void selectAll() {
List users = testuserMapper.selectList(null);
users.forEach(System.out::println);
}
/**
 * Write path: both inserts go to the primary (master).
 * No transaction here, so the trailing query is routed as a normal read.
 */
@Test
void add() {
Testuser testuser1 = new Testuser();
testuser1.setName("aaa");
testuser1.setAddress("a地址");
testuserMapper.insertUser(testuser1);
System.out.println("主键:"+testuser1.getId());
Testuser testuser2 = new Testuser();
testuser2.setName("bbb");
testuser2.setAddress("b地址");
testuserMapper.insertUser(testuser2);
System.out.println("主键:"+testuser2.getId());
// NOTE(review): the original comment claims this read hits the primary while
// the method javadoc said "replica" — confirm actual routing via sql-show
List users = testuserMapper.selectList(null);
users.forEach(System.out::println);
}
/**
 * Write path inside a transaction; reads in the same transaction go to the
 * primary. Spring rolls back test transactions by default, which this
 * method demonstrates.
 * NOTE(review): the intent was to make the second row's address too long so
 * that both inserts fail together, but the over-long value is commented out
 * below — as written both inserts succeed (then roll back).
 */
@Transactional
@Test
void add事务() {
Testuser testuser1 = new Testuser();
testuser1.setName("事务1");
testuser1.setAddress("a地址");
testuserMapper.insertUser(testuser1);
System.out.println("主键:"+testuser1.getId());
Testuser testuser2 = new Testuser();
testuser2.setName("事务2");
testuser2.setAddress("b地址");// intended over-long value "b地址阿发手动阀是的撒发生的方法打法上" is NOT applied
testuserMapper.insertUser(testuser2);
System.out.println("主键:"+testuser2.getId());
List users = testuserMapper.selectList(null);
users.forEach(System.out::println);
}
}
业务场景复现:一个业务大表垂直拆分,分成两个或者多个小表,放在不同的数据库中
1:配置application.properties,通过切换yml实现不同的功能
spring.application.name=shardingJdbc_demo1
# active profile: development
spring.profiles.active=dev
# Run mode. Options: Memory, Standalone, Cluster
spring.shardingsphere.mode.type=Memory
# DataSource driver: ShardingSphere's own JDBC driver
spring.datasource.driver-class-name=org.apache.shardingsphere.driver.ShardingSphereDriver
# YAML rule file on the classpath; swap the file name to switch scenarios
spring.datasource.url=jdbc:shardingsphere:classpath:sharding_垂直分片.yaml
2:配置sharding_垂直分片.yaml
#垂直分片,一个表属性太多,进行垂直分片
#将一个大表 垂直拆分到两个数据库W1(TestUser) W11(Orders)
#Vertical sharding: a wide schema is split across two databases —
#W1 holds TestUser, W11 holds orders.
dataSources:
  # write data source for TestUser
  master1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    # Connector/J 8.x class name; com.mysql.jdbc.Driver is deprecated since 8.0
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  master2:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W11?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
rules:
- !SHARDING
  tables:
    #logical table name
    TestUser:
      #actual data node (database.table)
      actualDataNodes: master1.TestUser
    #logical table name
    orders:
      #actual data node (database.table)
      actualDataNodes: master2.orders
#enable SQL logging
props:
  sql-show: true
3:实体entity(在这两个数据库中创建TestUser、orders表)
/**
 * Entity mapped to the TestUser table (MyBatis-Plus).
 *
 * @TableName TestUser
 */
@TableName(value ="TestUser")
@Data
public class Testuser implements Serializable {
// auto-increment primary key: the database generates the value on insert
@TableId(type = IdType.AUTO)
private Integer id;
private String name;
private String address;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
@TableName(value = "orders")
@Data
public class Order implements Serializable {
/**
 * Primary key. MyBatis-Plus IdType options:
 * - IdType.AUTO: column must be auto-increment; generated key is written back.
 * - IdType.NONE: default; no client-side generation (generated key not returned).
 * - IdType.ASSIGN_UUID: UUID string — cannot be a numeric type.
 * - IdType.ASSIGN_ID: snowflake id; column must NOT be auto-increment.
 */
// @TableId(type = IdType.AUTO)
// private int id;
// horizontal sharding variant: snowflake generated by MyBatis-Plus
// @TableId(type = IdType.ASSIGN_ID)
// private Long id;
// NONE is required when the id comes from ShardingSphere's keyGenerateStrategy
@TableId(type = IdType.NONE)
private Long id;
private Integer userId;//database-sharding column (user_id)
private String orderNo;//table-sharding column (order_no)
private BigDecimal amount;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
4:测试类
@SpringBootTest
class 垂直分片Test {
@Autowired
TestuserMapper testuserMapper;
@Autowired
OrderMapper orderMapper;
/**
* 将一个大表 垂直拆分到两个数据库W1(TestUser) W11(Orders)
* 插入两个表的数据 到不同的库下边的表
*/
//@Transactional
@Test
void addUser_Order() {
Testuser testuser=new Testuser();
testuser.setName("销售1");
testuser.setAddress("深圳1");
testuserMapper.insert(testuser);//添加用户信息
System.out.println(testuser);
Order order=new Order();
order.setUserId(testuser.getId());
order.setOrderNo("CN_111");
order.setAmount(new BigDecimal(100.3));
orderMapper.insert(order);
}
/**
* 将一个大表 垂直拆分到两个数据库W1(TestUser) W11(Orders)
* 查询去不同的库 查到数据
*/
//@Transactional
@Test
void selectUser_Order() {
Testuser testuser = testuserMapper.selectById(67);
System.out.println(testuser);
List orders = orderMapper.selectList(null);
orders.forEach(System.out::println);
}
}
业务场景复现:比如一个大表orders表,水平按行拆分。按照用户id拆分,分到orders0、orders1表中。
1:配置application.properties,通过切换yml实现不同的功能
# application name and port
spring.application.name=shardingJdbc_demo1
# active profile: development
spring.profiles.active=dev
# Run mode. Options: Memory, Standalone, Cluster
spring.shardingsphere.mode.type=Memory
# DataSource driver: ShardingSphere's own JDBC driver
spring.datasource.driver-class-name=org.apache.shardingsphere.driver.ShardingSphereDriver
# YAML rule file on the classpath; swap the file name to switch scenarios
spring.datasource.url=jdbc:shardingsphere:classpath:sharding_水平单表不分库分片.yaml
2:配置sharding_水平单表不分库分片.yaml
#水平分片,将一个表按照一定的规则user_id 拆分成多个表 放到同一个数据库中
# * R_Order1 (数据库1)
# * orders0 (表0)
# * orders1 (表1)
# *
#Horizontal sharding inside a single database: orders is split by user_id
#into two physical tables:
# * R_Order1 (database 1)
# *   orders0 (table 0)
# *   orders1 (table 1)
dataSources:
  Order_0:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    # Connector/J 8.x class name; com.mysql.jdbc.Driver is deprecated since 8.0
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/R_Order1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
rules:
- !SHARDING
  tables:
    # logical table name
    orders:
      #Order_$->{0..1}.orders->{0..1}
      #actual nodes: Order_0.orders0, Order_0.orders1
      actualDataNodes: Order_0.orders${0..1}
      tableStrategy:
        #shard by user_id; the primary key must not be auto-increment
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: userid_inline
  #sharding algorithms
  shardingAlgorithms:
    userid_inline:
      #algorithm type
      type: INLINE
      #user_id % 2 selects the physical table (orders0 / orders1)
      props:
        algorithm-expression: orders$->{user_id % 2}
#enable SQL logging
props:
  sql-show: true
3:实体entity(在这1个数据库中创建orders0、orders1表)
@TableName(value = "orders")
@Data
public class Order implements Serializable {
/**
 * Primary key. MyBatis-Plus IdType options:
 * - IdType.AUTO: column must be auto-increment; generated key is written back.
 * - IdType.NONE: default; no client-side generation (generated key not returned).
 * - IdType.ASSIGN_UUID: UUID string — cannot be a numeric type.
 * - IdType.ASSIGN_ID: snowflake id; column must NOT be auto-increment.
 */
// @TableId(type = IdType.AUTO)
// private int id;
// horizontal sharding variant: snowflake generated by MyBatis-Plus
// @TableId(type = IdType.ASSIGN_ID)
// private Long id;
// NONE is required when the id comes from ShardingSphere's keyGenerateStrategy
@TableId(type = IdType.NONE)
private Long id;//must not be auto-increment in sharded tables
private Integer userId;//database-sharding column (user_id)
private String orderNo;//table-sharding column (order_no)
private BigDecimal amount;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
4:测试类
@SpringBootTest
class 水平单表不分库分片Test {
@Autowired
OrderMapper orderMapper;
/**
* 将一个大表 根据user_ID分片
* 根据 user_id % 2 轮训插入一个数据库的 orders0和orders1表
*/
//@Transactional
@Test
void addOrder() {
Order order = new Order();
order.setUserId(17);//这里填写 不同的user_id来测试
order.setOrderNo("CN_111");
order.setAmount(new BigDecimal(100.3));
orderMapper.insert(order);
}
/**
*
*/
@Test
void selectOrder() {
//查询 汇总根据user_id匹配
Order order = orderMapper.selectByUserId(13);
System.out.println(order);
System.out.println("============");
//查询两个表 UNION ALL 连接
List orders = orderMapper.selectList(null);
orders.forEach(System.out::println);
}
}
业务场景复现:将一个用户表根据user_id水平拆分、分到不同的数据库,那么用户表关联的订单表再行拆分,分到指定的库中的不同的表
1:配置application.properties,通过切换yml实现不同的功能
# Run mode. Options: Memory, Standalone, Cluster
spring.shardingsphere.mode.type=Memory
# DataSource driver: ShardingSphere's own JDBC driver
spring.datasource.driver-class-name=org.apache.shardingsphere.driver.ShardingSphereDriver
# YAML rule file on the classpath; swap the file name to switch scenarios
spring.datasource.url=jdbc:shardingsphere:classpath:sharding_水平单表分库分片.yaml
2:配置sharding_水平单表分库分片.yaml
#水平分片,将一个表分到两个库中 在分别进行两个分片
# * R_Order1 (数据库1)
# * orders0 (表0)
# * orders1 (表1)
# *
# * R_Order2 (数据库2)
# * orders0 (表0)
# * orders1 (表1)
#Horizontal sharding across two databases, each holding two shard tables:
# * R_Order1 (database 1): orders0, orders1
# * R_Order2 (database 2): orders0, orders1
dataSources:
  # write data source for the unsharded TestUser table
  master1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    # Connector/J 8.x class name; com.mysql.jdbc.Driver is deprecated since 8.0
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  s_order0:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/R_Order1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  s_order1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/R_Order2?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
rules:
- !SHARDING
  tables:
    #logical table name
    TestUser:
      #actual data node (database.table)
      actualDataNodes: master1.TestUser
    #logical table name
    orders:
      #actual nodes: s_order0.orders0, s_order0.orders1, s_order1.orders0, s_order1.orders1
      actualDataNodes: s_order${0..1}.orders${0..1}
      #key point 1: database strategy — user_id selects the database
      databaseStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: userid_inline
      #key point 2: table strategy — order_no selects the table
      tableStrategy:
        standard:
          shardingColumn: order_no
          shardingAlgorithmName: order_no_inline
      #snowflake key generation for the id column
      keyGenerateStrategy:
        column: id
        keyGeneratorName: snowflake1
#  alternative: default database strategy sharding every table by user_id
#  defaultDatabaseStrategy:
#    standard:
#      shardingColumn: user_id
#      shardingAlgorithmName: userid_inline
  #sharding algorithms
  shardingAlgorithms:
    #database selector: user_id decides the database
    userid_inline:
      type: INLINE
      props:
        algorithm-expression: s_order${user_id % 2}
    #table selector: order_no decides the table
    order_no_inline:
      type: HASH_MOD
      props:
        sharding-count: 2 # number of table shards
  #snowflake key generators
  keyGenerators:
    snowflake1:
      type: SNOWFLAKE
#      props:
#        worker-id: 1
#enable SQL logging
props:
  sql-show: true
3:实体entity(在这2个数据库中创建orders0、orders1表)
/**
 * Entity mapped to the sharded logical table orders.
 * @TableName orders
 */
@TableName(value = "orders")
@Data
public class Order implements Serializable {
/**
 * Primary key. MyBatis-Plus IdType options:
 * - IdType.AUTO: column must be auto-increment; generated key is written back.
 * - IdType.NONE: default; no client-side generation (generated key not returned).
 * - IdType.ASSIGN_UUID: UUID string — cannot be a numeric type.
 * - IdType.ASSIGN_ID: snowflake id; column must NOT be auto-increment.
 */
// @TableId(type = IdType.AUTO)
// private int id;
// horizontal sharding variant: snowflake generated by MyBatis-Plus
// @TableId(type = IdType.ASSIGN_ID)
// private Long id;
// NONE is required when the id comes from ShardingSphere's keyGenerateStrategy
@TableId(type = IdType.NONE)
private Long id;//must not be auto-increment in sharded tables
private Integer userId;//database-sharding column (user_id)
private String orderNo;//table-sharding column (order_no)
private BigDecimal amount;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
4:测试类
@SpringBootTest
class 水平单表分库分片Test {
@Autowired
OrderMapper orderMapper;
/**
* 水平分片 不能使用逐渐自增
* 多表的主键会重复
* 两个数据库
*
* R_Order1 (数据库1)
* orders0 (表0)
* orders1 (表1)
*
* R_Order2 (数据库2)
* orders0 (表0)
* orders1 (表1)
*
* 指定YAML配置文件
* spring.datasource.url=jdbc:shardingsphere:classpath:sharding_水平单表分库分片.yaml
*/
@Test
void addOrder() {
for (int i = 0; i < 20; i++) {
Order order=new Order();
order.setUserId(i);//根据user_id分库
order.setOrderNo("CH"+String.valueOf(i));//根据order_no分表
order.setAmount(new BigDecimal(200));
orderMapper.insert(order);
}
}
/**
* 两个sql 查询拼接
* Actual SQL: s_order0 ::: SELECT id,user_id,order_no,amount FROM orders0 UNION ALL SELECT id,user_id,order_no,amount FROM orders1
* Actual SQL: s_order1 ::: SELECT id,user_id,order_no,amount FROM orders0 UNION ALL SELECT id,user_id,order_no,amount FROM orders1
*/
@Test
void selectOrder() {
List orders = orderMapper.selectList(null);
orders.forEach(System.out::println);
}
/**
* 根据id查询
* Actual SQL: s_order0 :::
* select * from orders0 where user_id=? UNION ALL select * from orders1 where user_id=? ::: [40, 40]
*/
@Test
void selectOrderByUserId() {
Order order = orderMapper.selectByUserId(40);
System.out.println(order);
QueryWrapper queryWrapper=new QueryWrapper<>();
queryWrapper.eq("user_id",30);
List orders = orderMapper.selectList(queryWrapper);
orders.forEach(System.out::println);
}
/**
* 雪花算法
*/
@Test
void addOrder1() {
for (int i = 100; i < 110; i++) {
Order order=new Order();
order.setUserId(i);
order.setOrderNo(String.valueOf(i));
order.setAmount(new BigDecimal(120));
orderMapper.insert(order);
}
}
}
业务场景复现:将一个订单表、订单详情表。根据user_id水平拆分、分到不同的数据库的不同表中 这里有两个数据库4张表
1:配置application.properties,通过切换yml实现不同的功能
# Run mode. Options: Memory, Standalone, Cluster
spring.shardingsphere.mode.type=Memory
# DataSource driver: ShardingSphere's own JDBC driver
spring.datasource.driver-class-name=org.apache.shardingsphere.driver.ShardingSphereDriver
# YAML rule file on the classpath; swap the file name to switch scenarios
spring.datasource.url=jdbc:shardingsphere:classpath:sharding_水平多表分库分片.yaml
2:配置sharding_水平多表分库分片.yaml
#水平分片,将一个表分到两个库中 在分别进行两个分片
# * R_Order1 (数据库1 订单表和订单详情表)
# * orders0 (表0) order_item0
# * orders1 (表1) order_item1
# *
# * R_Order2 (数据库2)
# * orders0 (表0) order_item0
# * orders1 (表1) order_item1
#Horizontal sharding of two related tables (orders + order_item) across two
#databases, two shard tables each:
# * R_Order1 (database 1): orders0/orders1, order_item0/order_item1
# * R_Order2 (database 2): orders0/orders1, order_item0/order_item1
dataSources:
  s_order0:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    # Connector/J 8.x class name; com.mysql.jdbc.Driver is deprecated since 8.0
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/R_Order1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  s_order1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/R_Order2?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
  # write data source for the unsharded TestUser table
  master1:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcUrl: jdbc:mysql://localhost:3306/W1?useUnicode=true&characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
rules:
- !SHARDING
  tables:
    #logical table TestUser: single node, only ever written to master1
    TestUser:
      actualDataNodes: master1.TestUser
#    #alternative: explicit node list for the dict table
#    dict:
#      actualDataNodes: master1.dict,s_order${0..1}.dict
    #logical table orders → physical orders0 / orders1
    orders:
      #actual nodes: s_order0.orders0 ... s_order1.orders1
      actualDataNodes: s_order${0..1}.orders${0..1}
      #key point 1: user_id selects the database
      databaseStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: userid_inline
      #key point 2: order_no selects the table
      tableStrategy:
        standard:
          shardingColumn: order_no
          shardingAlgorithmName: order_no_inline
      #snowflake key generation for the id column
      keyGenerateStrategy:
        column: id
        keyGeneratorName: snowflake1
    #logical table order_item → physical order_item0 / order_item1
    order_item:
      actualDataNodes: s_order${0..1}.order_item${0..1}
      #same database strategy as orders
      databaseStrategy:
        standard:
          shardingColumn: user_id
          shardingAlgorithmName: userid_inline
      #same table strategy as orders
      tableStrategy:
        standard:
          shardingColumn: order_no
          shardingAlgorithmName: order_no_inline
      #snowflake key generation for the id column
      keyGenerateStrategy:
        column: id
        keyGeneratorName: snowflake1
  #orders and order_item share identical sharding rules and must be joined on
  #the sharding key; binding them avoids cartesian-product routing
  bindingTables:
    - orders,order_item
  #broadcast table: a full copy exists in every database
  broadcastTables:
    - dict
  #sharding algorithms
  shardingAlgorithms:
    #database selector: user_id decides the database
    userid_inline:
      type: INLINE
      props:
        algorithm-expression: s_order${user_id % 2}
    #table selector: order_no decides the table
    order_no_inline:
      type: HASH_MOD
      props:
        sharding-count: 2 # number of table shards
  #snowflake key generators
  keyGenerators:
    snowflake1:
      type: SNOWFLAKE
#enable SQL logging
props:
  sql-show: true
3:实体entity(在这2个数据库中创建orders0、orders1、order_item0、order_item1)
/**
 * Entity mapped to the sharded logical table orders.
 * @TableName orders
 */
@TableName(value = "orders")
@Data
public class Order implements Serializable {
/**
 * Primary key. MyBatis-Plus IdType options:
 * - IdType.AUTO: column must be auto-increment; generated key is written back.
 * - IdType.NONE: default; no client-side generation (generated key not returned).
 * - IdType.ASSIGN_UUID: UUID string — cannot be a numeric type.
 * - IdType.ASSIGN_ID: snowflake id; column must NOT be auto-increment.
 */
// @TableId(type = IdType.AUTO)
// private int id;
// horizontal sharding variant: snowflake generated by MyBatis-Plus
// @TableId(type = IdType.ASSIGN_ID)
// private Long id;
// NONE is required when the id comes from ShardingSphere's keyGenerateStrategy
@TableId(type = IdType.NONE)
private Long id;//must not be auto-increment in sharded tables
private Integer userId;//database-sharding column (user_id)
private String orderNo;//table-sharding column (order_no)
private BigDecimal amount;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
@TableName(value ="order_item")
@Data
public class OrderItem implements Serializable {
/**
 * Primary key. IdType.NONE lets ShardingSphere's distributed key generator
 * (snowflake, configured via keyGenerateStrategy for order_item in the yaml)
 * produce the id.
 * FIX: the original used IdType.AUTO, contradicting its own comment, the
 * Order entity, and the sharding setup — auto-increment ids would collide
 * across the order_item0/order_item1 shard tables.
 */
@TableId(type=IdType.NONE)
private Long id;
private String orderNo;//table-sharding column (order_no)
private Long userId;
private BigDecimal price;//unit price
private Integer count;//quantity
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
/**
 * Dictionary entity mapped to the broadcast table dict — every configured
 * database holds a full copy of its rows.
 * @TableName dict
 */
@TableName(value ="dict")
@Data
public class Dict implements Serializable {
@TableId
private Long id;
private String dictType;
// not a table column (exist = false): serialization version marker only
@TableField(exist = false)
private static final long serialVersionUID = 1L;
}
4:测试类
@SpringBootTest
class 水平多表分库分片Test {
@Autowired
TestuserMapper testuserMapper;
@Autowired
OrderMapper orderMapper;
@Autowired
DictMapper dictMapper;
/**
* #水平分片,将一个表分到两个库中 在分别进行两个分片
* # * R_Order1 (数据库1 订单表和订单详情表)
* # * orders0 (表0) order_item0
* # * orders1 (表1) order_item1
* # *
* # * R_Order2 (数据库2)
* # * orders0 (表0) order_item0
* # * orders1 (表1) order_item1
*
* 指定YAML配置文件
* spring.datasource.url=jdbc:shardingsphere:classpath:sharding_水平多表分库分片.yaml
*/
//测试关联表插入数据
@Test
void addOrderAndItem() {
//插入数据库1 user_id=1
for (int i = 0; i < 5; i++) {
Order order = new Order();
order.setUserId(1);//进入1库
order.setOrderNo("HU" + i);//不同表
order.setAmount(new BigDecimal(666));
orderMapper.insert(order);
for (int j = 0; j < 3; j++) {
OrderItem orderItem=new OrderItem();
orderItem.setUserId(1L);
orderItem.setOrderNo("HU"+i);
orderItem.setCount(2);
orderItem.setPrice(new BigDecimal(300.33));
orderItemMapper.insert(orderItem);
}
}
//插入数据库0 user_id=1
for (int i = 5; i < 10; i++) {
Order order = new Order();
order.setUserId(2);//进入0库
order.setOrderNo("FU" + i);
//order.setAmount(new BigDecimal(888));
orderMapper.insert(order);
for (int j = 0; j < 3; j++) {
OrderItem orderItem=new OrderItem();
orderItem.setUserId(2L);
orderItem.setOrderNo("FU"+i);
orderItem.setCount(2);
orderItem.setPrice(new BigDecimal(400.44));
orderItemMapper.insert(orderItem);
}
}
}
@Autowired
OrderItemMapper orderItemMapper;
//测试关联表插入数据
@Test
void addOrderItem() {
//插入数据库1 user_id=1
for (int i = 0; i < 5; i++) {
OrderItem orderItem = new OrderItem();
orderItem.setUserId(1L);
orderItem.setOrderNo("HU" + i);
orderItem.setCount(2);
orderItem.setPrice(new BigDecimal(300.33));
orderItemMapper.insert(orderItem);
}
}
/**
* 关联表查询 需要配置关联关系
*
* bindingTables:
* - orders,order_item
*
*
* select o.order_no,SUM(i.count*i.price) as amount
* from orders0 o LEFT JOIN order_item0 i
* on o.order_no=i.order_no
* GROUP BY o.order_no
*
* 会出现笛卡尔集 order0*order_item1h 和 order1*order_item0不会出现这种情况
* order0*order_item0 order0*order_item1
* order1*order_item0 order1*order_item1
*/
@Test
public void select关联表(){
List aa = orderMapper.selectOrderAmount(11);
aa.forEach(System.out::println);
}
//三个库都创建字典表 add的时候会插入三个数据库
@Test
public void 字典表插入(){
Dict dict=new Dict();
dict.setDictType("字典表5");
int insert = dictMapper.insert(dict);
System.out.println(dict);
List dicts = dictMapper.selectList(null);
dicts.forEach(System.out::println);
}
//三个库随机查询
@Test
public void 字典表查询(){
List dicts = dictMapper.selectList(null);
dicts.forEach(System.out::println);
}
}
ShardingProxy好处就是可以单机可以集群。当我们的项目有很多的时候,不需要修改每一个机器上的yml。ShardingProxy会统一管理yml,只需要修改一份就可以了
使用二进制发布包启动 Proxy,需要环境具备 Java JRE 8 或更高版本。
使用二进制发布包 :: ShardingSphere
下载解压之后:
# Proxy account configuration: two users, root and sharding
authority:
users:
- user: root@%
password: root
- user: sharding
password: sharding
privilege:
type: ALL_PERMITTED
#
#transaction:
# defaultType: XA
# providerType: Atomikos
#
#sqlParser:
# sqlCommentParseEnabled: false
# sqlStatementCache:
# initialCapacity: 2000
# maximumSize: 65535
# parseTreeCache:
# initialCapacity: 128
# maximumSize: 1024
#
#logging:
# loggers:
# - loggerName: ShardingSphere-SQL
# additivity: true
# level: INFO
# props:
# enable: false
#
props:
# system-log-level: INFO
# max-connections-size-per-query: 1
# kernel-executor-size: 16 # Infinite by default.
# proxy-frontend-flush-threshold: 128 # The default value is 128.
# proxy-hint-enabled: false
# # sql-show is the same as props in logger ShardingSphere-SQL, and its priority is lower than logging rule
sql-show: true
# check-table-metadata-enabled: false
# # Proxy backend query fetch size. A larger value may increase the memory usage of ShardingSphere Proxy.
# # The default value is -1, which means set the minimum value for different JDBC drivers.
# proxy-backend-query-fetch-size: -1
# proxy-frontend-executor-size: 0 # Proxy frontend executor size. The default value is 0, which means let Netty decide.
# # Available options of proxy backend executor suitable: OLAP(default), OLTP. The OLTP option may reduce time cost of writing packets to client, but it may increase the latency of SQL execution
# # and block other clients if client connections are more than `proxy-frontend-executor-size`, especially executing slow SQL.
# proxy-backend-executor-suitable: OLAP
# proxy-frontend-max-connections: 0 # Less than or equal to 0 means no limitation.
# # Available sql federation type: NONE (default), ORIGINAL, ADVANCED
# sql-federation-type: NONE
proxy-mysql-default-version: 8.0.11 # MySQL protocol version advertised to clients (8.0.11 here)
# proxy-default-port: 3307 # Proxy default port.
# proxy-netty-backlog: 1024 # Proxy netty backlog.
# cdc-server-port: 33071 # CDC server port
这里的配置和ShardingJDBC的垂直拆分配置一样,在这里配置好了,就不需要每个项目都配置了
######################################################################################################
#Vertical sharding settings
#Vertical sharding: a table with too many attributes is split across databases
#W1 holds TestUser, W11 holds orders
#Logical database name; the Spring Boot datasource URL must reference this name
databaseName: sharding_db
dataSources:
master1:
url: jdbc:mysql://localhost:3306/W1?useUnicode=true&characterEncoding=utf-8&useSSL=false
username: root
password: 123456
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
master2:
url: jdbc:mysql://localhost:3306/W11?useUnicode=true&characterEncoding=utf-8&useSSL=false
username: root
password: 123456
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
rules:
- !SHARDING
tables:
#logical table name
TestUser:
#actual data node (database.table)
actualDataNodes: master1.TestUser
#logical table name
orders:
#actual data node (database.table)
actualDataNodes: master2.orders
1:导入pom
com.baomidou
mybatis-plus-boot-starter
3.5.3.1
mysql
mysql-connector-java
8.0.33
2:配置 application.properties 数据源是Proxy代理的是数据源
#ShardingProxy test: just point a plain datasource at the proxy
#NOTE: user, password and database name are defined in the proxy's own config files
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver
#readwrite_splitting_db (read/write-splitting logical database)
#sharding_db (vertical-sharding logical database)
spring.datasource.url=jdbc:mysql://127.0.0.1:3307/sharding_db?useSSL=false&useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=UTC
spring.datasource.username=sharding
spring.datasource.password=sharding
3:代码测试
@SpringBootTest
class ShardingProxy垂直分片Test {
@Autowired
TestuserMapper testuserMapper;
@Autowired
OrderMapper orderMapper;
/**
* 将一个大表 垂直拆分到两个数据库W1(TestUser) W11(Orders)
* 插入数据 分别插入两个数据库
*/
@Transactional
@Test
void addUser_Order() {
Testuser testuser=new Testuser();
testuser.setName("proxy6");
testuser.setAddress("proxy6");
testuserMapper.insert(testuser);//添加用户信息
System.out.println(testuser);
Order order=new Order();
order.setUserId(testuser.getId());
order.setOrderNo("2222");
order.setAmount(new BigDecimal(500));
orderMapper.insert(order);
}
/**
* 将一个大表 垂直拆分到两个数据库W1(TestUser) W11(Orders)
*/
//@Transactional
@Test
void selectUser_Order() {
Testuser testuser = testuserMapper.selectById(52);
System.out.println(testuser);
List orders = orderMapper.selectList(null);
orders.forEach(System.out::println);
}
}