阅读本文前,建议先浏览:Mybatis 源码分析。
分库分表:垂直拆分–按字段拆分。水平拆分–按行拆分。
分库分表策略相关配置主要包括三部分:多库相关配置信息、针对库以及表相关分配策略、其他配置信息。
// Spring Boot auto-configuration entry point for ShardingSphere:
// loads the sharding-related property classes and exposes the sharding DataSource bean.
@Configuration
@ComponentScan("org.apache.shardingsphere.spring.boot.converter")
// @EnableConfigurationProperties binds the listed property classes from the environment
@EnableConfigurationProperties({
SpringBootShardingRuleConfigurationProperties.class,
SpringBootMasterSlaveRuleConfigurationProperties.class, SpringBootEncryptRuleConfigurationProperties.class,
SpringBootPropertiesConfigurationProperties.class, SpringBootShadowRuleConfigurationProperties.class})
@ConditionalOnProperty(prefix = "spring.shardingsphere", name = "enabled", havingValue = "true", matchIfMissing = true)
// ShardingSphere must create its DataSource before DataSourceAutoConfiguration runs;
// MybatisAutoConfiguration in turn initializes after DataSourceAutoConfiguration.
@AutoConfigureBefore(DataSourceAutoConfiguration.class)
@RequiredArgsConstructor
public class SpringBootConfiguration implements EnvironmentAware {
private final SpringBootShardingRuleConfigurationProperties shardingRule;
// Populated in setEnvironment(): logical data source name -> physical DataSource.
private final Map<String, DataSource> dataSourceMap = new LinkedHashMap<>();
@Bean
@Conditional(ShardingRuleCondition.class)
public DataSource shardingDataSource() throws SQLException {
// NOTE(review): `props` is not declared in this excerpt — presumably the
// SpringBootPropertiesConfigurationProperties field elided here; confirm against full source.
return ShardingDataSourceFactory.createDataSource(
dataSourceMap, // data source connection configuration (url, user, ...)
new ShardingRuleConfigurationYamlSwapper().swap(shardingRule),
props.getProps()
);
}
}
// Swaps the YAML sharding rule configuration into the runtime ShardingRuleConfiguration.
// NOTE(review): tableRuleConfigurationYamlSwapper / shardingStrategyConfigurationYamlSwapper
// are fields elided from this excerpt — confirm against the full class.
public final class ShardingRuleConfigurationYamlSwapper{
public ShardingRuleConfiguration swap(final YamlShardingRuleConfiguration yamlConfiguration) {
ShardingRuleConfiguration result = new ShardingRuleConfiguration();
// per-logic-table customization
for (Entry<String, YamlTableRuleConfiguration> entry : yamlConfiguration.getTables().entrySet()) {
// table-level configuration items
YamlTableRuleConfiguration tableRuleConfig = entry.getValue();
// the logic table name is the map key
tableRuleConfig.setLogicTable(entry.getKey());
result.getTableRuleConfigs().add(tableRuleConfigurationYamlSwapper.swap(tableRuleConfig));
}
result.setDefaultDataSourceName(yamlConfiguration.getDefaultDataSourceName());
if (null != yamlConfiguration.getDefaultDatabaseStrategy()) {// set the default database sharding strategy
result.setDefaultDatabaseShardingStrategyConfig(
shardingStrategyConfigurationYamlSwapper.swap(
yamlConfiguration.getDefaultDatabaseStrategy()
)
);
}
if (null != yamlConfiguration.getDefaultTableStrategy()) {// set the default table sharding strategy
result.setDefaultTableShardingStrategyConfig(
shardingStrategyConfigurationYamlSwapper.swap(
yamlConfiguration.getDefaultTableStrategy()
)
);
}
return result;
}
}
// Factory that assembles a ShardingDataSource from the physical data sources,
// the sharding rule configuration and the extra properties.
public final class ShardingDataSourceFactory {
    public static DataSource createDataSource(Map<String, DataSource> dataSourceMap, ShardingRuleConfiguration shardingRuleConfig, Properties props) {
        // The ShardingRule is built from the rule config plus the set of data source names.
        ShardingRule shardingRule = new ShardingRule(shardingRuleConfig, dataSourceMap.keySet());
        return new ShardingDataSource(dataSourceMap, shardingRule, props);
    }
}
// ShardingRule holds the sharding (database + table) strategy information.
public ShardingDataSource(Map<String, DataSource> dataSourceMap,ShardingRule shardingRule,Properties props){
// #7 the dataSourceMap is ultimately held by the abstract base class AbstractDataSourceAdapter
super(dataSourceMap);// data source connection configuration (url, user, ...)
runtimeContext = new ShardingRuntimeContext(dataSourceMap, shardingRule, props, getDatabaseType());
}
// Runtime representation of the sharding rules: data source names, per-table rules,
// default database/table sharding strategies and the default key generator.
// NOTE(review): broadcastTables, bindingTableRules, masterSlaveRules and encryptRule
// are assigned below but their declarations are elided from this excerpt.
public class ShardingRule implements BaseRule {
private final ShardingRuleConfiguration ruleConfiguration;
private final ShardingDataSourceNames shardingDataSourceNames;
private final Collection<TableRule> tableRules;
private final ShardingStrategy defaultDatabaseShardingStrategy;
private final ShardingStrategy defaultTableShardingStrategy;
private final ShardingKeyGenerator defaultShardingKeyGenerator;
public ShardingRule(final ShardingRuleConfiguration shardingRuleConfig, final Collection<String> dataSourceNames) {
this.ruleConfiguration = shardingRuleConfig;
shardingDataSourceNames = new ShardingDataSourceNames(shardingRuleConfig, dataSourceNames);
// one TableRule per configured logic table
tableRules = createTableRules(shardingRuleConfig);
broadcastTables = shardingRuleConfig.getBroadcastTables();
bindingTableRules = createBindingTableRules(shardingRuleConfig.getBindingTableGroups());
// fall-back strategies used when a table has no dedicated strategy
defaultDatabaseShardingStrategy =
createDefaultShardingStrategy(shardingRuleConfig.getDefaultDatabaseShardingStrategyConfig());
defaultTableShardingStrategy =
createDefaultShardingStrategy(shardingRuleConfig.getDefaultTableShardingStrategyConfig());
defaultShardingKeyGenerator = createDefaultKeyGenerator(shardingRuleConfig.getDefaultKeyGeneratorConfig());
masterSlaveRules = createMasterSlaveRules(shardingRuleConfig.getMasterSlaveRuleConfigs());
encryptRule = createEncryptRule(shardingRuleConfig.getEncryptRuleConfig());
}
}
总结:综上所述有关库表的分库分表策略信息均被ShardingDataSource实例持有。
本节目的:分析配置文件信息的加载过程。
在初始化 Bean SpringBootConfiguration 的过程中,BeanPostProcessor 之一的
ApplicationContextAwareProcessor
会在 postProcessBeforeInitialization 阶段回调 setEnvironment 方法。
// EnvironmentAware callback: builds one DataSource per configured logical name.
@Override
public final void setEnvironment(final Environment environment) {
    // Every data source is declared under "spring.shardingsphere.datasource.<name>".
    final String prefix = "spring.shardingsphere.datasource.";
    // "spring.shardingsphere.datasource.name" lists the logical data source names;
    // dataSourceMap ends up holding every DataSource declared in the configuration.
    for (final String dataSourceName : getDataSourceNames(environment, prefix)) {
        dataSourceMap.put(dataSourceName, getDataSource(environment, prefix, dataSourceName));
    }
}
// Builds one DataSource for the given logical name by reading
// "spring.shardingsphere.datasource.<name>.*" from the environment.
private DataSource getDataSource(final Environment environment, final String prefix, final String dataSourceName)
throws ReflectiveOperationException, NamingException {
Map<String, Object> dataSourceProps = PropertyUtil.handle(environment, prefix + dataSourceName.trim(), Map.class);
// instantiate the DataSource implementation named by the "type" property (e.g. HikariDataSource)
DataSource result = DataSourceUtil.getDataSource(dataSourceProps.get("type").toString(), dataSourceProps);
...
return result;
}
/**
 * Instantiates the configured DataSource implementation (e.g. HikariDataSource)
 * reflectively and populates it from the property map via its setters.
 *
 * @param dataSourceClassName fully qualified class name taken from the "type" property
 * @param dataSourceProperties raw properties (user name, password, jdbcUrl, ...)
 * @return the configured DataSource instance
 * @throws ReflectiveOperationException if the class cannot be loaded or instantiated
 */
public static DataSource getDataSource(final String dataSourceClassName, final Map<String, Object> dataSourceProperties) throws ReflectiveOperationException {
    // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance(),
    // which bypassed compiler checking of checked exceptions thrown by the constructor.
    DataSource result = (DataSource) Class.forName(dataSourceClassName).getDeclaredConstructor().newInstance();
    // Copy every configured property (user name, password, jdbcUrl, ...) onto the
    // data source through the matching setter.
    for (Entry<String, Object> entry : dataSourceProperties.entrySet()) {
        Object value = entry.getValue();
        callSetterMethod(result, getSetterMethodName(entry.getKey()), null == value ? null : value.toString());
    }
    return result;
}
// Obtains a Connection (a ShardingConnection when ShardingDataSource backs the
// SpringManagedTransaction — see the earlier MyBatis analysis) and prepares the Statement.
private Statement prepareStatement(StatementHandler handler, Log statementLog) throws SQLException {
    Connection connection = getConnection(statementLog);
    Statement statement = handler.prepare(connection, transaction.getTimeout());
    handler.parameterize(statement);
    return statement;
}
public class ShardingDataSource extends AbstractDataSourceAdapter {
    // Returns a ShardingConnection wrapping every underlying DataSource;
    // getDataSourceMap() reads the map held by AbstractDataSourceAdapter.
    @Override
    public final ShardingConnection getConnection() {
        TransactionType transactionType = TransactionTypeHolder.get();
        return new ShardingConnection(getDataSourceMap(), runtimeContext, transactionType);
    }
}
// The ShardingConnection keeps every physical DataSource of the application plus the
// runtime context, and resolves the transaction manager matching the transaction type.
public ShardingConnection(final Map<String, DataSource> dataSourceMap, final ShardingRuntimeContext runtimeContext,
final TransactionType transactionType) {
// holds all data sources of the current application
this.dataSourceMap = dataSourceMap;
this.runtimeContext = runtimeContext;
this.transactionType = transactionType;
shardingTransactionManager =
runtimeContext.getShardingTransactionManagerEngine().getTransactionManager(transactionType);
}
// The Connection here is a ShardingConnection, so the returned Statement is a ShardingPreparedStatement.
public class PreparedStatementHandler extends BaseStatementHandler {
    protected Statement instantiateStatement(Connection connection) throws SQLException {
        String sql = boundSql.getSql();
        // Key-generating statements must ask the driver to return generated keys.
        if (mappedStatement.getKeyGenerator() instanceof Jdbc3KeyGenerator) {
            String[] keyColumnNames = mappedStatement.getKeyColumns();
            return keyColumnNames == null
                ? connection.prepareStatement(sql, PreparedStatement.RETURN_GENERATED_KEYS)
                : connection.prepareStatement(sql, keyColumnNames);
        }
        // Default result-set type: plain prepare.
        if (mappedStatement.getResultSetType() == ResultSetType.DEFAULT) {
            return connection.prepareStatement(sql);
        }
        // Explicit result-set type with read-only concurrency.
        return connection.prepareStatement(sql, mappedStatement.getResultSetType().getValue(), ResultSet.CONCUR_READ_ONLY);
    }
}
// Executes the prepared statement and delegates result mapping to the result set handler.
@Override
public <E> List<E> query(Statement statement, ResultHandler resultHandler) throws SQLException {
    // The statement is in fact a ShardingPreparedStatement.
    PreparedStatement preparedStatement = (PreparedStatement) statement;
    preparedStatement.execute();
    return resultSetHandler.handleResultSets(preparedStatement);
}
// Entry point where the sharding logic kicks in: route/rewrite, build execute units, run.
public boolean execute() throws SQLException {
// the sharding (route + rewrite) logic starts here
prepare();
// #5 initializes the StatementExecuteUnits — the smallest units that finally run SQL —
// i.e. fills the AbstractStatementExecutor#inputGroups property
initPreparedStatementExecutor();
// the real SQL execution happens through a callback
return preparedStatementExecutor.execute();
}
// Produces the callback that finally runs the prepared statement against the raw JDBC driver.
public final class SQLExecuteCallbackFactory {
public static SQLExecuteCallback<Boolean> getPreparedSQLExecuteCallback(final DatabaseType databaseType, final
boolean isExceptionThrown) {
return new SQLExecuteCallback<Boolean>(databaseType, isExceptionThrown) {
@Override
protected Boolean executeSQL(final String sql, final Statement statement, final ConnectionMode
connectionMode) throws SQLException {
// plain native JDBC execution happens here
return ((PreparedStatement) statement).execute();
}
};
}
}
AbstractStatementExecutor#inputGroups
包含 SQL 执行所需的 SQL、SQL 参数以及 PreparedStatement 等;其中 PreparedStatement 持有数据库连接、jdbcUrl 等执行 SQL 所必需的条件。
// Turns routed ExecutionUnits into input groups of StatementExecuteUnits,
// obtaining the target connections and prepared statements along the way.
public final class PreparedStatementExecutor extends AbstractStatementExecutor {
public void init(final ExecutionContext executionContext) throws SQLException {
setSqlStatementContext(executionContext.getSqlStatementContext());
// fills the inputGroups property of AbstractStatementExecutor
getInputGroups().addAll(obtainExecuteGroups(executionContext.getExecutionUnits()));
cacheStatements();
}
private Collection<InputGroup<StatementExecuteUnit>> obtainExecuteGroups(final Collection<ExecutionUnit>
executionUnits) throws SQLException {
return getSqlExecutePrepareTemplate().getExecuteUnitGroups(executionUnits, new SQLExecutePrepareCallback() {
@Override
public List<Connection> getConnections(final ConnectionMode connectionMode, final String dataSourceName,
final int connectionSize) throws SQLException {
// dataSourceName: target database name returned by the user-implemented PreciseShardingAlgorithm
// look the matching DataSource up in AbstractDataSourceAdapter's dataSourceMap,
// then obtain the database connections from that DataSource
return PreparedStatementExecutor.super.getConnection().getConnections(connectionMode, dataSourceName,
connectionSize);
}
@Override
public StatementExecuteUnit createStatementExecuteUnit(final Connection connection, final ExecutionUnit
executionUnit, final ConnectionMode connectionMode) throws SQLException {
// build the StatementExecuteUnit; its Statement comes from createPreparedStatement below
return new StatementExecuteUnit(executionUnit, createPreparedStatement(connection,
executionUnit.getSqlUnit().getSql()), connectionMode);
}
});
}
private PreparedStatement createPreparedStatement(final Connection connection, final String sql) throws
SQLException {
// the PreparedStatement is obtained through the pooled connection (e.g. HikariProxyConnection)
return returnGeneratedKeys ? connection.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS)
: connection.prepareStatement(sql, getResultSetType(), getResultSetConcurrency(),
getResultSetHoldability());
}
}
// Smallest unit of SQL execution: the routed unit plus its ready-to-run Statement.
public final class StatementExecuteUnit {
private final ExecutionUnit executionUnit; // target data source + rewritten SQL
private final Statement statement; // prepared against the target connection
private final ConnectionMode connectionMode;
}
// One routed execution: the target data source name and the SQL unit to run on it.
public final class ExecutionUnit {
private final String dataSourceName;
private final SQLUnit sqlUnit;
}
// The rewritten SQL text together with its positional parameters.
public final class SQLUnit {
private final String sql;
private final List<Object> parameters;
}
// Base adapter holding the full data source map and the detected database type.
public abstract class AbstractDataSourceAdapter{
private final Map<String, DataSource> dataSourceMap;
private final DatabaseType databaseType;
// Derives the database type (MySQL, Oracle, MariaDB, ...) from the configured data sources.
// NOTE(review): as excerpted, the loop simply keeps the type of the LAST data source;
// the full source presumably validates that all data sources share one type — confirm.
private DatabaseType createDatabaseType(){
DatabaseType result = null;
for (DataSource each : dataSourceMap.values()) {
// determine the database type of this data source
DatabaseType databaseType = createDatabaseType(each);
result = databaseType;
}
return result;
}
}
利用 HikariDataSource
完成以下功能:
// Repeated for emphasis: ShardingDataSource inherits AbstractDataSourceAdapter's dataSourceMap.
public class ShardingDataSource extends AbstractDataSourceAdapter{}
// Runs routing and rewriting for one SQL: the resulting ExecutionContext carries the
// target databases/tables together with the rewritten SQL and its parameters.
public ExecutionContext prepare(final String sql, final List<Object> parameters) {
List<Object> clonedParameters = cloneParameters(parameters);
// apply the sharding strategy; routeContext contains target database, target table, SQL and parameters
RouteContext routeContext = executeRoute(sql, clonedParameters);
ExecutionContext result = new ExecutionContext(routeContext.getSqlStatementContext());
// regenerate the SQL against the routed target database/table
result.getExecutionUnits().addAll(executeRewrite(sql, clonedParameters, routeContext));
return result;
}
// sql: the original (logic) SQL
private Collection<ExecutionUnit> executeRewrite(String sql,List<Object> parameters,RouteContext routeContext) {
...
SQLRewriteContext sqlRewriteContext = rewriter.createSQLRewriteContext(sql, parameters, routeContext.getSqlStatementContext(), routeContext);
// no route units -> plain rewrite; otherwise rewrite once per routed unit
return routeContext.getRouteResult().getRouteUnits().isEmpty() ? rewrite(sqlRewriteContext) : rewrite(routeContext, sqlRewriteContext);
}
// Rewrites the SQL once per routed unit: the actual data source / table names replace
// the logic names, and the matching parameter list is attached to each result.
public Map<RouteUnit, SQLRewriteResult> rewrite(SQLRewriteContext sqlRewriteContext, RouteResult routeResult) {
    Map<RouteUnit, SQLRewriteResult> result = new LinkedHashMap<>(routeResult.getRouteUnits().size(), 1);
    for (RouteUnit routeUnit : routeResult.getRouteUnits()) {
        // toSQL(): assembles the new SQL from the target database/table of this unit
        SQLRewriteResult rewriteResult = new SQLRewriteResult(
            new RouteSQLBuilder(sqlRewriteContext, routeUnit).toSQL(),
            getParameters(sqlRewriteContext.getParameterBuilder(), routeResult, routeUnit));
        result.put(routeUnit, rewriteResult);
    }
    return result;
}
// Sharding route decoration: validates the statement, extracts sharding conditions,
// picks a route engine and computes the final target databases and tables.
public RouteContext decorate(RouteContext routeContext,ShardingSphereMetaData metaData,ShardingRule shardingRule,
ConfigurationProperties properties) {
SQLStatementContext sqlStatementContext = routeContext.getSqlStatementContext();
List<Object> parameters = routeContext.getParameters();
// validate the statement against the sharding rule when a validator exists for it
ShardingStatementValidatorFactory.newInstance(
sqlStatementContext.getSqlStatement()).ifPresent(validator -> validator.validate(shardingRule, sqlStatementContext.getSqlStatement(), parameters));
ShardingConditions shardingConditions = getShardingConditions(parameters, sqlStatementContext, metaData.getSchema(), shardingRule);
boolean needMergeShardingValues = isNeedMergeShardingValues(sqlStatementContext, shardingRule);
if (sqlStatementContext.getSqlStatement() instanceof DMLStatement && needMergeShardingValues) {
checkSubqueryShardingValues(sqlStatementContext, shardingRule, shardingConditions);
mergeShardingConditions(shardingConditions);
}
ShardingRouteEngine shardingRouteEngine = ShardingRouteEngineFactory.newInstance(shardingRule, metaData,
sqlStatementContext, shardingConditions, properties);
// e.g. ShardingStandardRoutingEngine#route applies the database/table sharding strategies;
// routeResult holds the final target database(s) and table(s)
RouteResult routeResult = shardingRouteEngine.route(shardingRule);
return new RouteContext(sqlStatementContext, parameters, routeResult);
}
// For every sharding condition, resolves the database and table sharding values
// and routes them to concrete DataNodes (target data source + actual table).
private Collection<DataNode> routeByShardingConditionsWithCondition(final ShardingRule shardingRule, final TableRule tableRule) {
Collection<DataNode> result = new LinkedList<>();
for (ShardingCondition each : shardingConditions.getConditions()) {
Collection<DataNode> dataNodes = route0(shardingRule, tableRule,
getShardingValuesFromShardingConditions(shardingRule,
// columns of the database sharding strategy
shardingRule.getDatabaseShardingStrategy(tableRule).getShardingColumns(), each),
getShardingValuesFromShardingConditions(shardingRule,
// columns of the table sharding strategy
shardingRule.getTableShardingStrategy(tableRule).getShardingColumns(), each));
result.addAll(dataNodes);
originalDataNodes.add(dataNodes);
}
return result;
}
// Routes one sharding condition: database-level sharding first, then table-level
// sharding within every routed data source; returns the resulting DataNodes.
private Collection<DataNode> route0(ShardingRule shardingRule, TableRule tableRule, List<RouteValue> databaseShardingValues, List<RouteValue> tableShardingValues) {
    Collection<DataNode> routedNodes = new LinkedList<>();
    // database sharding picks the target data source(s)
    for (String dataSourceName : routeDataSources(shardingRule, tableRule, databaseShardingValues)) {
        // table sharding picks the actual table(s) inside this data source
        routedNodes.addAll(routeTables(shardingRule, tableRule, dataSourceName, tableShardingValues));
    }
    // routedNodes now contains the sharded database + table pairs
    return routedNodes;
}
// Converts the routed DataNodes into a RouteResult of RouteUnits, each carrying a
// data source mapper and a logic-table -> actual-table mapper.
private RouteResult generateRouteResult(final Collection<DataNode> routedDataNodes) {
    RouteResult result = new RouteResult();
    result.getOriginalDataNodes().addAll(originalDataNodes);
    for (DataNode dataNode : routedDataNodes) {
        // logical and actual data source names coincide for the data source mapper
        RouteMapper dataSourceMapper = new RouteMapper(dataNode.getDataSourceName(), dataNode.getDataSourceName());
        RouteMapper tableMapper = new RouteMapper(logicTableName, dataNode.getTableName());
        result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.singletonList(tableMapper)));
    }
    return result;
}
// Minimal plain-JDBC example for contrast with the sharding flow above.
public static void main(String[] args) throws Exception {
    // 1. Register the JDBC driver (optional since JDBC 4.0; kept for clarity).
    Class.forName("com.mysql.jdbc.Driver");
    // 2-4. try-with-resources guarantees the connection, statement and result set are
    // closed in reverse order even if an exception is thrown while iterating —
    // the original closed them manually and leaked all three on any SQLException.
    try (Connection con = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306/day01_db",
            "root", "1234");
         PreparedStatement ps = con.prepareStatement("select * from category");
         ResultSet rs = ps.executeQuery()) {
        // 5. Iterate the result set row by row.
        while (rs.next()) {
            int cid = rs.getInt("cid");
            String cname = rs.getString("cname");
            System.out.println("cid:" + cid + "\t cname:" + cname);
        }
    }
    // 6. Resources are released automatically by try-with-resources.
}