9. StandardRoutingEngine#route computes the routing result: it looks up the table rule for the logic table, obtains the sharding columns of the database and table sharding strategies, and then the concrete sharding values for those columns.
public RoutingResult route() {
    TableRule tableRule = shardingRule.getTableRule(logicTableName);
    Collection<String> databaseShardingColumns = shardingRule.getDatabaseShardingStrategy(tableRule).getShardingColumns();
    Collection<String> tableShardingColumns = shardingRule.getTableShardingStrategy(tableRule).getShardingColumns();
    Collection<DataNode> routedDataNodes = new LinkedHashSet<>();
    if (HintManagerHolder.isUseShardingHint()) {
        List<ShardingValue> databaseShardingValues = getDatabaseShardingValuesFromHint(databaseShardingColumns);
        List<ShardingValue> tableShardingValues = getTableShardingValuesFromHint(tableShardingColumns);
        Collection<DataNode> dataNodes = route(tableRule, databaseShardingValues, tableShardingValues);
        for (ShardingCondition each : shardingConditions.getShardingConditions()) {
            if (each instanceof InsertShardingCondition) {
                ((InsertShardingCondition) each).getDataNodes().addAll(dataNodes);
            }
        }
        routedDataNodes.addAll(dataNodes);
    } else {
        if (shardingConditions.getShardingConditions().isEmpty()) {
            routedDataNodes.addAll(route(tableRule, Collections.<ShardingValue>emptyList(), Collections.<ShardingValue>emptyList()));
        } else {
            for (ShardingCondition each : shardingConditions.getShardingConditions()) {
                List<ShardingValue> databaseShardingValues = getShardingValues(databaseShardingColumns, each);
                List<ShardingValue> tableShardingValues = getShardingValues(tableShardingColumns, each);
                Collection<DataNode> dataNodes = route(tableRule, databaseShardingValues, tableShardingValues);
                routedDataNodes.addAll(dataNodes);
                if (each instanceof InsertShardingCondition) {
                    ((InsertShardingCondition) each).getDataNodes().addAll(dataNodes);
                }
            }
        }
    }
    return generateRoutingResult(routedDataNodes);
}
Routing then resolves the concrete data nodes: the actually existing data sources and table names are collected, and the strategy configured in the XML file is applied through StandardShardingStrategy#doSharding, which delegates to PreciseShardingAlgorithm#doSharding to pick the matching data nodes (an example algorithm sketch follows the snippet below).
private Collection<DataNode> route(final TableRule tableRule, final List<ShardingValue> databaseShardingValues, final List<ShardingValue> tableShardingValues) {
    Collection<String> routedDataSources = routeDataSources(tableRule, databaseShardingValues);
    Collection<DataNode> result = new LinkedList<>();
    for (String each : routedDataSources) {
        result.addAll(routeTables(tableRule, each, tableShardingValues));
    }
    return result;
}
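For reference, this is roughly what a user-defined PreciseShardingAlgorithm plugged into the standard strategy can look like. It is only a sketch: the t_order_N table naming, the order_id column and the modulo rule are assumptions made for illustration, and the import package prefix differs between Sharding-JDBC/Sharding-Sphere versions.

// Package layout differs across Sharding-JDBC/Sharding-Sphere versions; adjust the imports to your version.
import io.shardingsphere.api.algorithm.sharding.PreciseShardingValue;
import io.shardingsphere.api.algorithm.sharding.standard.PreciseShardingAlgorithm;

import java.util.Collection;

// Picks the actual table whose suffix equals order_id % (number of actual tables), e.g. t_order_0 / t_order_1.
public final class OrderIdModuloShardingAlgorithm implements PreciseShardingAlgorithm<Long> {

    @Override
    public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Long> shardingValue) {
        long suffix = shardingValue.getValue() % availableTargetNames.size();
        for (String each : availableTargetNames) {
            if (each.endsWith(String.valueOf(suffix))) {
                return each;
            }
        }
        throw new UnsupportedOperationException("No actual table matched for sharding value: " + shardingValue.getValue());
    }
}

Roughly speaking, routeTables above delegates to the configured StandardShardingStrategy, which calls a doSharding of exactly this shape with the actual table names as availableTargetNames.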
Finally, generateRoutingResult builds the routing result, recording the target data sources and the mapping between logic table names and actual table names.
10. Next the SQL rewrite engine SQLRewriteEngine is created. It rewrites table names with appendTablePlaceholder(result, (TableToken) each, count, sqlTokens) and the insert column values with appendInsertValuesToken(result, (InsertValuesToken) each, count, sqlTokens).
public SQLBuilder rewrite(final boolean isRewriteLimit) {
    SQLBuilder result = new SQLBuilder(parameters);
    if (sqlTokens.isEmpty()) {
        result.appendLiterals(originalSQL);
        return result;
    }
    int count = 0;
    sortByBeginPosition();
    for (SQLToken each : sqlTokens) {
        if (0 == count) {
            result.appendLiterals(originalSQL.substring(0, each.getBeginPosition()));
        }
        if (each instanceof TableToken) {
            appendTablePlaceholder(result, (TableToken) each, count, sqlTokens);
        } else if (each instanceof SchemaToken) {
            appendSchemaPlaceholder(result, (SchemaToken) each, count, sqlTokens);
        } else if (each instanceof IndexToken) {
            appendIndexPlaceholder(result, (IndexToken) each, count, sqlTokens);
        } else if (each instanceof ItemsToken) {
            appendItemsToken(result, (ItemsToken) each, count, sqlTokens);
        } else if (each instanceof InsertValuesToken) {
            appendInsertValuesToken(result, (InsertValuesToken) each, count, sqlTokens);
        } else if (each instanceof RowCountToken) {
            appendLimitRowCount(result, (RowCountToken) each, count, sqlTokens, isRewriteLimit);
        } else if (each instanceof OffsetToken) {
            appendLimitOffsetToken(result, (OffsetToken) each, count, sqlTokens, isRewriteLimit);
        } else if (each instanceof OrderByToken) {
            appendOrderByToken(result, count, sqlTokens);
        } else if (each instanceof InsertColumnToken) {
            appendSymbolToken(result, (InsertColumnToken) each, count, sqlTokens);
        }
        count++;
    }
    return result;
}
SQLRewriteEngine#generateSQL then assembles the real SQL. It obtains the mapping between logic and actual tables as well as the binding-table relationships, and uses SQLBuilder to stitch the SQL together: fragments that are not placeholders are appended as-is, the actual table name is looked up from the logic table name, table and column names are appended, and the insert parameter values are resolved, producing a SQLUnit. Finally the data source, the SQL and its concrete parameter values are wrapped into a SQLExecutionUnit and put into the SQLRouteResult that is returned. (A small illustration of the net effect follows the snippet below.)
public SQLUnit toSQL(final TableUnit tableUnit, final Map<String, String> logicAndActualTableMap, final ShardingRule shardingRule) {
    // appends plain literals as-is, replaces table/schema/index and insert-values placeholders with the routed
    // actual values, and assembles the result together with the resolved parameter values into the returned SQLUnit
    ...
}
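To make the effect of the rewrite concrete, here is a purely illustrative sketch: the TableToken records where the logic table name sits in the original SQL, and SQLBuilder keeps everything around it while substituting the actual table chosen by routing. The t_order / t_order_1 names and the SQL text are assumptions, not taken from this walkthrough's configuration.

public final class RewriteIllustration {

    public static void main(final String[] args) {
        // Logic SQL as written against the logic table.
        String logicSQL = "INSERT INTO t_order (order_id, user_id) VALUES (?, ?)";
        // The TableToken marks the begin position of "t_order"; everything before and after it is appended
        // verbatim, and the actual table picked by routing (assume t_order_1) replaces the logic name.
        int beginPosition = logicSQL.indexOf("t_order");
        String actualSQL = logicSQL.substring(0, beginPosition)
                + "t_order_1"
                + logicSQL.substring(beginPosition + "t_order".length());
        // Prints: INSERT INTO t_order_1 (order_id, user_id) VALUES (?, ?)
        System.out.println(actualSQL);
    }
}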
11. Back in ShardingPreparedStatement#route, for each execution unit a database connection is obtained for its data source via AbstractConnectionAdapter#getConnection and put into the connection cache with cachedConnections.put(dataSourceName, result); the recorded method invocations are replayed via WrapperAdapter#replayMethodsInvocation, and a PreparedStatement is created and kept.
for (SQLExecutionUnit each : routeResult.getExecutionUnits()) {
    PreparedStatement preparedStatement = generatePreparedStatement(each);
    routedStatements.add(preparedStatement);
    replaySetParameter(preparedStatement, each.getSqlUnit().getParameterSets().get(0));
    result.add(new PreparedStatementUnit(each, preparedStatement));
}

private PreparedStatement generatePreparedStatement(final SQLExecutionUnit sqlExecutionUnit) throws SQLException {
    Connection connection = getConnection().getConnection(sqlExecutionUnit.getDataSource());
    return returnGeneratedKeys ? connection.prepareStatement(sqlExecutionUnit.getSqlUnit().getSql(), Statement.RETURN_GENERATED_KEYS)
            : connection.prepareStatement(sqlExecutionUnit.getSqlUnit().getSql(), resultSetType, resultSetConcurrency, resultSetHoldability);
}
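The connection handling mentioned in step 11 boils down to a get-or-create cache keyed by data source name. A minimal sketch of that idea, assuming a plain dataSourceMap of raw DataSources (class, field and method names here are illustrative, not the adapter's exact code):

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;

// Get-or-create one physical Connection per data source name and reuse it for later execution units.
public final class ConnectionCacheSketch {

    private final Map<String, DataSource> dataSourceMap;

    private final Map<String, Connection> cachedConnections = new HashMap<>();

    public ConnectionCacheSketch(final Map<String, DataSource> dataSourceMap) {
        this.dataSourceMap = dataSourceMap;
    }

    public Connection getConnection(final String dataSourceName) throws SQLException {
        if (cachedConnections.containsKey(dataSourceName)) {
            return cachedConnections.get(dataSourceName);
        }
        Connection result = dataSourceMap.get(dataSourceName).getConnection();
        cachedConnections.put(dataSourceName, result);
        return result;
    }
}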
The parameter setters are captured via reflection as SetParameterMethodInvocation objects; invoking them replays the corresponding setXXX calls so that the concrete parameter values are applied to each routed PreparedStatement, and finally a PreparedStatementUnit is returned. (A minimal sketch of this record-and-replay idea follows the snippet below.)
private void recordSetParameter(final String methodName, final Class[] argumentTypes, final Object... arguments) {
    try {
        setParameterMethodInvocations.add(new SetParameterMethodInvocation(PreparedStatement.class.getMethod(methodName, argumentTypes), arguments, arguments[1]));
    } catch (final NoSuchMethodException ex) {
        throw new ShardingException(ex);
    }
}
protected void replaySetParameter(final PreparedStatement preparedStatement, final List<Object> parameters) {
    setParameterMethodInvocations.clear();
    addParameters(parameters);
    for (SetParameterMethodInvocation each : setParameterMethodInvocations) {
        each.invoke(preparedStatement);
    }
}
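The record-and-replay mechanism can be isolated into a small sketch, assuming a plain list of reflected Method objects instead of the library's SetParameterMethodInvocation (all names here are illustrative):

import java.lang.reflect.Method;
import java.sql.PreparedStatement;
import java.util.LinkedList;
import java.util.List;

// Record each setXXX call made against the logical statement, then replay it on every routed PreparedStatement.
public final class SetterReplaySketch {

    private final List<Method> methods = new LinkedList<>();

    private final List<Object[]> argumentsList = new LinkedList<>();

    // Called when the application invokes e.g. setLong(1, 10L) on the logical statement.
    public void record(final String methodName, final Class<?>[] argumentTypes, final Object... arguments) throws NoSuchMethodException {
        methods.add(PreparedStatement.class.getMethod(methodName, argumentTypes));
        argumentsList.add(arguments);
    }

    // Called once for each routed PreparedStatement before execution.
    public void replay(final PreparedStatement target) throws Exception {
        for (int i = 0; i < methods.size(); i++) {
            methods.get(i).invoke(target, argumentsList.get(i));
        }
    }
}

For example, record("setLong", new Class<?>[] {int.class, long.class}, 1, 10L) followed by replay(target) has the same effect as calling target.setLong(1, 10L) directly.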
12. The PreparedStatements are handed over for execution: ShardingPreparedStatement#execute delegates to PreparedStatementExecutor#execute, which runs the units on the executor engine's thread pool; after execution, the data cached for this run (batch info, parameter info, and so on) is cleaned up along with other housekeeping.
public boolean execute() throws SQLException {
    try {
        Collection<PreparedStatementUnit> preparedStatementUnits = route();
        return new PreparedStatementExecutor(
                getConnection().getShardingContext().getExecutorEngine(), routeResult.getSqlStatement().getType(), preparedStatementUnits).execute();
    } finally {
        JDBCShardingRefreshHandler.build(routeResult, connection).execute();
        clearBatch();
    }
}

public boolean execute() throws SQLException {
    List<Boolean> result = executorEngine.execute(sqlType, preparedStatementUnits, new ExecuteCallback<Boolean>() {

        @Override
        public Boolean execute(final BaseStatementUnit baseStatementUnit) throws Exception {
            return ((PreparedStatement) baseStatementUnit.getStatement()).execute();
        }
    });
    if (null == result || result.isEmpty() || null == result.get(0)) {
        return false;
    }
    return result.get(0);
}
ExecutorEngine#execute takes the first execution unit and runs it synchronously on the current thread, runs the remaining units asynchronously and collects their results through a ListenableFuture, then merges everything into the list that is returned. At this point, the handling of the insert statement is complete. (A standalone sketch of this sync-first/async-rest pattern follows the execute() listing below.)
public <T> List<T> execute(
        final SQLType sqlType, final Collection<? extends BaseStatementUnit> baseStatementUnits, final ExecuteCallback<T> executeCallback) throws SQLException {
    if (baseStatementUnits.isEmpty()) {
        return Collections.emptyList();
    }
    OverallExecutionEvent event = new OverallExecutionEvent(sqlType, baseStatementUnits.size());
    EventBusInstance.getInstance().post(event);
    Iterator<? extends BaseStatementUnit> iterator = baseStatementUnits.iterator();
    BaseStatementUnit firstInput = iterator.next();
    ListenableFuture<List<T>> restFutures = asyncExecute(sqlType, Lists.newArrayList(iterator), executeCallback);
    T firstOutput;
    List<T> restOutputs;
    try {
        firstOutput = syncExecute(sqlType, firstInput, executeCallback);
        restOutputs = restFutures.get();
        // CHECKSTYLE:OFF
    } catch (final Exception ex) {
        // CHECKSTYLE:ON
        event.setException(ex);
        event.setEventExecutionType(EventExecutionType.EXECUTE_FAILURE);
        EventBusInstance.getInstance().post(event);
        ExecutorExceptionHandler.handleException(ex);
        return null;
    }
    event.setEventExecutionType(EventExecutionType.EXECUTE_SUCCESS);
    EventBusInstance.getInstance().post(event);
    List<T> result = Lists.newLinkedList(restOutputs);
    result.add(0, firstOutput);
    return result;
}
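The sync-first/async-rest pattern used above can be shown in isolation. The sketch below is a simplification under stated assumptions: it uses Guava's ListeningExecutorService directly and plain Callable tasks instead of the engine's ExecuteCallback and event posting.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public final class SyncFirstAsyncRest {

    private final ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(8));

    // Run the first task on the caller's thread, the rest on the pool, then merge the results in order.
    public <T> List<T> execute(final List<Callable<T>> tasks) throws Exception {
        if (tasks.isEmpty()) {
            return new ArrayList<>();
        }
        Iterator<Callable<T>> iterator = tasks.iterator();
        Callable<T> first = iterator.next();
        List<ListenableFuture<T>> restFutures = new ArrayList<>();
        while (iterator.hasNext()) {
            restFutures.add(executorService.submit(iterator.next()));
        }
        T firstResult = first.call();                                           // synchronous part
        List<T> result = new ArrayList<>(Futures.allAsList(restFutures).get()); // wait for the async part
        result.add(0, firstResult);
        return result;
    }
}

Executing the first unit on the caller's thread keeps the common single-route case free of thread-pool overhead.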
private <T> T executeInternal(final SQLType sqlType, final BaseStatementUnit baseStatementUnit, final ExecuteCallback<T> executeCallback,
                              final boolean isExceptionThrown, final Map<String, Object> dataMap) throws Exception {
    synchronized (baseStatementUnit.getStatement().getConnection()) {
        T result;
        ExecutorExceptionHandler.setExceptionThrown(isExceptionThrown);
        ExecutorDataMap.setDataMap(dataMap);
        List<AbstractExecutionEvent> events = new LinkedList<>();
        for (List<Object> each : baseStatementUnit.getSqlExecutionUnit().getSqlUnit().getParameterSets()) {
            events.add(getExecutionEvent(sqlType, baseStatementUnit, each));
        }
        for (AbstractExecutionEvent event : events) {
            EventBusInstance.getInstance().post(event);
        }
        try {
            result = executeCallback.execute(baseStatementUnit);
        } catch (final SQLException ex) {
            for (AbstractExecutionEvent each : events) {
                each.setEventExecutionType(EventExecutionType.EXECUTE_FAILURE);
                each.setException(ex);
                EventBusInstance.getInstance().post(each);
                ExecutorExceptionHandler.handleException(ex);
            }
            return null;
        }
        for (AbstractExecutionEvent each : events) {
            each.setEventExecutionType(EventExecutionType.EXECUTE_SUCCESS);
            EventBusInstance.getInstance().post(each);
        }
        return result;
    }
}
private AbstractExecutionEvent getExecutionEvent(final SQLType sqlType, final BaseStatementUnit baseStatementUnit, final List<Object> parameters) {
    AbstractExecutionEvent result;
    if (SQLType.DQL == sqlType) {
        result = new DQLExecutionEvent(baseStatementUnit.getSqlExecutionUnit().getDataSource(), baseStatementUnit.getSqlExecutionUnit().getSqlUnit(), parameters);
    } else {
        result = new DMLExecutionEvent(baseStatementUnit.getSqlExecutionUnit().getDataSource(), baseStatementUnit.getSqlExecutionUnit().getSqlUnit(), parameters);
    }
    return result;
}
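All of these execution events travel through the Guava EventBus singleton behind EventBusInstance, so any registered listener can observe them. A minimal sketch of such a listener, assuming the event class exposes getters for the data source and the execution type set above (the listener class name is made up for illustration):

import com.google.common.eventbus.Subscribe;
// Import DMLExecutionEvent and EventBusInstance from your Sharding-JDBC version's packages.

public final class DMLExecutionLogger {

    // Register once with: EventBusInstance.getInstance().register(new DMLExecutionLogger());
    @Subscribe
    public void onDMLExecution(final DMLExecutionEvent event) {
        // Each execution unit is posted before execution and again with EXECUTE_SUCCESS or EXECUTE_FAILURE.
        System.out.println("DML on " + event.getDataSource() + ", type: " + event.getEventExecutionType());
    }
}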