26. Reading the Flink Source Code -- The SQL Execution and Translation Process

How does Flink SQL turn a SQL statement (or Table API calls) into the final DataStream or DataSet job? In this article we walk through the intermediate execution and translation steps from the source code's point of view.

DEMO

This is one of Flink's unit test methods; it simulates a streaming query:

@Test
public void testSelect() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds = JavaStreamTestData.getSmall3TupleDataSet(env);
	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTable", in);

	String sqlQuery = "SELECT * FROM MyTable";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");

	StreamITCase.compareWithList(expected);
}
Registering the table
tableEnv.registerTable("MyTable", in);
==>
StreamTableEnvironment.registerDataStream
==>
registerDataStreamInternal
==>
registerTableInternal
==>
protected def registerTableInternal(name: String, table: AbstractTable): Unit = {
  if (isRegistered(name)) {
    throw new TableException(s"Table \'$name\' already exists. " +
      s"Please, choose a different name.")
  } else {
    rootSchema.add(name, table)
  }
}
The table's structure is added to the root schema, and the table is registered successfully.
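As a consequence, registering a second table under a name that is already taken fails. A minimal sketch continuing the demo above (the try/catch is illustrative; the exception and its message come straight from registerTableInternal):

// "MyTable" is already in rootSchema at this point, so isRegistered(name) is true.
try {
    tableEnv.registerTable("MyTable", in);
} catch (org.apache.flink.table.api.TableException e) {
    // "Table 'MyTable' already exists. Please, choose a different name."
}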
How the Table is generated
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

Table result = tableEnv.sqlQuery(sqlQuery);

==>
def sqlQuery(query: String): Table = {
   val planner = new FlinkPlannerImpl(getFrameworkConfig, getPlanner, getTypeFactory)
   // parse the sql query
   val parsed = planner.parse(query) // produces a SqlNode AST; SqlNode is abstract, with subclasses such as SqlSelect, SqlDelete, SqlJoin, SqlAlter
   if (null != parsed && parsed.getKind.belongsTo(SqlKind.QUERY)) {
     // validate the sql query
     val validated = planner.validate(parsed) // validates the SqlNode AST
     // transform to a relational tree
     val relational = planner.rel(validated) // AST --> logical plan
     new Table(this, LogicalRelNode(relational.rel)) // relational.rel is the logical plan; here it is a LogicalProject
   } else {
     throw new TableException(
       "Unsupported SQL query! sqlQuery() only accepts SQL queries of type " +
         "SELECT, UNION, INTERSECT, EXCEPT, VALUES, and ORDER_BY.")
   }
 }

Constructing the Table object therefore means parsing the SQL into a SqlNode, validating it, and converting it into a logical plan. The call sequence follows Calcite's own processing flow; see https://matt33.com/2019/03/07/apache-calcite-process-flow/ for a reference on Calcite.
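To make the first step concrete, here is a minimal sketch that drives Calcite's parser directly (an assumption: calcite-core is on the classpath; this is plain Calcite, not Flink's FlinkPlannerImpl). It reproduces the parse step and the SqlKind.QUERY check that sqlQuery performs:

import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;

public class CalciteParseSketch {
	public static void main(String[] args) throws SqlParseException {
		// Parse the demo query into a SqlNode AST, as planner.parse(query) does.
		SqlNode parsed = SqlParser.create("SELECT * FROM MyTable").parseQuery();
		// The root node is a SqlSelect, so its kind belongs to the QUERY category.
		System.out.println(parsed.getKind());                          // SELECT
		System.out.println(parsed.getKind().belongsTo(SqlKind.QUERY)); // true
	}
}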

Translating the Table into a DataStream
DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
resultSet.addSink(new StreamITCase.StringSink());
env.execute();

==>

def toAppendStream[T](
     table: Table,
     clazz: Class[T],
     queryConfig: StreamQueryConfig): DataStream[T] = {
   val typeInfo = TypeExtractor.createTypeInfo(clazz)
   TableEnvironment.validateType(typeInfo)
   translate[T](table, queryConfig, updatesAsRetraction = false, withChangeFlag = false)(typeInfo)
 }

==>

protected def translate[A](
    table: Table,
    queryConfig: StreamQueryConfig,
    updatesAsRetraction: Boolean,
    withChangeFlag: Boolean)(implicit tpe: TypeInformation[A]): DataStream[A] = {
  val relNode = table.getRelNode // get the logical plan
  val dataStreamPlan = optimize(relNode, updatesAsRetraction) // optimize it into the physical (DataStream) plan

  val rowType = getResultType(relNode, dataStreamPlan)

  translate(dataStreamPlan, rowType, queryConfig, withChangeFlag)
}
==> translateToCRow ==> DataStreamScan.translateToPlan ==> convertToInternalRow ==> generateConversionProcessFunction, which generates the concrete operators.
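The two boolean flags set by toAppendStream (updatesAsRetraction = false, withChangeFlag = false) are what select the conversion mode: toRetractStream passes true for both, so the same translate() path emits records tagged with a change flag. A minimal sketch against the demo table (print() stands in for the test sink):

// toRetractStream() runs the same translate(), but with updatesAsRetraction = true
// and withChangeFlag = true; each element is a Tuple2 whose Boolean marks an
// accumulate (true) or retract (false) message.
DataStream<Tuple2<Boolean, Row>> retractStream = tableEnv.toRetractStream(result, Row.class);
retractStream.print();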

A DataSet program goes through the same translation process; in the end, a SQL query runs as a job just like a hand-written DataStream or DataSet program.
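For comparison, here is a minimal sketch of the batch path (assuming the same pre-1.9 flink-table Java API used in the demo). toDataSet() triggers the DataSet flavor of translate(), which optimizes the logical plan and then emits operators via translateToPlan:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.BatchTableEnvironment;
import org.apache.flink.types.Row;

public class BatchSqlSketch {
	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
		BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

		DataSet<Tuple3<Integer, Long, String>> ds =
				env.fromElements(Tuple3.of(1, 1L, "Hi"), Tuple3.of(2, 2L, "Hello"));
		tableEnv.registerDataSet("MyTable", ds, "a, b, c");

		Table result = tableEnv.sqlQuery("SELECT * FROM MyTable");
		// toDataSet() walks the same parse -> validate -> optimize -> translate chain,
		// just against the DataSet back end.
		DataSet<Row> resultSet = tableEnv.toDataSet(result, Row.class);
		resultSet.print();
	}
}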
