Flink upsert-kafka SQL

pom.xml:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.sf.bdp</groupId>
    <artifactId>kafka-wide-table</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <scala.version>2.11.12</scala.version>
        <scala.binary.version>2.11</scala.binary.version>
        <flink.version>1.12.2</flink.version>
        <!-- additional version pins from the original (property names not
             recoverable, unused by the dependencies below): 2.7.3, 1.0.5, 2.3.1 -->
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table</artifactId>
            <version>${flink.version}</version>
            <type>pom</type>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-runtime_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- Blink planner (the default planner since Flink 1.11) -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- JSON format, required for 'key.format'/'value.format' = 'json' -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>
    </dependencies>
</project>

code:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class KafkaSoureTest {

    public static void main(String[] args) throws Exception {
        // Since Flink 1.11 the Blink planner is the default, so the explicit
        // EnvironmentSettings setup is no longer needed:
        //EnvironmentSettings bsSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        //StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv, bsSettings);

        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv);

        // upsert-kafka source: a PRIMARY KEY is mandatory and maps to the Kafka
        // message key. The connector always reads from the earliest offset, so
        // 'scan.startup.mode' is not a valid option here and is left out.
        tEnv.executeSql("CREATE TABLE soruce1 (\n" +
                "    id   STRING,\n" +
                "    pv   BIGINT,\n" +
                "    uv   BIGINT,\n" +
                "    PRIMARY KEY (id) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'soruce1_pvuv',\n" +
                "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                "  'key.json.ignore-parse-errors' = 'true',\n" +
                "  'value.json.fail-on-missing-field' = 'false',\n" +
                "  'value.json.ignore-parse-errors' = 'true',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json',\n" +
                "  'value.fields-include' = 'EXCEPT_KEY'\n" +
                ")");

        // upsert-kafka sink with the same schema: rows sharing an id are written
        // as updates on the key, and DELETEs become tombstone (null-value) records.
        tEnv.executeSql("CREATE TABLE sink1 (\n" +
                "    id   STRING,\n" +
                "    pv   BIGINT,\n" +
                "    uv   BIGINT,\n" +
                "    PRIMARY KEY (id) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'sink1_pvuv',\n" +
                "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                "  'key.json.ignore-parse-errors' = 'true',\n" +
                "  'value.json.fail-on-missing-field' = 'false',\n" +
                "  'value.json.ignore-parse-errors' = 'true',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json',\n" +
                "  'value.fields-include' = 'EXCEPT_KEY'\n" +
                ")");

        // executeSql() submits the INSERT job immediately and asynchronously.
        tEnv.executeSql("INSERT INTO sink1 SELECT * FROM soruce1");

        // print() streams results back to the client and blocks until cancelled.
        // No trailing tEnv.execute(...) call is needed: executeSql() already ran
        // the jobs, and calling execute() here would fail with
        // "No operators defined in streaming topology".
        tEnv.executeSql("SELECT * FROM sink1").print();
    }
}
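With 'key.format' = 'json' and 'value.fields-include' = 'EXCEPT_KEY', every record in soruce1_pvuv carries the primary key as a JSON object in the message key and only the non-key columns in the value. A minimal sketch of a producer feeding the source topic (the kafka-clients dependency and the sample values are assumptions, not part of the original post):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class UpsertKafkaFeeder {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "10.202.116.43:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Key holds the PRIMARY KEY column; the value holds only the
            // remaining columns because of 'value.fields-include' = 'EXCEPT_KEY'.
            producer.send(new ProducerRecord<>("soruce1_pvuv",
                    "{\"id\":\"1\"}", "{\"pv\":100,\"uv\":10}"));
            // A later record with the same key is interpreted as an UPDATE ...
            producer.send(new ProducerRecord<>("soruce1_pvuv",
                    "{\"id\":\"1\"}", "{\"pv\":200,\"uv\":15}"));
            // ... and a null value is a tombstone, i.e. a DELETE for that key.
            producer.send(new ProducerRecord<>("soruce1_pvuv",
                    "{\"id\":\"1\"}", null));
            producer.flush();
        }
    }
}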

Append mode (KafkaSoureTest10)

Output of an append-mode variant of this test (the KafkaSoureTest10 class itself is not shown; a sketch follows the sample records below). Two plain append-only Kafka topics are joined and printed as a retract stream: each line is a (flag, row) tuple where true is an insert and false retracts a previously emitted row, and the "12>" prefix is the parallel subtask index of the print sink.
12> (true,1,apple,null,null)
12> (false,1,apple,null,null)
12> (true,1,apple,1,liujie)
12> (true,1,apple9,1,liujie)
12> (true,1,apple9,1,zhangshan)
12> (true,1,apple,1,zhangshan)
12> (true,1,apple10,1,zhangshan)
12> (true,1,apple10,1,liujie)
12> (true,1,apple10,1,wangwu)
12> (true,1,apple9,1,wangwu)
12> (true,1,apple,1,wangwu)


{"productId":"1","name":"apple"}
{"productId":"1","name":"apple9"}
{"productId":"1","name":"apple10"}

{"goodsId":"1","userName":"liujie"}
{"goodsId":"1","userName":"zhangshan"}
{"goodsId":"1","userName":"wangwu"}

Upsert mode

Each {...} {...} line below is a record written to one of the two source topics (message key first, then message value); the | op | ... | rows underneath are the changelog entries the client prints in response (+I insert, -U update-retract, -D delete).

{"productId":"2"} {"productId":"2","name":"apple"}
| +I | 2 | apple | (NULL) | (NULL) |
{"goodsId":"2"} {"goodsId":"2","userName":"name1"}
| -D | 2 | apple | (NULL) | (NULL) |
| +I | 2 | apple | 2 | name1 |
{"productId":"2"} {"productId":"2","name":"apple10"}
| -D | 2 | apple | 2 | name1 |
| +I | 2 | apple10 | 2 | name1 |
{"goodsId":"2"} {"goodsId":"2","userName":"name2"}
| -U | 2 | apple10 | 2 | name1 |
| +I | 2 | apple10 | (NULL) | (NULL) |
| -D | 2 | apple10 | (NULL) | (NULL) |
| +I | 2 | apple10 | 2 | name2 |
{"productId":"2"} {}
| -D | 2 | apple10 | 2 | name2 |
| +I | 2 | (NULL) | 2 | name2 |
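The trace shows upsert semantics end to end: a new value for an existing key first retracts the old joined row (-U/-D) and then re-inserts the updated one, and the final {} value is an empty JSON object, so productId 2 is updated with name = NULL rather than deleted (a deletion would require a null Kafka value). The test class for this run is not shown either; a minimal sketch, assuming both sides are upsert-kafka tables keyed on productId and goodsId (table and topic names are illustrative):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class UpsertJoinSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv);

        // Upsert sources: the latest value per key wins. With
        // 'value.json.fail-on-missing-field' = 'false', an empty value like {}
        // parses to NULL columns (an update); a null Kafka value deletes the key.
        tEnv.executeSql("CREATE TABLE product (\n" +
                "    productId STRING,\n" +
                "    name      STRING,\n" +
                "    PRIMARY KEY (productId) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'product',\n" +
                "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                "  'value.json.fail-on-missing-field' = 'false',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")");
        tEnv.executeSql("CREATE TABLE goods (\n" +
                "    goodsId  STRING,\n" +
                "    userName STRING,\n" +
                "    PRIMARY KEY (goodsId) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'goods',\n" +
                "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                "  'value.json.fail-on-missing-field' = 'false',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")");

        // print() renders the changelog as the | op | ... | table shown above.
        tEnv.executeSql(
                "SELECT p.productId, p.name, g.goodsId, g.userName\n" +
                "FROM product p LEFT JOIN goods g ON p.productId = g.goodsId").print();
    }
}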
