Flink CDC: writing MySQL data into Kafka

1. Import the dependencies

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.atguigu</groupId>
    <artifactId>atguigu-flink-cdc</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <!--        <flink-version>1.14.6</flink-version>-->
        <java.version>1.8</java.version>
        <flink-version>1.13.6</flink-version>
        <maven.compiler.source>${java.version}</maven.compiler.source>
        <maven.compiler.target>${java.version}</maven.compiler.target>
        <scala.version>2.12</scala.version>
        <hadoop.version>3.1.3</hadoop.version>

    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink-version}</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.flink/flink-streaming-java -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.49</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.flink/flink-table-planner -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>


        <dependency>
            <groupId>com.ververica</groupId>
            <artifactId>flink-connector-mysql-cdc</artifactId>
            <version>2.0.2</version>
        </dependency>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.75</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink-version}</version>
        </dependency>

        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.16</version>
            <scope>compile</scope>
        </dependency>


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-statebackend-rocksdb_${scala.version}</artifactId>
            <version>${flink-version}</version>
        </dependency>


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-runtime-web_${scala.version}</artifactId>
            <version>${flink-version}</version>
            <scope>provided</scope>
        </dependency>



    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.0.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>


</project>

2. Main program


import com.ververica.cdc.connectors.mysql.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.DebeziumSourceFunction;
import com.ververica.cdc.debezium.StringDebeziumDeserializationSchema;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Properties;

public class FlinkCDC {

    public static void main(String[] args) throws Exception {

        String ckAndGroupIdAndJobName = "test2";
        //1. Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        //2. Flink CDC keeps the binlog read position as state in the checkpoint; to resume where it left off, the job must be restarted from a checkpoint or savepoint
        //2.1 Enable checkpointing and take a checkpoint every 5 seconds
        env.enableCheckpointing(5000L);
        //2.2 Set the checkpointing consistency semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //2.3 Retain the last checkpoint when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.4 Set the restart strategy used when recovering from a checkpoint
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));


        //2.5 Set the state backend
        env.setStateBackend(new HashMapStateBackend());
        // Set the checkpoint storage path
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop103:8020/ck/" + ckAndGroupIdAndJobName);
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://mycluster:8020/ck/" + ckAndGroupIdAndJobName);

        //2.6 Set the user name used to access HDFS
        System.setProperty("HADOOP_USER_NAME", "sarah");


        //3. Create the Flink MySQL CDC source
        //initial (default): performs an initial snapshot of the monitored tables on first startup, then continues to read the latest binlog
        //latest-offset: never performs a snapshot on first startup; reads from the end of the binlog, i.e. only changes made after the connector started
        //timestamp: never performs a snapshot on first startup; reads the binlog from the specified timestamp. The consumer traverses the binlog from the beginning and ignores change events whose timestamp is smaller than the specified one
        //specific-offset: never performs a snapshot on first startup; reads the binlog directly from the specified offset

        DebeziumSourceFunction<String> sourceFunction = MySqlSource.<String>builder()
//                .hostname("hadoop102")
//                .port(3306)
//                .username("root")
//                .password("%3!4xaHXml")
//                .databaseList("student")

                .hostname("wiltechs-sjfood3-replica-i.mysql.rds.aliyuncs.com")
                .port(3306)
                .username("meta_ro")
                .password("hlmabcmY6xWF")
                .databaseList("db_core")
                // .tableList("db_core.tb_readonly_invoice_data") // Optional. If not set, all tables in the databases listed above are captured. Note: tables must be specified as "db.table"
                // .tableList("db_core.cc_location")


                .startupOptions(StartupOptions.initial())
                .deserializer(new CustomerDeserialization())
//                .deserializer(new StringDebeziumDeserializationSchema())
                .build();

        DataStreamSource<String> streamSource = env.addSource(sourceFunction);

        //4. Print the data and write it to Kafka

        streamSource.addSink(getKafkaProducer("172.16.11.144:9092")).name(ckAndGroupIdAndJobName).uid(ckAndGroupIdAndJobName + "uid2");


        streamSource.print();
        //5. Start the job
        env.execute("FlinkCDC");

    }


    //Kafka producer: writes each record as a string to the db_core_test topic on the given brokers
    public static FlinkKafkaProducer<String> getKafkaProducer(String brokers) {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", brokers);
        return new FlinkKafkaProducer<>("db_core_test", new SimpleStringSchema(), properties);
    }

}
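
To check that the change records actually arrive in the db_core_test topic, a small throwaway job can read the topic back and print every record. The following is only a minimal verification sketch, not part of the original pipeline: the class name KafkaVerify and the consumer group id db_core_test_verify are made up here, while the broker address and topic name are the ones used above.

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

public class KafkaVerify {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Consumer properties; the broker address matches the one passed to getKafkaProducer above
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "172.16.11.144:9092");
        props.setProperty("group.id", "db_core_test_verify");

        // Read the topic from the earliest offset and print every record
        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>("db_core_test", new SimpleStringSchema(), props);
        consumer.setStartFromEarliest();

        env.addSource(consumer).print();
        env.execute("KafkaVerify");
    }
}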

3. Custom deserialization schema

import com.alibaba.fastjson.JSONObject;

import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CustomerDeserialization implements DebeziumDeserializationSchema<String> {

  
   @Override
   public void deserialize(SourceRecord sourceRecord, Collector<String> collector) throws Exception {

       //1. Create the JSON object that will hold the final record
       JSONObject result = new JSONObject();

       //2. Get the database and table name from the record topic
       String topic = sourceRecord.topic();
       String[] fields = topic.split("\\.");

       String database = fields[1];
       String tableName = fields[2];



       Struct value = (Struct) sourceRecord.value();
       //3. Get the "before" data
       Struct before = value.getStruct("before");

       HashMap<String, Object> sourceOffset = (HashMap<String, Object>) sourceRecord.sourceOffset();
       Long ts_sec = ((Number) sourceOffset.get("ts_sec")).longValue();

       JSONObject beforeJson = new JSONObject();
       if (before != null) {
           Schema beforeSchema = before.schema();
           List<Field> beforeFields = beforeSchema.fields();
           for (Field field : beforeFields) {
               Object beforeValue = before.get(field);
               beforeJson.put(field.name(), beforeValue);
          }
      }


       //4. Get the "after" data
       Struct after = value.getStruct("after");
       JSONObject afterJson = new JSONObject();
       if (after != null) {
           Schema afterSchema = after.schema();
           List<Field> afterFields = afterSchema.fields();
           for (Field field : afterFields) {
               Object afterValue = after.get(field);
               afterJson.put(field.name(), afterValue);
          }
      }

       //5. Get the operation type (CREATE / UPDATE / DELETE / READ) and map it to the single-letter codes used in Debezium's "op" field
       Envelope.Operation operation = Envelope.operationFor(sourceRecord);
       String type = operation.toString().toLowerCase();
       if ("insert".equals(type) || "create".equals(type)) {
           type = "c";
      }
       if ("update".equals(type)) {
           type = "u";
      }
       if ("delete".equals(type)) {
           type = "d";
      }
       //Snapshot records produced by StartupOptions.initial() arrive as READ operations
       if ("read".equals(type)) {
           type = "r";
      }
       //6. Put the fields into the result JSON object
//       result.put("source", source);

       result.put("database",database);
       result.put("table",tableName);
       result.put("before", beforeJson);
       result.put("after", afterJson);
       result.put("op", type);
       result.put("ts", ts_sec);
       //7. Emit the record
       collector.collect(result.toJSONString());
  }

   @Override
   public TypeInformation<String> getProducedType() {
       return BasicTypeInfo.STRING_TYPE_INFO;
  }
}
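
For reference, a record emitted by CustomerDeserialization looks roughly like this (the field values are made up for illustration; the actual columns depend on the captured table):

{"database":"db_core","table":"cc_location","before":{},"after":{"id":1,"name":"demo"},"op":"c","ts":1660000000}

For an insert the before object is empty and after holds the new row; for a delete it is the reverse; ts is the ts_sec value taken from the source offset.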


I wrote this once before but forgot to save it, and the code somehow vanished. After half a day of debugging, it was another day of being reduced to tears by my own silliness.
