Official documentation
Flink provides an Apache Kafka connector for reading data from and writing data to Kafka topics with exactly-once guarantees. Add the following Maven dependency to use it:
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka_2.12</artifactId>
    <version>1.11.2</version>
</dependency>
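The exactly-once guarantee on the consuming side relies on Flink checkpointing being enabled, since Kafka offsets are committed as part of the checkpoint. A minimal sketch (the 5-second interval is just an illustrative value):

import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment

val env = StreamExecutionEnvironment.getExecutionEnvironment
// Checkpoint every 5 seconds in exactly-once mode so that Kafka offsets are
// committed consistently with the rest of the job state.
env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE)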
[root@bigdatatest03 bin]# ./kafka-topics.sh --create --zookeeper bigdatatest01:2181,bigdatatest02:2181,bigdatatest03:2181 --partitions 3 --replication-factor 3 --topic flink_kafka_source
[root@bigdatatest03 bin]# ./kafka-console-producer.sh \
> --broker-list bigdatatest01:9092,bigdatatest02:9092,bigdatatest03:9092 \
> --topic flink_kafka_source
[root@bigdatatest03 bin]# ./kafka-console-consumer.sh \
> --bootstrap-server bigdatatest01:9092,bigdatatest02:9092,bigdatatest03:9092 \
> --topic flink_kafka_source \
> --from-beginning
package com.xk.bigdata.flink.datastream.connector

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

/**
 * Use Kafka as the data source.
 */
object KafkaSourceApp {

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Kafka consumer configuration
    val properties = new Properties()
    val bootStrap = "bigdatatest02:9092,bigdatatest03:9092,bigdatatest04:9092"
    val sourceTopic = "flink_kafka_source"
    properties.setProperty("bootstrap.servers", bootStrap)
    properties.setProperty("group.id", "demo")

    val kafkaSource = new FlinkKafkaConsumer[String](sourceTopic, new SimpleStringSchema(), properties)

    val stream = env.addSource(kafkaSource)

    // Word count over the comma-separated records read from Kafka
    stream.flatMap(_.split(","))
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)
      .print()

    env.execute(this.getClass.getSimpleName)
  }
}
>spark
>spark,hadoop
1> (spark,1)
11> (hadoop,1)
1> (spark,2)
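By default the FlinkKafkaConsumer starts from the offsets committed for the consumer group. If needed, the start position can be overridden before the source is added; a small sketch against the kafkaSource variable in KafkaSourceApp above:

// Before env.addSource(kafkaSource):
kafkaSource.setStartFromEarliest()        // re-read the topic from the beginning
// kafkaSource.setStartFromLatest()       // or only consume records produced from now on
// kafkaSource.setStartFromGroupOffsets() // default: committed group offsets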
package com.xk.bigdata.flink.datastream.connector

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer

/**
 * Write data out to Kafka.
 */
object KafkaSinkApp {

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    val stream = env.readTextFile("data/wc.txt")

    val bootStrap = "bigdatatest02:9092,bigdatatest03:9092,bigdatatest04:9092"
    val topic = "flink_kafka_sink"

    val myProducer = new FlinkKafkaProducer[String](
      bootStrap,                // broker list
      topic,                    // target topic
      new SimpleStringSchema()) // serialization schema

    stream.addSink(myProducer)

    env.execute(this.getClass.getSimpleName)
  }
}
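The three-argument constructor taking a broker list is the older convenience variant; the connector also offers a constructor that takes the producer Properties directly. A minimal sketch, assuming the same bootStrap and topic values as in KafkaSinkApp above:

val producerProps = new Properties()
producerProps.setProperty("bootstrap.servers", bootStrap)

val myProducer = new FlinkKafkaProducer[String](
  topic,                    // target topic
  new SimpleStringSchema(), // serialization schema
  producerProps)            // producer config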
spark
hadoop,spark,flink
spark,hadoop
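To verify the result, the sink topic can be read back with the same console consumer used earlier, pointed at flink_kafka_sink:

[root@bigdatatest03 bin]# ./kafka-console-consumer.sh \
> --bootstrap-server bigdatatest01:9092,bigdatatest02:9092,bigdatatest03:9092 \
> --topic flink_kafka_sink \
> --from-beginning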