Big Data Learning: Writing Data from Spark to Kafka

Contents

1. Kafka environment setup, see:

2. Preparing the KafkaSink

3. Implementation code:

4. Summary:


1. Kafka environment setup, see:

https://blog.csdn.net/weixin_37835915/article/details/103786157

2. Preparing the KafkaSink
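A wrapper class is needed because KafkaProducer itself is not serializable, so it cannot be created on the driver and captured directly inside an RDD closure. Instead, KafkaSink carries a producer factory function, is itself Serializable, and builds the real producer through a lazy val, so the wrapper can be broadcast and each executor creates its own producer on first use.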

package com.spark.self

import java.util.concurrent.Future
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata}

/**
 * A serializable wrapper around KafkaProducer. Instead of shipping a producer
 * (which is not serializable), we ship a factory function and create the
 * producer lazily, once per executor JVM, on first use.
 */
class KafkaSink[K, V](createProducer: () => KafkaProducer[K, V]) extends Serializable {
  // Created on the first send(), on the executor, after the broadcast wrapper is deserialized.
  lazy val producer = createProducer()

  def send(topic: String, key: K, value: V): Future[RecordMetadata] =
    producer.send(new ProducerRecord[K, V](topic, key, value))

  def send(topic: String, value: V): Future[RecordMetadata] =
    producer.send(new ProducerRecord[K, V](topic, value))
}

object KafkaSink {

  import scala.collection.JavaConversions._

  def apply[K, V](config: Map[String, Object]): KafkaSink[K, V] = {
    val createProducerFunc = () => {
      val producer = new KafkaProducer[K, V](config)
      sys.addShutdownHook {
        // Ensure that, on executor JVM shutdown, the Kafka producer sends
        // any buffered messages to Kafka before shutting down.
        producer.close()
      }
      producer
    }
    new KafkaSink(createProducerFunc)
  }

  def apply[K, V](config: java.util.Properties): KafkaSink[K, V] = apply(config.toMap)
}
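For comparison, a common alternative that skips broadcasting is to open and close a producer inside each partition with foreachPartition. The broadcast KafkaSink above is usually the better choice, because each executor keeps a single long-lived producer that is reused across tasks instead of one per partition. The lines below are only a minimal sketch and assume an rdd: RDD[String] and a kafkaProducerConfig: java.util.Properties like the ones used in the next section.

// Minimal sketch: one producer per partition instead of a broadcast KafkaSink.
rdd.foreachPartition { partition =>
  val producer = new KafkaProducer[String, String](kafkaProducerConfig)
  partition.foreach(record =>
    producer.send(new ProducerRecord[String, String]("test", record)))
  producer.close() // flushes any buffered records before the task finishes
}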

3. Implementation code:

package com.spark.self

import java.util.Properties
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WriteDataIntoKakfa {
  def main(args: Array[String]): Unit = {
    // Set the log level to WARN to cut console noise; remove this line if it causes errors.
    Logger.getRootLogger.setLevel(Level.WARN)
    val conf = new SparkConf().setMaster("local").setAppName("app")
    val sc: SparkContext = new SparkContext(conf)
    val rdd: RDD[String] = sc.parallelize(Array("1", "2", "3", "4"))
    // Build the producer configuration, then broadcast the KafkaSink to the executors
    val kafkaProducerConfig = {
      val p = new Properties()
      p.setProperty("bootstrap.servers", "localhost:9092") //修改为你的kafka地址
      p.setProperty("key.serializer", classOf[StringSerializer].getName)
      p.setProperty("value.serializer", classOf[StringSerializer].getName)
      p
    }
    val kafkaProducer: Broadcast[KafkaSink[String, String]] =
      sc.broadcast(KafkaSink[String, String](kafkaProducerConfig))
    // Send every record to the "test" topic; each executor creates its producer lazily on the first send.
    rdd.foreach(record => {
      kafkaProducer.value.send("test", record)
    })
    sc.stop()
  }
}

After the job finishes, the consumer output is as follows:

(Screenshot: consumer output)
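If you would rather verify the result from code than from a console consumer, the minimal sketch below polls the test topic with a plain Kafka consumer. The object name, group id, offset reset setting, and poll timeout are illustrative choices, and the same local broker address is assumed.

package com.spark.self

import java.util.{Collections, Properties}
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.serialization.StringDeserializer
import scala.collection.JavaConversions._

object ReadBackFromKafka {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("bootstrap.servers", "localhost:9092")
    props.setProperty("group.id", "spark-write-check")  // illustrative group id
    props.setProperty("auto.offset.reset", "earliest")  // start from the beginning of the topic
    props.setProperty("key.deserializer", classOf[StringDeserializer].getName)
    props.setProperty("value.deserializer", classOf[StringDeserializer].getName)

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(Collections.singletonList("test"))

    // kafka-clients 2.x API; on older clients use consumer.poll(5000L) instead.
    val records = consumer.poll(java.time.Duration.ofSeconds(5))
    for (record <- records) {
      println(s"offset=${record.offset()}, value=${record.value()}")
    }
    consumer.close()
  }
}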

4. Summary:

Note: the data in this example is just a small hard-coded collection; you can also read the input from Hive or HDFS, see the references below and the sketch that follows them:

https://blog.csdn.net/weixin_37835915/article/details/103616965

https://blog.csdn.net/weixin_37835915/article/details/103531043
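As a minimal sketch of the HDFS case, the lines below reuse sc and the broadcast kafkaProducer from the example in section 3; the HDFS path is hypothetical and stands in for your own input file.

// Read lines from a (hypothetical) HDFS file instead of a hard-coded collection,
// then send each line to the same "test" topic through the broadcast KafkaSink.
val hdfsRdd: RDD[String] = sc.textFile("hdfs://localhost:9000/user/test/input.txt")
hdfsRdd.foreach(record => kafkaProducer.value.send("test", record))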

This article assumes a Kafka environment has already been set up locally.
