Maven dependencies:
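The original dependency list did not survive extraction, so here is a plausible sketch: it assumes Spark 2.x on Scala 2.11, the Kafka 0.8 direct-stream connector (which matches the KafkaUtils/StringDecoder API used below), and elasticsearch-hadoop's Spark integration. The versions are illustrative only; pick ones matching your cluster.

<!-- Illustrative coordinates, not the author's original list -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>2.4.0</version>
    <scope>provided</scope>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.4.0</version>
    <scope>provided</scope>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
    <version>2.4.0</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch-spark-20_2.11</artifactId>
    <version>6.5.4</version>
</dependency>

The commented-out Structured Streaming variant in the code would additionally need spark-sql-kafka-0-10_2.11.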
Code:
package com.suning.sevs.bussiness

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.kafka.KafkaUtils
import kafka.serializer.StringDecoder
import org.slf4j.LoggerFactory
import org.elasticsearch.spark._

// Kafka test: read messages with Spark Streaming and write them to Elasticsearch
object testKafka {
  def main(args: Array[String]): Unit = {
    val logger = LoggerFactory.getLogger(testKafka.getClass)

    val sparkconf = new SparkConf().setAppName("testKafka")
      .set("HADOOP_USER_NAME", "user")
      .set("HADOOP_GROUP_NAME", "user")
      .set("es.nodes", "10.10.2.1,10.10.2.2")
      .set("es.port", "9900")

    // Structured Streaming alternative, kept commented out:
    // val spark = SparkSession
    //   .builder
    //   .appName("testKafka")
    //   .config(sparkconf)
    //   .getOrCreate()
    // import spark.implicits._
    // val topic = spark.readStream.format("kafka")
    //   .option("kafka.bootstrap.servers", "10.10.1.245:9092,10.10.1.246:9092")
    //   .option("subscribe", "mytopic")
    //   .option("startingOffsets", "latest")
    //   .option("minPartitions", "2")
    //   .load()
    //
    // val query = topic.writeStream.format("console").outputMode(OutputMode.Append()).start()

    // 1-second micro-batches
    val ssc = new StreamingContext(sparkconf, Seconds(1))

    val topicsSet = "mytopic".split(",").toSet
    val kafkaParams = Map[String, String]("metadata.broker.list" -> "10.10.1.245:9092,10.10.1.246:9092")

    // Direct (receiver-less) stream using the Kafka 0.8 connector
    val directKafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topicsSet)

    // Keep only the message value; the key is ignored
    val lines = directKafkaStream.map(_._2)

    lines.foreachRDD { rdd =>
      // Wrap each message in a Map so elasticsearch-spark indexes it as a document
      val esRdd = rdd.map(line => Map("sys" -> line, "mycode" -> "1"))
      // "indexName/typeName" is the <index>/<type> target expected by saveToEs
      esRdd.saveToEs("indexName/typeName")
    }

    // Start the computation
    ssc.start()
    ssc.awaitTermination()
  }
}
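A job like this is typically launched with spark-submit. The jar name and master below are placeholders, and the --packages coordinates assume the illustrative versions from the dependency sketch above:

spark-submit \
  --class com.suning.sevs.bussiness.testKafka \
  --master yarn \
  --packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.4.0,org.elasticsearch:elasticsearch-spark-20_2.11:6.5.4 \
  testKafka-assembly.jar

Once messages are flowing, documents should show up in the target index; a quick check against the es.nodes/es.port configured in the code:

curl 'http://10.10.2.1:9900/indexName/_search?pretty'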