Writing a Spark Streaming job in Java that consumes Kafka from a specified offset and prints the offset of every record

Without further ado, here is the code. Any Spark programmer with a bit of experience should be able to follow it.

Step 1: Create the Kafka topic. (For my test I used 1 replica and 3 partitions.)

kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic test
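
On newer Kafka releases (2.2+) the topic tool can talk to the brokers directly instead of ZooKeeper, and recent releases drop the --zookeeper flag entirely; an equivalent command, assuming the broker address used in the code below, would be:

kafka-topics.sh --create --bootstrap-server 192.168.200.10:9092 --replication-factor 1 --partitions 3 --topic test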

Step 2: Use a Java program to produce data into Kafka in real time

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class main {
    public static void main(String[] args) throws Exception {

        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.200.10:9092");
        props.put("acks", "1");
        props.put("retries", 3);
        props.put("batch.size", 16384); // 16 KB
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432); // 32 MB
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);

        int i = 0;
        while (true) {
            i++;
            // A ProducerRecord can specify topic, partition, key and value; partition and key are optional
            // ProducerRecord<String, String> record = new ProducerRecord<>("dev3-yangyunhe-topic001", 0, "key", line);
            // ProducerRecord<String, String> record = new ProducerRecord<>("dev3-yangyunhe-topic001", "key", line);
            ProducerRecord<String, String> record = new ProducerRecord<>("test", i + "");

            // Fire-and-forget: send the message without checking whether it succeeded
            producer.send(record);
            Thread.sleep(1000);
        }

    }
}
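
The send above is fire-and-forget. If you also want to see, on the producer side, which partition and offset each record was written to (handy for cross-checking what the streaming job prints later), a small variation using the producer's callback API could look like the sketch below; it reuses the record and producer variables from the listing above.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

// Replace producer.send(record) inside the loop with:
producer.send(record, new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // The send failed even after the configured retries
            exception.printStackTrace();
        } else {
            // Partition and offset assigned by the broker for this record
            System.out.println("sent to partition " + metadata.partition() + " at offset " + metadata.offset());
        }
    }
});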
Step 3: Use Spark Streaming (the Java API) to consume the data from Kafka and print each record's offset, partition, and other metadata. (In real development you can build on this, for example by storing the already-consumed offset of each topic partition in an external store such as Redis; see the sketch after the listing.) The code is as follows:
import java.util.*;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

import scala.Tuple2;

/**
 * Pull data from Kafka with the direct approach, start consuming from the specified offsets,
 * and print each record's partition id, offset, key and value.
 */
public class SparkStreamingFromkafkaDirect {

    public static void main(String[] args) throws Exception {
        // Spark configuration
        SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreamingFromkafka");
        // Batch interval
        JavaStreamingContext streamingContext = new JavaStreamingContext(sparkConf, Durations.seconds(5));

        // Kafka parameters
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", "192.168.200.10:9092"); // multiple brokers can be separated with ","
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "sparkStreaming");
        Collection<String> topics = Arrays.asList("test"); // the topics to subscribe to; more than one can be listed
        Map<TopicPartition, Long> topicsAndOffset = new HashMap<>(); // the starting offset for each topic partition
        topicsAndOffset.put(new TopicPartition("test", 0), 0L);
        topicsAndOffset.put(new TopicPartition("test", 1), 10L);
        topicsAndOffset.put(new TopicPartition("test", 2), 330L);

        // Create the direct stream: Kafka partitions map one-to-one to Spark partitions
        JavaInputDStream<ConsumerRecord<String, String>> javaInputDStream = KafkaUtils.createDirectStream(
                streamingContext,
                LocationStrategies.PreferConsistent(), // location strategy
                // ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams) // consumer strategy, option 1: let the group's committed offsets / auto.offset.reset decide where to start
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams, topicsAndOffset) // consumer strategy, option 2: start from the specified offset of each partition
        );

        // Parse each record pulled from Kafka into a (key, value) pair, printing its partition and offset along the way
        JavaPairDStream<String, String> javaPairDStream = javaInputDStream.mapToPair(new PairFunction<ConsumerRecord<String, String>, String, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            // Extract the key and value (a ConsumerRecord carries the full Kafka metadata, including partition and offset)
            public Tuple2<String, String> call(ConsumerRecord<String, String> consumerRecord) throws Exception {
                System.out.println("--------------- ConsumerRecord ------------------------ ");
                System.out.println("partition:   " + consumerRecord.partition());
                System.out.println("offset:   " + consumerRecord.offset());
                System.out.println("toString:   " + consumerRecord.toString());
                Thread.sleep(10000); // the long pause is only to make the logs easier to read, so you can verify consumption really starts at the specified offsets
                return new Tuple2<>(consumerRecord.key(), consumerRecord.value());
            }
        });
        // Print the values with an output action (mainly to trigger execution of the stream)
        javaPairDStream.foreachRDD(new VoidFunction<JavaPairRDD<String, String>>() {
            @Override
            public void call(JavaPairRDD<String, String> javaPairRDD) throws Exception {
                javaPairRDD.foreach(new VoidFunction<Tuple2<String, String>>() {
                    @Override
                    public void call(Tuple2<String, String> tuple2) throws Exception {
                        System.out.println(tuple2._2);
                    }
                });
            }
        });
        streamingContext.start();
        streamingContext.awaitTermination();
    }

}
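
As noted in step 3, a real job would usually persist the consumed offsets itself rather than only printing them. Below is a minimal sketch (not part of the original program) of how to obtain each batch's offset ranges and commit them back to Kafka with the spark-streaming-kafka-0-10 API; the same OffsetRange values could just as well be written to Redis or another store. It assumes the javaInputDStream variable from the listing above, should be placed before streamingContext.start(), and uses a Java 8 lambda for brevity.

// Additional imports needed at the top of the file:
import org.apache.spark.streaming.kafka010.CanCommitOffsets;
import org.apache.spark.streaming.kafka010.HasOffsetRanges;
import org.apache.spark.streaming.kafka010.OffsetRange;

// Inside main(), before streamingContext.start():
javaInputDStream.foreachRDD(rdd -> {
    // RDDs produced by the direct stream carry their Kafka offset ranges
    OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
    for (OffsetRange range : offsetRanges) {
        // These are the values you would persist (e.g. to Redis) for each topic partition
        System.out.println("topic=" + range.topic() + " partition=" + range.partition()
                + " from=" + range.fromOffset() + " until=" + range.untilOffset());
    }
    // Optionally commit the offsets back to Kafka once the batch has been handled
    ((CanCommitOffsets) javaInputDStream.inputDStream()).commitAsync(offsetRanges);
});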
