Big Data: Kafka Producer and Consumer Code

Add the Maven dependencies

    
    <properties>
        <kafka.version>2.0.0</kafka.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.12</artifactId>
            <version>${kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>${kafka.version}</version>
        </dependency>
    </dependencies>
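The examples below write to the topics kb12_01 and kb12_02 and pin partition 0 explicitly, so unless the broker auto-creates topics (auto.create.topics.enable), both topics must exist before the producers run. One way to create them from code is the AdminClient API that ships with kafka-clients; the following is a minimal sketch against the same broker address as the examples (the class name CreateTopics is just illustrative):

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.NewTopic;

    import java.util.Arrays;
    import java.util.Properties;

    public class CreateTopics {
        public static void main(String[] args) throws Exception {
            Properties config = new Properties();
            config.setProperty("bootstrap.servers", "192.168.131.200:9092");
            try (AdminClient admin = AdminClient.create(config)) {
                // one partition each, replication factor 1, matching PARTITION = 0 below
                admin.createTopics(Arrays.asList(
                        new NewTopic("kb12_01", 1, (short) 1),
                        new NewTopic("kb12_02", 1, (short) 1)
                )).all().get(); // block until the broker confirms creation
            }
        }
    }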

Producer (Java) + Consumer (Scala)

KafkaProducer (producer)

Java version

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Properties;
import java.util.concurrent.ExecutionException;


public class KafkaKb12Producer {
    public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
        Properties config = new Properties();
        //broker connection
        config.setProperty("bootstrap.servers","192.168.131.200:9092");
        //fault tolerance: retry failed sends; acks=-1 (all) waits for every in-sync replica
        config.setProperty("retries","2");
        config.setProperty("acks","-1");
        //batching: a batch is pushed as soon as either threshold below is reached
        config.setProperty("batch.size","128"); //flush once the batch reaches this many bytes
        config.setProperty("linger.ms","100");  //or once this much time has passed
        //serializers for the message key and value
        config.setProperty("key.serializer","org.apache.kafka.common.serialization.LongSerializer");
        config.setProperty("value.serializer","org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<Long, String> producer = new KafkaProducer<>(config);
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        long count = 0;
        final String TOPIC = "kb12_01"; //target topic
        final int PARTITION = 0;        //target partition within the topic

        while (true){
            String input = reader.readLine();
            if (input == null || input.equalsIgnoreCase("exit")){ //stop on EOF or "exit"
                break;
            }
            ProducerRecord<Long, String> record
                    = new ProducerRecord<>(TOPIC, PARTITION, ++count, input);
            RecordMetadata rmd = producer.send(record).get();
            System.out.println(rmd.topic()+"\t"+rmd.partition()+"\t"+rmd.offset()+"\t"+count+":"+input);
        }
        reader.close();
        producer.close();
    }
}
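Because the loop above blocks on send(record).get() for every line, each message costs a full broker round trip and the batch settings rarely come into play. The same send can be issued asynchronously with the callback overload of send, which is the usual way to keep batching effective. A minimal sketch of what would replace the two send/print lines inside the loop (the error handling here is illustrative):

    // Asynchronous variant: send() returns immediately and the callback
    // runs once the broker acknowledges (or rejects) the record.
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            exception.printStackTrace(); // e.g. retries exhausted
        } else {
            System.out.println(metadata.topic() + "\t" + metadata.partition()
                    + "\t" + metadata.offset());
        }
    });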

KafkaConsumer (consumer)

Scala version

import java.time.Duration

import java.util
import java.util.Properties

import org.apache.kafka.clients.consumer.KafkaConsumer


object KafkaKB12Consumer {
  def main(args: Array[String]): Unit = {
    val config:Properties = new Properties()
    config.setProperty("bootstrap.servers","192.168.131.200:9092")
    //deserializer type for the message key
    config.setProperty("key.deserializer","org.apache.kafka.common.serialization.LongDeserializer")
    //deserializer type for the message value
    config.setProperty("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer")
    //consumer group id
    config.setProperty("group.id","test01")
    //enable.auto.commit defaults to true (offsets are committed automatically);
    //set it to false if offsets are to be committed manually
    config.setProperty("enable.auto.commit","true")
    //auto.commit.interval.ms defaults to 5000: the consumer commits the latest
    //fetched offsets back to Kafka every 5 seconds
    config.setProperty("auto.commit.interval.ms","5000")
    //earliest: if a partition has a committed offset, resume from it; otherwise consume from the beginning
    //latest:   if a partition has a committed offset, resume from it; otherwise consume only newly produced data
    //none:     if every partition has a committed offset, resume from them; if any partition lacks one, throw an exception
    config.setProperty("auto.offset.reset","earliest")
    val topics = util.Arrays.asList("kb12_01")
    val consumer:KafkaConsumer[Long,String] = new KafkaConsumer(config)
    consumer.subscribe(topics)
    try {
      while (true){
        consumer.poll(Duration.ofSeconds(5)).records("kb12_01").forEach(e=>{
          println(s"${e.key()}\t${e.value()}")
        })
      }
    }finally {
      consumer.close()
    }
  }
}

Producer (Scala) + Consumer (Java)

KafkaProducer (producer)

Scala version

import java.util.Properties
import java.util.concurrent.Future

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata}

import scala.util.Random
object KafkaKB12Producer {
  def main(args: Array[String]): Unit = {
    val properties = new Properties()
    properties.setProperty("bootstrap.servers","192.168.131.200:9092")
    properties.setProperty("key.serializer","org.apache.kafka.common.serialization.LongSerializer")
    properties.setProperty("value.serializer","org.apache.kafka.common.serialization.FloatSerializer")
    properties.setProperty("acks","1")
    properties.setProperty("retries","2")
    properties.setProperty("linger.ms","500")
    properties.setProperty("batchSize","10")

    val TOPIC = "kb12_02"
    val PARTITION = 0
    val producer: KafkaProducer[Long, Float] = new KafkaProducer[Long, Float](properties)
    var count = 0
    val rand = new Random()
    try{
      while (true){
        count += 1
        val value: Float = 100 + rand.nextInt(1000) //random value in [100, 1099]
        val producerRecord: ProducerRecord[Long, Float] = new ProducerRecord[Long, Float](TOPIC,PARTITION,count,value)
        val send: Future[RecordMetadata] = producer.send(producerRecord)
        val metadata = send.get()
        println(s"${metadata.topic()}\t${metadata.partition()}\t${metadata.offset()}\t$count->$value")
      }
    }finally {
      producer.close()
    }
  }
}

KafkaConsumer (consumer)

Java version

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class KafkaKb12Consumer {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty("bootstrap.servers","192.168.131.200:9092");
        config.setProperty("group.id","test01");
        config.setProperty("key.deserializer","org.apache.kafka.common.serialization.LongDeserializer");
        config.setProperty("value.deserializer","org.apache.kafka.common.serialization.FloatDeserializer");
        config.setProperty("enable.auto.commit","true");
        config.setProperty("auto.offset.reset","earliest");
        config.setProperty("auto.commit.interval.ms","5000");

        List<String> TOPICS = Arrays.asList("kb12_02");
        KafkaConsumer<Long, Float> consumer = new KafkaConsumer<>(config);
        consumer.subscribe(TOPICS);
        try{
            while (true){
                ConsumerRecords<Long, Float> records = consumer.poll(Duration.ofSeconds(5));
                for (ConsumerRecord<Long, Float> record : records) {
                    System.out.println(record.key()+"->"+record.value());
                }
            }
        }finally {
            consumer.close();
        }
    }
}
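Both consumers above rely on auto-commit (enable.auto.commit=true), so offsets are committed in the background every five seconds. If a record must not be marked as consumed until it has actually been processed, auto-commit can be disabled and the offsets committed explicitly. A minimal sketch of the manual variant, reusing the config above and changing only the commit behavior:

    config.setProperty("enable.auto.commit", "false"); // take over offset management

    try {
        while (true) {
            ConsumerRecords<Long, Float> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<Long, Float> record : records) {
                System.out.println(record.key() + "->" + record.value());
            }
            // Commit only after the whole batch is processed; if the process
            // crashes before this line, the batch is re-delivered (at-least-once).
            if (!records.isEmpty()) {
                consumer.commitSync();
            }
        }
    } finally {
        consumer.close();
    }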
