【Kafka】Kafka Code in Practice

3 Code in Practice

3.1 Integrating Kafka with Spring Boot

  • Add the dependencies

    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>1.1.0</version>
    </dependency>
    
  • Configuration in application.yml

    spring:
      kafka:
        topic: Channeltopic
        bootstrap-servers: 10.81.128.114:9092,10.81.128.213:9092,10.81.128.163:9092
        producer:
          key-serializer: org.apache.kafka.common.serialization.StringSerializer
          value-serializer: org.apache.kafka.common.serialization.StringSerializer
          batch-size: 16384
          buffer-memory: 33554432
    
  • Kafka configuration classes

    KafkaConfigProperties.java

  import lombok.AllArgsConstructor;
  import lombok.Data;
  import lombok.NoArgsConstructor;
  import org.springframework.beans.factory.annotation.Value;
  import org.springframework.stereotype.Component;
  
  @Data
  @NoArgsConstructor
  @AllArgsConstructor
  @Component
  public class KafkaConfigProperties {
  
      @Value("${spring.kafka.bootstrap-servers}")
      private String brokerAddress;
  
      @Value("${spring.kafka.producer.batch-size}")
      private String batchSize;
  
      @Value("${spring.kafka.producer.buffer-memory}")
      private String bufferMemory;
  
      @Value("${spring.kafka.topic}")
      private String kafkaTopic;
  
  }
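
As an aside, the same binding can be expressed without repeating @Value on every field by using Spring Boot's relaxed binding. A minimal sketch, assuming only the two flat properties are needed (the KafkaProps class name is illustrative, not from the original project):

  import lombok.Data;
  import org.springframework.boot.context.properties.ConfigurationProperties;
  import org.springframework.stereotype.Component;
  
  // Hypothetical alternative to the @Value-based class above: every property
  // under "spring.kafka" is bound onto the matching field automatically
  // (bootstrap-servers -> bootstrapServers).
  @Data
  @Component
  @ConfigurationProperties(prefix = "spring.kafka")
  public class KafkaProps {
  
      private String topic;
  
      private String bootstrapServers;
  
  }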

KafkaConfig.java

  import org.apache.kafka.clients.producer.KafkaProducer;
  import org.apache.kafka.clients.producer.Producer;
  import org.apache.kafka.clients.producer.ProducerConfig;
  import org.apache.kafka.clients.producer.ProducerRecord;
  import org.apache.kafka.common.serialization.StringSerializer;
  import org.springframework.beans.factory.annotation.Autowired;
  import org.springframework.context.annotation.Bean;
  import org.springframework.stereotype.Component;
  
  import java.util.HashMap;
  import java.util.Map;
  
  @Component
  public class KafkaConfig {
  
      @Autowired
      private KafkaConfigProperties configProperties;
  
      // Cache the producer instance: @Bean methods in a plain @Component are not
      // proxied by Spring, so calling producer() directly would otherwise create a
      // new KafkaProducer on every call.
      private Producer<String, String> producer;
  
      @Bean
      public Producer<String, String> producer() {
  
          Map<String, Object> props = new HashMap<>();
  
          // Kafka broker addresses
          props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configProperties.getBrokerAddress());
  
          // Wait up to 100 ms so records can be batched together
          props.put(ProducerConfig.LINGER_MS_CONFIG, 100);
  
          // The cluster may not recover quickly, so set retries as high as possible
          props.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);
  
          // Core parameter for high availability and zero data loss
          props.put(ProducerConfig.ACKS_CONFIG, "all");
  
          // Preserve ordering as far as possible; prevents reordering of messages within a partition on retry
          props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
  
          // Message compression algorithm (speed: lz4 > snappy > gzip)
          props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
  
          props.put(ProducerConfig.BATCH_SIZE_CONFIG, configProperties.getBatchSize());
          props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, configProperties.getBufferMemory());
          props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
          props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  
          // The parameters below are broker-side configs
  
          // props.put("delete.topic.enable", true);
          //
          // // Minimum number of ISR replicas a message must be written to before it
          // // counts as committed; only takes effect together with acks=all
          // props.put("min.insync.replicas", 2);
          //
          // // Replica count: make sure replication.factor > min.insync.replicas;
          // // the recommended setting is replication.factor = min.insync.replicas + 1
          // props.put("replication.factor", 3);
          //
          // // Disable unclean leader election: replicas outside the ISR can never be
          // // elected leader, which avoids message loss on the broker side
          // props.put("unclean.leader.election.enable", false);
  
          this.producer = new KafkaProducer<>(props);
  
          return this.producer;
      }
  
      /**
       * To guarantee message reliability, the send is made synchronous: block on
       * the Future returned by send() until the broker acknowledges the record.
       * @param topic   target topic
       * @param message message payload
       */
      public void send(String topic, String message){
          try {
              producer.send(new ProducerRecord<>(topic, message), (metadata, exception) -> {
                  if (exception != null) {
                      // On failure, metadata may be null, so only print the exception
                      exception.printStackTrace();
                  } else {
                      System.out.println(metadata.timestamp() + "," + metadata.topic() + "," + metadata.partition() + "," + metadata.offset());
                  }
              }).get();
          } catch (Exception e) {
              e.printStackTrace();
          }
      }
  
  }

Wherever you need to send a message, inject KafkaConfig and use its send() method to publish, as in the sketch below.
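
A minimal sketch of such a caller (the ClickService class name and its method are illustrative, not from the original project):

  import org.springframework.beans.factory.annotation.Autowired;
  import org.springframework.stereotype.Service;
  
  // Hypothetical caller: inject KafkaConfig and delegate to its send() method.
  @Service
  public class ClickService {
  
      @Autowired
      private KafkaConfig kafkaConfig;
  
      @Autowired
      private KafkaConfigProperties configProperties;
  
      public void reportClick(String json) {
          // The topic name comes from spring.kafka.topic in application.yml
          kafkaConfig.send(configProperties.getKafkaTopic(), json);
      }
  
  }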

3.2 Java API Examples

Message producer

package com.example.demo;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;

/**
 *
 * Producer-side code for a highly available Kafka cluster
 *
 * server.properties
 *
 * ①、broker server config
 *
 * delete.topic.enable=true
 * min.insync.replicas=2
 * unclean.leader.election.enable=false
 *
 * ②、topic config
 *
 * unclean.leader.election.enable=false
 * min.insync.replicas=2
 *
 * ③、create topic
 *
 * bin/kafka-topics.sh --create --zookeeper 192.168.1.128:2181,192.168.1.81:2181,192.168.1.118:2181 --replication-factor 3 --partitions 6 --topic ChannelClick
 *
 * ④、topic describe
 *
 * bin/kafka-topics.sh --zookeeper 192.168.1.128:2181,192.168.1.81:2181,192.168.1.118:2181 --describe --topic ChannelClick
 * Topic:ChannelClick	PartitionCount:6	ReplicationFactor:3	Configs:unclean.leader.election.enable=false,min.insync.replicas=2
 * Topic: ChannelClick	Partition: 0	Leader: 1	Replicas: 1,3,2	Isr: 2,1,3
 * Topic: ChannelClick	Partition: 1	Leader: 2	Replicas: 2,1,3	Isr: 2,1,3
 * Topic: ChannelClick	Partition: 2	Leader: 3	Replicas: 3,2,1	Isr: 2,1,3
 * Topic: ChannelClick	Partition: 3	Leader: 1	Replicas: 1,2,3	Isr: 2,1,3
 * Topic: ChannelClick	Partition: 4	Leader: 2	Replicas: 2,3,1	Isr: 2,1,3
 * Topic: ChannelClick	Partition: 5	Leader: 3	Replicas: 3,1,2	Isr: 2,1,3
 *
 */
public class Produce {


    public static void main(String[] args) {

        Properties props = new Properties();
        props.put("bootstrap.servers", "47.100.76.107:9092,47.100.76.181:9092,47.100.76.204:9092");
        // Core parameter for high availability and zero data loss
        props.put("acks", "all");
        // Wait up to 100 ms so records can be batched together
        props.put("linger.ms", 100);
        // The cluster may not recover quickly, so set retries as high as possible
        props.put("retries", Integer.MAX_VALUE);
        // Preserve ordering within a partition as far as possible
        props.put("max.in.flight.requests.per.connection", 1);
        // Message compression algorithm
        props.put("compression.type", "snappy");
        props.put("batch.size", 16384);
        props.put("buffer.memory", 33554432);
        // key/value serializers
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // The following are broker-side configs; set on the producer they do not take
        // effect (the client just logs an "unknown config" warning for each), but they
        // are kept here to document the matching server-side settings
        props.put("delete.topic.enable", true);
        props.put("min.insync.replicas", 2);
        props.put("unclean.leader.election.enable", false);

        Producer<String, String> producer = new KafkaProducer<>(props);

        for (int i = 1; i <= 1000000; i++) {

            String json = "{\"clientIP\":\"223.104.186.215\",\"fromIP\":\"127.0.0.1\",\"clickTime\":null,\"channelTime\":\"2018-04-20T15:22:48.902+0800\",\"callAdvTime\":null,\"delPlat\":\"ios\",\"delMode\":\"CPA\",\"sourceId\":39,\"orderId\":35,\"orderInputId\":42,\"ideaId\":null,\"advId\":44,\"proId\":36,\"landingPageId\":null,\"callAdvUrl\":\"https://itunes.apple.com/cn/app/id990531994?mt\\u003d8\",\"callChannelUrl\":null,\"repeatTime\":0,\"responseMsg\":\"防作弊校验失败!\",\"channelType\":\"CPA\",\"logType\":\"ChannelClick\",\"source\":\"changsi\",\"appid\":\"990531994\",\"scid\":\"jcdefault\",\"uuid\":\"CA2606BB-1917-4CCE-968F-U18ACLH3\",\"status\":12,\"cid\":null,\"sc_name\":null,\"orderSourceId\":44}";

            producer.send(new ProducerRecord<>("topic-1031", json), (metadata, exception) -> {
                if (exception!=null) {
                    exception.printStackTrace();
                }
            });

        }

        producer.close();

    }

}
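
The loop above sends asynchronously: the callback reports failures, but the loop never waits for acknowledgements, so a record can still be lost silently once retries are exhausted. When per-record confirmation matters more than throughput, you can block on the Future returned by send(). A minimal sketch of that variant (the sendSync helper is illustrative, not part of the Kafka API):

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.concurrent.ExecutionException;

public final class SyncSend {

    // Hypothetical helper: block on the Future returned by send() so the call
    // only returns once the broker has acknowledged the record (with acks=all,
    // once min.insync.replicas replicas have it).
    public static RecordMetadata sendSync(Producer<String, String> producer,
                                          String topic, String message)
            throws ExecutionException, InterruptedException {
        return producer.send(new ProducerRecord<>(topic, message)).get();
    }

}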

Message consumer

package com.example.demo;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;

public class Consumer {

    public static void main(String[] args) throws Exception {

        AtomicLong atomicLong = new AtomicLong(0);

        Properties props = new Properties();
        props.put("bootstrap.servers", "47.100.76.107:9092,47.100.76.181:9092,47.100.76.204:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList("topic-1031"));

        // Capture the main thread: inside the shutdown hook, Thread.currentThread()
        // is the hook thread itself, and joining on it would block forever
        final Thread mainThread = Thread.currentThread();

        // Register a shutdown hook that runs when the JVM begins to shut down
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.out.println("Starting exit...");
            // wakeup() makes the blocked poll() throw WakeupException, signalling the main loop to exit
            consumer.wakeup();
            try {
                // Wait for the main thread to finish closing the consumer
                mainThread.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }));


        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records){
                    System.out.println("-------------->"+atomicLong.incrementAndGet());
                    System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                }
            }
        } catch (WakeupException e) {
            // expected: poll() throws this after wakeup() is called during shutdown
        } finally {
            consumer.close();
        }



    }
}
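
One caveat: this consumer runs with enable.auto.commit=true, so offsets are committed on a timer, and a crash between commits can replay or skip records. For at-least-once delivery, a common variant disables auto-commit and commits after each processed batch. A minimal sketch under that assumption (the ManualCommitConsumer class is illustrative; the brokers, topic, and group id match the example above):

package com.example.demo;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumer {

    public static void main(String[] args) {

        Properties props = new Properties();
        props.put("bootstrap.servers", "47.100.76.107:9092,47.100.76.181:9092,47.100.76.204:9092");
        props.put("group.id", "test");
        // Take over offset management from the client
        props.put("enable.auto.commit", "false");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("topic-1031"));

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
                }
                // Commit only after the whole batch has been handled: a crash before
                // this line replays the batch (at-least-once), never skips it
                consumer.commitSync();
            }
        } catch (WakeupException e) {
            // expected after wakeup() during shutdown
        } finally {
            consumer.close();
        }
    }
}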
