Multi-threaded Kafka production and consumption

1. Kafka producer

Since version 0.9 the Kafka clients have been implemented in Java. The producer, KafkaProducer, is a thread-safe object, so the recommended approach is to use it as a singleton and share a single instance across multiple threads.

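At a glance, the usage pattern implemented in the listings below comes down to three calls on the shared instance (the message string here is only an illustration):

// All threads obtain the same shared instance; init() creates the producer only once.
KafkaProducerSingleton producer = KafkaProducerSingleton.getInstance();
producer.init("test_find", 3);            // topic and retry count, as used later in HandlerProducer
producer.sendKafkaMessage("hello kafka");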
KafkaProducerSingleton

package com.kafka.singleton;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.Random;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class KafkaProducerSingleton {

    private static final Logger LOGGER = LoggerFactory
            .getLogger(KafkaProducerSingleton.class);

    private static KafkaProducer<String, String> kafkaProducer;

    private final Random random = new Random();

    private String topic;

    private int retry;

    private KafkaProducerSingleton() {
    }

    /**
     * Static inner holder class for lazy initialization.
     *
     * @author tanjie
     */
    private static class LazyHandler {

        private static final KafkaProducerSingleton instance = new KafkaProducerSingleton();
    }

    /**
     * Singleton accessor. KafkaProducer is thread-safe, so a single instance
     * can be shared by multiple threads.
     *
     * @return the shared instance
     */
    public static KafkaProducerSingleton getInstance() {
        return LazyHandler.instance;
    }

    /**
     * Initializes the underlying KafkaProducer. Declared synchronized so that
     * concurrent callers cannot create more than one producer.
     *
     * @param topic target topic
     * @param retry number of retries after a failed send
     */
    public synchronized void init(String topic, int retry) {
        this.topic = topic;
        this.retry = retry;
        if (null == kafkaProducer) {
            Properties props = new Properties();
            InputStream inStream = null;
            try {
                inStream = this.getClass().getClassLoader()
                        .getResourceAsStream("kafka.properties");
                props.load(inStream);
                kafkaProducer = new KafkaProducer<String, String>(props);
            } catch (IOException e) {
                LOGGER.error("Failed to initialize kafkaProducer: " + e.getMessage(), e);
            } finally {
                if (null != inStream) {
                    try {
                        inStream.close();
                    } catch (IOException e) {
                        LOGGER.error("Failed to close kafka.properties stream: " + e.getMessage(), e);
                    }
                }
            }
        }
    }

    /**
     * Sends a message through kafkaProducer.
     *
     * @param message the message value
     */
    public void sendKafkaMessage(final String message) {
        /*
         * Partitioning rules:
         * 1. If a partition is specified, the message is sent only to that partition.
         * 2. If both a partition and a key are specified, the message still goes to the
         *    specified partition and the key has no effect on routing.
         * 3. If neither a partition nor a key is specified, the message is spread across
         *    the topic's partitions.
         * 4. If only a key is specified, the partition is chosen by hashing the key.
         */
        ProducerRecord<String, String> record = new ProducerRecord<String, String>(
                topic, random.nextInt(3), "", message);
        // send() is asynchronous: it adds the record to a buffer and returns immediately,
        // which lets the producer batch records for efficiency.
        // KafkaProducer is thread-safe, so one instance can send from many threads.
        kafkaProducer.send(record, new Callback() {
            public void onCompletion(RecordMetadata recordMetadata,
                    Exception exception) {
                if (null != exception) {
                    LOGGER.error("Failed to send kafka message: " + exception.getMessage(),
                            exception);
                    retryKafkaMessage(message);
                }
            }
        });
    }

    /**
     * Retries a message that failed to send, up to {@code retry} times.
     *
     * @param retryMessage the message to resend
     */
    private void retryKafkaMessage(final String retryMessage) {
        ProducerRecord<String, String> record = new ProducerRecord<String, String>(
                topic, random.nextInt(3), "", retryMessage);
        for (int i = 1; i <= retry; i++) {
            try {
                kafkaProducer.send(record);
                return;
            } catch (Exception e) {
                // Log and let the loop retry; recursing here would bypass the retry limit.
                LOGGER.error("Failed to resend kafka message: " + e.getMessage(), e);
            }
        }
    }

    /**
     * Closes the shared producer.
     */
    public void close() {
        if (null != kafkaProducer) {
            kafkaProducer.close();
        }
    }

    public String getTopic() {
        return topic;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }

    public int getRetry() {
        return retry;
    }

    public void setRetry(int retry) {
        this.retry = retry;
    }
}
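
The routing rules described in the comment inside sendKafkaMessage map directly onto the ProducerRecord constructors. The snippet below only illustrates those four cases; the helper class name, key and value strings, and the partition number are made up, while the topic name matches the example above.

package com.kafka.singleton;

import org.apache.kafka.clients.producer.ProducerRecord;

// Illustration of the partition/key routing rules (hypothetical class name).
public final class RecordRoutingExamples {

    public static void examples() {
        // 1. Explicit partition: goes to partition 1 regardless of any key.
        ProducerRecord<String, String> toPartition =
                new ProducerRecord<String, String>("test_find", 1, null, "value");

        // 2. Partition and key both given: the explicit partition still wins.
        ProducerRecord<String, String> partitionAndKey =
                new ProducerRecord<String, String>("test_find", 1, "my-key", "value");

        // 3. Neither partition nor key: the producer spreads records over the partitions.
        ProducerRecord<String, String> noKey =
                new ProducerRecord<String, String>("test_find", "value");

        // 4. Key only: the partition is chosen by hashing the key, so equal keys land together.
        ProducerRecord<String, String> keyed =
                new ProducerRecord<String, String>("test_find", "my-key", "value");
    }
}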

   HandlerProducer

  

package com.travelsky.kafka.singleton;

import com.kafka.singleton.KafkaProducerSingleton;

public class HandlerProducer implements Runnable {

    private final String message;

    public HandlerProducer(String message) {
        this.message = message;
    }

    @Override
    public void run() {
        KafkaProducerSingleton kafkaProducerSingleton = KafkaProducerSingleton
                .getInstance();
        kafkaProducerSingleton.init("test_find", 3);
        System.out.println("Current thread: " + Thread.currentThread().getName()
                + ", kafka singleton instance: " + kafkaProducerSingleton);
        kafkaProducerSingleton.sendKafkaMessage("Message" + message);
    }
}

    kafka.properties

bootstrap.servers=master:9092,slave1:9092,slave2:9092
acks=1
retries=0
batch.size=1000
compression.type=gzip
#buffer.memory=33554432
key.serializer=org.apache.kafka.common.serialization.StringSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer
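
For reference, the same settings can be built in code with the ProducerConfig constants instead of loading kafka.properties. This is only an equivalent sketch, not part of the original example; the class name ProducerConfigSketch is made up, and the values are copied from the file above.

package com.kafka.singleton;

import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerConfig;

// Sketch: the kafka.properties settings expressed programmatically.
public final class ProducerConfigSketch {

    public static Properties buildProducerProps() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092,slave1:9092,slave2:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "1");         // wait for the partition leader only
        props.put(ProducerConfig.RETRIES_CONFIG, 0);        // client retries off; the singleton retries itself
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1000);  // batch size in bytes
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        return props;
    }
}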

2. Kafka consumer

Kafka_Consumer

package com.kafka.consumer;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public final class Kafka_Consumer {

    /**
     * KafkaConsumer is NOT thread-safe.
     */
    private final KafkaConsumer<String, String> consumer;

    private ExecutorService executorService;

    public Kafka_Consumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "ip:port");
        props.put("group.id", "group");
        // Disable auto commit; offsets are committed manually after processing.
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test_find"));
    }

    public void execute() {
        executorService = Executors.newFixedThreadPool(3);
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(10);
            if (!records.isEmpty()) {
                // Hand the polled batch to a worker thread. The worker also calls
                // commitSync on this (non-thread-safe) consumer, so in practice
                // access to the consumer must be synchronized; see the note after
                // ConsumerThread below.
                executorService.submit(new ConsumerThread(records, consumer));
            }
        }
    }

    public void shutdown() {
        try {
            // Stop the workers first so no thread is still using the consumer,
            // then close the consumer itself.
            if (executorService != null) {
                executorService.shutdown();
                if (!executorService.awaitTermination(10, TimeUnit.SECONDS)) {
                    System.out.println("Timeout");
                }
            }
            if (consumer != null) {
                consumer.close();
            }
        } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
        }
    }
}

    ConsumerThread

package com.kafka.consumer;

import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/**
 * One consumer with multiple worker threads: the ordering of messages within a
 * partition is hard to guarantee.
 *
 * @author tanjie
 */
public final class ConsumerThread implements Runnable {

    private final ConsumerRecords<String, String> records;

    private final KafkaConsumer<String, String> consumer;

    public ConsumerThread(ConsumerRecords<String, String> records,
            KafkaConsumer<String, String> consumer) {
        this.records = records;
        this.consumer = consumer;
    }

    @Override
    public void run() {
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records
                    .records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println("Current thread: " + Thread.currentThread() + ", "
                        + "offset: " + record.offset() + ", " + "topic: "
                        + record.topic() + ", " + "partition: " + record.partition()
                        + ", " + "message: " + record.value());
            }
            // Commit the consumed offsets manually, and only after the messages
            // have been processed successfully.
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1)
                    .offset();
            consumer.commitSync(Collections.singletonMap(partition,
                    new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
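
Because KafkaConsumer is not thread-safe, having the worker threads call commitSync while the main loop keeps polling the same consumer can fail with a concurrent-access error unless every use of the consumer is externally synchronized. A commonly used alternative is to give each worker thread its own consumer, so that poll() and commitSync() always run on a single thread. The sketch below shows that pattern under the assumptions of this article (topic test_find, group "group", placeholder bootstrap servers); the class name PerThreadConsumer is made up.

package com.kafka.consumer;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Sketch of the one-consumer-per-thread pattern (hypothetical class name).
public final class PerThreadConsumer implements Runnable {

    private final Properties props;

    public PerThreadConsumer(Properties props) {
        this.props = props;
    }

    @Override
    public void run() {
        // Each worker owns its consumer, so poll() and commitSync() stay on one thread.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            consumer.subscribe(Arrays.asList("test_find"));
            while (!Thread.currentThread().isInterrupted()) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(Thread.currentThread().getName()
                            + " offset: " + record.offset() + " value: " + record.value());
                }
                if (!records.isEmpty()) {
                    consumer.commitSync();
                }
            }
        } finally {
            consumer.close();
        }
    }

    // Usage: start as many workers as the topic has partitions (three in this example).
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "ip:port");
        props.put("group.id", "group");
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        ExecutorService pool = Executors.newFixedThreadPool(3);
        for (int i = 0; i < 3; i++) {
            pool.submit(new PerThreadConsumer(props));
        }
    }
}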

   Main method

public static void main(String[] args) {
    Kafka_Consumer kafka_Consumer = new Kafka_Consumer();
    try {
        // Note: execute() polls in an endless loop and never returns, so the sleep and
        // shutdown below are only reached if the loop is interrupted or moved to another thread.
        kafka_Consumer.execute();
        Thread.sleep(20000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        kafka_Consumer.shutdown();
    }
}

3. Running the example

     Start the consumer first, then the producer. The output looks like this:

     Consumer output:

Current thread: Thread[pool-1-thread-1,5,main], offset: 44, topic: test_find, partition: 1, message: Message:1
Current thread: Thread[pool-1-thread-2,5,main], offset: 45, topic: test_find, partition: 1, message: Message:2
Current thread: Thread[pool-1-thread-1,5,main], offset: 46, topic: test_find, partition: 1, message: Message:3
Current thread: Thread[pool-1-thread-1,5,main], offset: 39, topic: test_find, partition: 0, message: Message:4
Current thread: Thread[pool-1-thread-2,5,main], offset: 47, topic: test_find, partition: 1, message: Message:5
Current thread: Thread[pool-1-thread-3,5,main], offset: 40, topic: test_find, partition: 0, message: Message:6
Current thread: Thread[pool-1-thread-2,5,main], offset: 37, topic: test_find, partition: 2, message: Message:7
Current thread: Thread[pool-1-thread-2,5,main], offset: 38, topic: test_find, partition: 2, message: Message:8
Current thread: Thread[pool-1-thread-1,5,main], offset: 48, topic: test_find, partition: 1, message: Message:9
Current thread: Thread[pool-1-thread-2,5,main], offset: 39, topic: test_find, partition: 2, message: Message:10

    Producer test and output:

  

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import com.travelsky.kafka.singleton.HandlerProducer;

@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = { "classpath:applicationContext.xml" })
public class Kafka生产_多线程单实例 {

    @Test
    public void testSendMessageSingleton() throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        for (int i = 1; i <= 10; i++) {
            Thread.sleep(1000);
            executor.submit(new HandlerProducer(":" + i));
        }
    }
}

   

Current thread: pool-1-thread-1, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-2, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-3, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-1, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-2, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-3, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-1, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-2, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-3, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475
Current thread: pool-1-thread-1, kafka singleton instance: com.kafka.singleton.KafkaProducerSingleton@15eb475

 
