Integrating Spring 4 with Kafka 0.10.0.0

Current environment:

spring-4.3.1

kafka-0.10.0.0

Note that the spring-kafka version has to line up with the kafka-clients version; the 1.1.x line used below matches the 0.10.0.x clients.

pom.xml

 
  
                
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.0</version>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>1.3.0.RELEASE</version>
</dependency>
 

spring-kafka-producer.xml

 
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
         http://www.springframework.org/schema/beans/spring-beans.xsd
         http://www.springframework.org/schema/context
         http://www.springframework.org/schema/context/spring-context.xsd">

    <!-- Producer properties; adjust bootstrap.servers (and the tuning values) to your environment -->
    <bean id="producerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="localhost:9092" />
                <entry key="group.id" value="0" />
                <entry key="retries" value="10" />
                <entry key="batch.size" value="16384" />
                <entry key="linger.ms" value="1" />
                <entry key="buffer.memory" value="33554432" />
                <entry key="key.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
                <entry key="value.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- Producer factory the KafkaTemplate is built from -->
    <bean id="producerFactory"
          class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
        <constructor-arg>
            <ref bean="producerProperties" />
        </constructor-arg>
    </bean>

    <!-- KafkaTemplate bean; inject it wherever you need to send messages -->
    <bean id="KafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
        <constructor-arg ref="producerFactory" />
        <constructor-arg name="autoFlush" value="true" />
        <property name="defaultTopic" value="defaultTopic" /> <!-- placeholder topic -->
        <property name="producerListener" ref="producerListener" />
    </bean>

    <!-- package is assumed; point this at your KafkaProducerListener (defined below) -->
    <bean id="producerListener" class="cn.xbsafe.hgc.kafka.producer.KafkaProducerListener" />
</beans>
 
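Before wiring this into the web app, the producer context can be sanity-checked on its own by loading the XML from a main method and sending one message. A minimal sketch, assuming the file is on the classpath, a broker is reachable at the configured bootstrap.servers, and the topic testTopic exists (the class and topic names are placeholders):

import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.kafka.core.KafkaTemplate;

public class ProducerSmokeTest {
 public static void main(String[] args) {
  ClassPathXmlApplicationContext ctx =
    new ClassPathXmlApplicationContext("spring-kafka-producer.xml");
  try {
   // bean id "KafkaTemplate" comes from the XML above
   @SuppressWarnings("unchecked")
   KafkaTemplate<String, String> template =
     (KafkaTemplate<String, String>) ctx.getBean("KafkaTemplate");
   template.send("testTopic", "hello from spring-kafka");
   template.flush(); // autoFlush is already true; an explicit flush is just belt and braces
  } finally {
   ctx.close();
  }
 }
}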

spring-kafka-consumer.xml

 
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
     http://www.springframework.org/schema/beans/spring-beans.xsd">

    <!-- Consumer properties; adjust bootstrap.servers and group.id to your environment -->
    <bean id="consumerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="localhost:9092" />
                <entry key="group.id" value="0" />
                <entry key="enable.auto.commit" value="true" />
                <entry key="auto.commit.interval.ms" value="1000" />
                <entry key="session.timeout.ms" value="15000" />
                <entry key="key.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
                <entry key="value.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- Consumer factory -->
    <bean id="consumerFactory"
          class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
        <constructor-arg>
            <ref bean="consumerProperties" />
        </constructor-arg>
    </bean>

    <!-- The class that actually consumes the messages (KafkaConsumerServer.java below; package is assumed) -->
    <bean id="messageListenerConsumerService" class="cn.xbsafe.hgc.kafka.consumer.KafkaConsumerServer" />

    <!-- Container properties: the constructor argument is the topic to subscribe to (placeholder) -->
    <bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
        <constructor-arg value="testTopic" />
        <property name="messageListener" ref="messageListenerConsumerService" />
    </bean>

    <!-- The listener container; doStart begins polling as soon as the context is up -->
    <bean id="messageListenerContainer"
          class="org.springframework.kafka.listener.KafkaMessageListenerContainer"
          init-method="doStart">
        <constructor-arg ref="consumerFactory" />
        <constructor-arg ref="containerProperties" />
    </bean>
</beans>
 
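For comparison, the same consumer wiring can be expressed in plain Java; a sketch that copies the property values from the XML above (the class name ConsumerWiring and topic testTopic are placeholders):

import java.util.HashMap;
import java.util.Map;

import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.kafka.listener.config.ContainerProperties;

public class ConsumerWiring {
 public static KafkaMessageListenerContainer<String, String> build() {
  Map<String, Object> props = new HashMap<String, Object>();
  props.put("bootstrap.servers", "localhost:9092");
  props.put("group.id", "0");
  props.put("enable.auto.commit", "true");
  props.put("auto.commit.interval.ms", "1000");
  props.put("session.timeout.ms", "15000");
  props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

  DefaultKafkaConsumerFactory<String, String> factory =
    new DefaultKafkaConsumerFactory<String, String>(props);
  ContainerProperties containerProps = new ContainerProperties("testTopic");
  containerProps.setMessageListener(new KafkaConsumerServer());

  KafkaMessageListenerContainer<String, String> container =
    new KafkaMessageListenerContainer<String, String>(factory, containerProps);
  container.start(); // what init-method="doStart" achieves in the XML
  return container;
 }
}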

web.xml


<web-app xmlns="http://xmlns.jcp.org/xml/ns/javaee"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee http://xmlns.jcp.org/xml/ns/javaee/web-app_3_1.xsd"
         version="3.1">

    <context-param>
        <param-name>contextConfigLocation</param-name>
        <param-value>classpath:spring-database.xml;classpath:spring-kafka-producer.xml;classpath:spring-kafka-consumer.xml</param-value>
    </context-param>

    ...

KafkaProducer listener: KafkaProducerListener.java

import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.support.ProducerListener;

/**
 * KafkaProducer listener; enabled via the producerListener property in the
 * producer configuration file.
 */
@SuppressWarnings("rawtypes")
public class KafkaProducerListener implements ProducerListener {
 protected final Logger LOG = LoggerFactory.getLogger("kafkaProducer");

 /**
  * Called after a message has been sent successfully.
  */
 public void onSuccess(String topic, Integer partition, Object key, Object value, RecordMetadata recordMetadata) {
  LOG.info("==========kafka message sent successfully (log start)==========");
  LOG.info("----------topic:" + topic);
  LOG.info("----------partition:" + partition);
  LOG.info("----------key:" + key);
  LOG.info("----------value:" + value);
  LOG.info("----------RecordMetadata:" + recordMetadata);
  LOG.info("~~~~~~~~~~kafka message sent successfully (log end)~~~~~~~~~~");
 }

 /**
  * Called when sending a message fails.
  */
 public void onError(String topic, Integer partition, Object key, Object value, Exception exception) {
  LOG.error("==========kafka send failed (log start)==========");
  LOG.error("----------topic:" + topic);
  LOG.error("----------partition:" + partition);
  LOG.error("----------key:" + key);
  LOG.error("----------value:" + value);
  LOG.error("----------Exception:", exception);
  LOG.error("~~~~~~~~~~kafka send failed (log end)~~~~~~~~~~");
 }

 /**
  * Whether the onSuccess callback should be invoked at all; returning false
  * keeps only the failure callbacks.
  */
 public boolean isInterestedInSuccess() {
  LOG.info("///kafkaProducer listener started///");
  return true;
 }

}

 

The Kafka producer sender

KafkaProducer.java

import java.util.Map;

public interface KafkaProducer {
 /**
  * @param topic the topic
  * @param data  the message value
  * @return send result (code/message)
  */
 public Map<String, Object> sndMesForTemplate(String topic, String data);

 /**
  * Kafka send-message template.
  *
  * @param topic
  *            the topic
  * @param value
  *            the message value
  * @param ifPartition
  *            whether to pick a partition explicitly: "0" = yes, "1" = no
  * @param partitionNum
  *            number of partitions; must be greater than 0 when ifPartition is "0"
  * @param role
  *            caller role, e.g. bbc, app, erp ...
  */
 public Map<String, Object> sndMesForTemplate(String topic, Object value, String ifPartition, Integer partitionNum,
   String role);
}

KafkaProducerImpl.java

import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutionException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;

import com.alibaba.fastjson.JSON;

import cn.xbsafe.hgc.kafka.KafkaMesConstant;
import cn.xbsafe.hgc.kafka.producer.service.KafkaProducer;

@Service
public class KafkaProducerImpl implements KafkaProducer {

 private static final Logger LOG = LoggerFactory.getLogger(KafkaProducerImpl.class);

 @Autowired
 private KafkaTemplate<String, String> kafkaTemplate;

 public Map<String, Object> sndMesForTemplate(String topic, Object value, String ifPartition, Integer partitionNum,
   String role) {
  String key = role + "-" + value.hashCode();
  String valueString = JSON.toJSONString(value);
  if (ifPartition.equals("0")) {
   // send to an explicitly chosen partition
   int partitionIndex = getPartitionIndex(key, partitionNum);
   ListenableFuture<SendResult<String, String>> result = kafkaTemplate.send(topic, partitionIndex, key,
     valueString);
   return checkProRecord(result);
  } else {
   // let kafka derive the partition from the key
   ListenableFuture<SendResult<String, String>> result = kafkaTemplate.send(topic, key, valueString);
   return checkProRecord(result);
  }
 }

 public Map<String, Object> sndMesForTemplate(String topic, String data) {
  ListenableFuture<SendResult<String, String>> result = kafkaTemplate.send(topic, data);
  return checkProRecord(result);
 }
 

 /**
  * Derive a partition index from the key.
  *
  * @param key          the message key (may be null)
  * @param partitionNum the number of partitions
  */
 private int getPartitionIndex(String key, int partitionNum) {
  if (key == null) {
   Random random = new Random();
   return random.nextInt(partitionNum);
  } else {
   // mask the sign bit rather than Math.abs(): Math.abs(Integer.MIN_VALUE) is still negative
   return (key.hashCode() & Integer.MAX_VALUE) % partitionNum;
  }
 }
 /**
  * Check the record returned by the send.
  *
  * @param res the future returned by KafkaTemplate.send()
  */
 private Map<String, Object> checkProRecord(ListenableFuture<SendResult<String, String>> res) {
  Map<String, Object> m = new HashMap<String, Object>();
  if (res != null) {
   try {
    SendResult<String, String> r = res.get(); // block until the result is available
    // check the offset in the RecordMetadata; the ProducerRecord is not inspected
    long offsetIndex = r.getRecordMetadata().offset();
    if (offsetIndex >= 0) {
     m.put("code", KafkaMesConstant.SUCCESS_CODE);
     m.put("message", KafkaMesConstant.SUCCESS_MES);
    } else {
     m.put("code", KafkaMesConstant.KAFKA_NO_OFFSET_CODE);
     m.put("message", KafkaMesConstant.KAFKA_NO_OFFSET_MES);
    }
   } catch (InterruptedException e) {
    LOG.error("kafka send interrupted", e);
    m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
    m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
   } catch (ExecutionException e) {
    LOG.error("kafka send failed", e);
    m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
    m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
   }
  } else {
   m.put("code", KafkaMesConstant.KAFKA_NO_RESULT_CODE);
   m.put("message", KafkaMesConstant.KAFKA_NO_RESULT_MES);
  }
  return m;
 }

}

 
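The KafkaMesConstant class referenced by checkProRecord is not shown in this post; only the constant names are fixed by the code above. A minimal sketch with assumed codes and messages:

public class KafkaMesConstant {

 /* the values below are assumptions; only the names come from KafkaProducerImpl */
 public static final String SUCCESS_CODE = "00000";
 public static final String SUCCESS_MES = "send success";

 public static final String KAFKA_SEND_ERROR_CODE = "30001";
 public static final String KAFKA_SEND_ERROR_MES = "send error (InterruptedException/ExecutionException)";

 public static final String KAFKA_NO_RESULT_CODE = "30002";
 public static final String KAFKA_NO_RESULT_MES = "send returned no result";

 public static final String KAFKA_NO_OFFSET_CODE = "30003";
 public static final String KAFKA_NO_OFFSET_MES = "send succeeded but no offset came back";
}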

Testing the Kafka producer

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

 

@Controller
@RequestMapping("/subscriber")
public class SubscriberController {
 private Logger logger = LoggerFactory.getLogger(getClass());

 @Autowired
 private KafkaProducer kafkaProducer;

.....

  Map<String, Object> kafkasend = kafkaProducer.sndMesForTemplate(TOPIC, data);
  logger.info("kafkaproducer send result--->" + kafkasend);

.....
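Filled out, the elided handler could look like the sketch below. The mapping path, topic name, and payload fields are all assumptions, and it uses only imports already present in the class:

 @GetMapping("/testKafka")
 @ResponseBody
 public Map<String, Object> testKafka() {
  Map<String, Object> data = new HashMap<String, Object>();
  data.put("time", new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date()));
  data.put("msg", "hello kafka");
  // the partitioned overload works the same way:
  // kafkaProducer.sndMesForTemplate("testTopic", data, "0", 4, "bbc");
  Map<String, Object> kafkasend = kafkaProducer.sndMesForTemplate("testTopic", JSON.toJSONString(data));
  logger.info("kafkaproducer send result--->" + kafkasend);
  return kafkasend;
 }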

 

Kafka listener: KafkaConsumerServer.java

import org.apache.kafka.clients.consumer.ConsumerRecord; 
import org.slf4j.Logger; 
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.listener.MessageListener;

import com.alibaba.fastjson.JSONObject;
 

public class KafkaConsumerServer implements MessageListener<String, String> {

 protected final Logger LOG = LoggerFactory.getLogger("KafkaConsumerServer");

 /**
  * Invoked automatically by the listener container for every record:
  * consumes the message, relies on auto-committed offsets
  * (enable.auto.commit=true in the consumer config), then runs the
  * business code. (The high-level API offers no offset management, so
  * consumption cannot start from an arbitrary offset.)
  */
 public void onMessage(ConsumerRecord<String, String> record) {
  try {
   LOG.info("=============kafkaConsumer starts consuming=============");
   String topic = record.topic();
   String key = record.key();
   String value = record.value();
   long offset = record.offset();
   int partition = record.partition();
   LOG.info("-------------topic:" + topic);
   LOG.info("-------------value:" + value);
   LOG.info("-------------key:" + key);
   LOG.info("-------------offset:" + offset);
   LOG.info("-------------partition:" + partition);
   LOG.info("~~~~~~~~~~~~~kafkaConsumer done consuming~~~~~~~~~~~~~");
   // get kafka data info, to do something....
  } catch (Exception e) {
   LOG.error("error while consuming", e);
  } finally {
   // elasticSearchClientPool.returnAClientToPool(esClient);
  }
 }
}

Once the producer has sent successfully, the consumer listening on the topic picks up the published messages.
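The business step elided in onMessage ("to do something") would typically turn the fastjson string back into structured data, which is what the com.alibaba.fastjson.JSONObject import is for. A sketch, with a hypothetical msg field:

// inside onMessage, after reading value; "msg" is a hypothetical payload field
JSONObject json = JSONObject.parseObject(value);
String msg = json.getString("msg");
// ...business logic on msg...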