Spring Boot integration with a Kafka cluster

一、Add the Maven dependency:



<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.3.6.RELEASE</version>
</dependency>

二、application.properties configuration:

kafka.consumer.zookeeper.connect = 172.25.62.100:2181,172.25.62.101:2181,172.25.62.102:2181
kafka.consumer.servers = 172.25.62.100:9092,172.25.62.101:9092,172.25.62.102:9092
kafka.consumer.enable.auto.commit = true
kafka.consumer.session.timeout = 30000
kafka.consumer.auto.commit.interval = 20000
kafka.consumer.auto.offset.reset = latest
kafka.consumer.group.id = 70001
kafka.consumer.concurrency = 10
kafka.consumer.heartbeat.interval.ms = 7000

kafka.producer.servers = 172.25.62.100:9092,172.25.62.101:9092,172.25.62.102:9092
kafka.producer.retries = 0
kafka.producer.batch.size = 4096
kafka.producer.linger = 1
kafka.producer.buffer.memory = 40960

三、Producer configuration:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

/**
 * @Author: guandezhi
 * @Date: 2019/7/10 16:55
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${kafka.producer.servers}")
    private String servers;
    @Value("${kafka.producer.retries}")
    private int retries;
    @Value("${kafka.producer.batch.size}")
    private int batchSize;
    @Value("${kafka.producer.linger}")
    private int linger;
    @Value("${kafka.producer.buffer.memory}")
    private int bufferMemory;

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        // Use the injected property value rather than a hard-coded buffer size
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
        return props;
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}

四、Consumer configuration:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

/**
 * @Author: guandezhi
 * @Date: 2019/7/10 16:55
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.servers}")
    private String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    private boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    private String sessionTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    private String autoCommitInterval;
    @Value("${kafka.consumer.group.id}")
    private String groupId;
    @Value("${kafka.consumer.auto.offset.reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;
    @Value("${kafka.consumer.heartbeat.interval.ms}")
    private String heartbeatIntervalMs;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(6000);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
        propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatIntervalMs);
        return propsMap;
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Enable batch consumption; records per batch are capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG above
        factory.setBatchListener(true);
        return factory;
    }
}
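
With enable.auto.commit=true as above, offsets are committed in the background. If you would rather commit only after a record has actually been processed, you can disable auto-commit and expose a container factory with manual acknowledgment. The following is a minimal sketch, assuming it is added to KafkaConsumerConfig; the bean name manualAckFactory and the listener method are illustrative, not part of the original setup:

// Additional imports needed for this sketch:
// import org.springframework.kafka.listener.AbstractMessageListenerContainer;
// import org.springframework.kafka.support.Acknowledgment;

@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> manualAckFactory() {
    Map<String, Object> props = new HashMap<>(consumerConfigs());
    // Turn off auto-commit so offsets are only committed when acknowledge() is called
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(props));
    // Commit each record's offset as soon as Acknowledgment.acknowledge() is called
    factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
    return factory;
}

// In a listener component, reference the factory and acknowledge explicitly:
@KafkaListener(topics = "test-topic", containerFactory = "manualAckFactory")
public void onMessage(String msg, Acknowledgment ack) {
    // handle msg, then commit its offset
    ack.acknowledge();
}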

五、Producer:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * @Author: guandezhi
 * @Date: 2019/7/10 11:56
 */
@RestController
public class IndexController {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @PostMapping("/sendMsg")
    public void test() {
        kafkaTemplate.send("test-topic", "test message");
    }
}
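
KafkaTemplate.send() returns a ListenableFuture, so the delivery result can be observed without blocking the request thread. A minimal sketch of a callback-based variant, assuming it is added to the same controller (the /sendMsgAsync endpoint is made up for illustration):

@PostMapping("/sendMsgAsync")
public void sendWithCallback() {
    kafkaTemplate.send("test-topic", "test message")
            .addCallback(
                    // invoked once the broker has acknowledged the record
                    result -> System.out.println("sent to partition " + result.getRecordMetadata().partition()
                            + " at offset " + result.getRecordMetadata().offset()),
                    // invoked when the send ultimately fails (after the configured retries)
                    ex -> System.err.println("send failed: " + ex.getMessage()));
}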

六、Consumer:

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

/**
 * @Author: guandezhi
 * @Date: 2019/7/10 18:06
 */
@Slf4j
@Component
public class KafkaMessageListener {

    @KafkaListener(topics = "test-topic")
    public void exec(String msg) {
        log.info("receive msg : {}", msg);
    }
}
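
The batchFactory bean defined in section four is not wired to any listener above. To consume in batches, reference it through the containerFactory attribute; a minimal sketch, assuming it sits in the same @Slf4j component (the method name consumeBatch is illustrative):

// Batch consumption: each poll delivers up to max.poll.records messages as one list.
// Additional import needed: import java.util.List;
@KafkaListener(topics = "test-topic", containerFactory = "batchFactory")
public void consumeBatch(List<String> msgs) {
    log.info("received batch of {} messages", msgs.size());
    for (String msg : msgs) {
        log.info("receive msg : {}", msg);
    }
}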