I. Installation (ZooKeeper first)
Kafka depends on ZooKeeper, so we install ZooKeeper first.
1. Pull the ZooKeeper image
docker pull wurstmeister/zookeeper
2. Start ZooKeeper
docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" --restart always wurstmeister/zookeeper
You can follow the ZooKeeper startup logs with:
docker logs -f zookeeper
II. Installing Kafka
1. Pull the Kafka image
docker pull wurstmeister/kafka
2. Start Kafka
docker run --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=<your ZooKeeper host>:<port> \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<your broker's public address>:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-d wurstmeister/kafka
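For reference, a filled-in command might look like this (1.2.3.4 is a placeholder for your server's public IP; the advertised listener must be an address your clients can reach, while KAFKA_LISTENERS only sets the bind address inside the container):
docker run --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=1.2.3.4:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://1.2.3.4:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-d wurstmeister/kafka
As a quick smoke test, you can list topics from inside the container (in this image the Kafka scripts usually sit under /opt/kafka/bin):
docker exec -it kafka /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list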
III. Installing the kafka-map web UI
1. Pull the kafka-map image
docker pull dushixiang/kafka-map:latest
2. Start kafka-map
docker run -d \
-p 8080:8080 \
-v /opt/kafka-map/data:/usr/local/kafka-map/data \
-e DEFAULT_USERNAME=<initial username> \
-e DEFAULT_PASSWORD=<initial password> \
--name kafka-map \
--restart always dushixiang/kafka-map:latest
IV. Checking container status
Note: I am running this on a Tencent Cloud lightweight server. Kafka connects to ZooKeeper on startup, so make sure the ZooKeeper port (2181) is open in your firewall/security group.
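A simple way to confirm that the zookeeper, kafka and kafka-map containers are all running:
docker ps
If a container is missing from the list, it has exited; check why with docker logs <container name>.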
V. Logging in to kafka-map
Log in with the username and password you set when starting the container. You can then browse the current cluster, its topics, brokers and consumer groups. I won't go into the tool itself here; there are plenty of detailed tutorials online.
VI. Spring Boot integration
1. Create a new Spring Boot project and import the dependencies (pom.xml snippet below)
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.2.2.RELEASE</version>
</parent>

<dependencies>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.24</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>2.3.4.RELEASE</version>
    </dependency>
</dependencies>
2. Create the package structure
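The original post shows the directory structure as a screenshot; judging from the package declarations in the code below, the layout is roughly:
com.kafka.config (KafkaConfiguration)
com.kafka.kafka (BaseKafkaProducer)
com.kafka.controller (DemoController)
com.kafka.consumer (DemoConsumer)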
3. application.yml configuration
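The original shows the yml as a screenshot. Based on the properties that KafkaConfiguration reads via @Value, a minimal application.yml looks roughly like this (all values are illustrative; replace the address with your own):
spring:
  kafka:
    bootstrap-servers: <your kafka address>:9092
    consumer:
      enable-auto-commit: false
      auto-commit-interval: 1000
      group-id: kafka-demo
      auto-offset-reset: earliest
      max-poll-records: 50
    producer:
      retries: 3
      batch-size: 16384
      buffer-memory: 33554432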
4. Project code:
config:
package com.kafka.config;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.core.*;
import java.util.HashMap;
import java.util.Map;
@Slf4j
@Configuration
@EnableKafka
public class KafkaConfiguration {
@Value("${spring.kafka.bootstrap-servers}")
private String bootstrapServers;
@Value("${spring.kafka.consumer.enable-auto-commit}")
private Boolean autoCommit;
@Value("${spring.kafka.consumer.auto-commit-interval}")
private Integer autoCommitInterval;
@Value("${spring.kafka.consumer.group-id}")
private String groupId;
@Value("${spring.kafka.consumer.auto-offset-reset}")
private String autoOffsetReset;
@Value("${spring.kafka.consumer.max-poll-records}")
private Integer maxPollRecords;
@Value("${spring.kafka.producer.batch-size}")
private Integer batchSize;
@Value("${spring.kafka.producer.buffer-memory}")
private Integer bufferMemory;
@Value("${spring.kafka.producer.retries}")
private Integer retries;
// @Value("${spring.kafka.producer.properties.sasl.jaas.config}")
// private String producerJaasConfig;
//
// @Value("${spring.kafka.consumer.properties.sasl.jaas.config}")
// private String consumerJaasConfig;
/**
* Producer configuration
*/
@Bean
public Map<String, Object> producerConfigs() {
Map<String, Object> props = new HashMap<>();
// acks=0: fire-and-forget, the producer does not wait for broker acknowledgement
props.put(ProducerConfig.ACKS_CONFIG, "0");
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ProducerConfig.RETRIES_CONFIG, retries);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
// props.put("sasl.jaas.config",producerJaasConfig);
return props;
}
/**
* Producer factory
*/
@Bean
public ProducerFactory<String, String> producerFactory() {
return new DefaultKafkaProducerFactory<>(producerConfigs());
}
/**
* Producer template (KafkaTemplate)
*/
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
/**
* Consumer configuration
*/
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG,false);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
// props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
// props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
// props.put("sasl.jaas.config",consumerJaasConfig);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
return props;
}
/**
* Batch listener container factory
*/
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> batchFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
// Enable batch consumption; the number of records per batch is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
factory.setBatchListener(true);
return factory;
}
/*================================= Added later ================================*/
@Bean("adminClient")
public AdminClient adminClient() {
KafkaAdmin kafkaAdmin = new KafkaAdmin(consumerConfigs());
return AdminClient.create(kafkaAdmin.getConfig());
}
// @Bean("messageProducer")
// public MessageProducer messageProducer(KafkaTemplate kafkaTemplate,
// RedissonClient redissonClient) {
// return new MessageProducer(kafkaTemplate, redissonClient);
// }
//
// @Bean("messageTopicGenerator")
// public MessageTopicGenerator messageTopicGenerator(@Qualifier("adminClient") AdminClient adminClient,
// RedissonClient redissonClient,
// StringRedisTemplate stringRedisTemplate,
// ApplicationContext applicationContext,
// Environment environment) {
// return new MessageTopicGenerator(adminClient, redissonClient,
// stringRedisTemplate, applicationContext, environment);
// }
}
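The adminClient bean is not used anywhere else in this demo. A typical use is creating topics programmatically; a minimal sketch (the helper class, topic name, partition count and replication factor below are illustrative, not part of the original code):
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Collections;
import java.util.concurrent.ExecutionException;

public class TopicHelper {
    // Create a topic with 1 partition and replication factor 1, matching the single-broker setup above
    public static void createTopic(AdminClient adminClient, String name) throws ExecutionException, InterruptedException {
        adminClient.createTopics(Collections.singletonList(new NewTopic(name, 1, (short) 1)))
                   .all()
                   .get(); // block until the broker confirms creation
    }
}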
kafka:
package com.kafka.kafka;
import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import java.util.List;
/**
* @author xiaozhou
*
*/
@Slf4j
@Component
public class BaseKafkaProducer {
private final KafkaTemplate<String, String> kafkaTemplate;
public BaseKafkaProducer(KafkaTemplate<String, String> kafkaTemplate) {
this.kafkaTemplate = kafkaTemplate;
}
/**
* Send a Kafka message; this overload sends to partition 0.
*
* @param topic topic name
* @param key   message key
* @param msg   message body
*/
@Async
public void send(String topic, String key, String msg) {
send(topic, key, 0, msg);
}
public void send(String topic, String key, Integer partition, String msg) {
ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, key, msg);
future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
@Override
public void onFailure(Throwable e) {
//System.out.println("kafka producer fail:" + e.getMessage());
log.debug("kafka producer fail:{}", e.getMessage());
}
@Override
public void onSuccess(SendResult<String, String> sendResult) {
//System.out.println("kafka producer success:" + sendResult.toString());
log.debug("kafka producer success:{}", sendResult.toString());
}
});
}
/**
* Send a batch of messages (one send per element).
*
* @param topic   topic name
* @param key     message key
* @param msgList list of message bodies
*/
@Async
public void sendBatch(String topic, String key, List<String> msgList) {
msgList.forEach(item -> {
send(topic, key, item);
});
}
}
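One thing to watch: the send methods above are annotated with @Async, which only takes effect if async processing is enabled in the application. The main class is not shown in the original post; a minimal sketch (class name assumed) would be:
package com.kafka;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableAsync;

@EnableAsync // without this, @Async on BaseKafkaProducer is ignored and send() runs synchronously
@SpringBootApplication
public class KafkaDemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaDemoApplication.class, args);
    }
}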
Test controller:
package com.kafka.controller;
import com.kafka.kafka.BaseKafkaProducer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
/**
* @author xiaozhou
*
*/
@RestController
@RequestMapping("/kafka")
public class DemoController {
@Autowired
private BaseKafkaProducer baseKafkaProducer;
@GetMapping("/msg/send")
public String send(@RequestParam(value = "id")Integer id) {
baseKafkaProducer.send("demo","123",id.toString());
return "success";
}
}
Consumer:
package com.kafka.consumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Service;
/**
* @author xiaozhou
*
*/
@Slf4j
@Service
public class DemoConsumer {
// @KafkaListener(topics = "demo", groupId = "kafka-demo")
@KafkaListener(topicPartitions = {
@TopicPartition(topic = "demo",partitions = {"0"})
})
public void listen(ConsumerRecord<String, String> record) {
System.out.println("topic:" + record.topic() + ",key:" + record.key() + ",value:" + record.value());
System.out.println("kafka报文信息:" + record.value());
}
}
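Note that this listener uses the default container factory, so the batchFactory defined in KafkaConfiguration is not actually exercised here. To consume in batches, reference it explicitly; a sketch (the method is not in the original code, reuses the demo topic and group names, and additionally needs an import of java.util.List):
@KafkaListener(topics = "demo", groupId = "kafka-demo", containerFactory = "batchFactory")
public void listenBatch(List<ConsumerRecord<String, String>> records) {
    // With a batch listener, each poll delivers up to max-poll-records records at once
    records.forEach(r -> log.info("batch record, key: {}, value: {}", r.key(), r.value()));
}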
5. Start the project
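Once the application is running (assuming the default server.port of 8080), you can trigger a send and watch the consumer print the record:
curl "http://localhost:8080/kafka/msg/send?id=1"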
That completes the simple Kafka demo, built and tested end to end. In my view the key points are getting ZooKeeper installed correctly and the Kafka configuration in the code. This demo does not enable Kafka's security/authentication, which keeps things relatively simple. The other common pain point is serialization; I was stuck on serialization issues for quite a while at first, so feel free to use my configuration as a reference. Corrections from more experienced readers are welcome.