tar -xzf kafka_2.13-3.2.1.tgz -C /opt/
cd /opt/kafka_2.13-3.2.1
# config/zookeeper.properties
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=100
tickTime=2000
# initLimit/syncLimit are measured in ticks (here: 20s to join, 10s to sync)
initLimit=10
syncLimit=5
# server.N=host:peerPort:leaderElectionPort
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
Create a myid file on each node; its value must match the server.N index above:
# node1
echo "1" > /data/zookeeper/myid
# node2
echo "2" > /data/zookeeper/myid
# node3
echo "3" > /data/zookeeper/myid
# config/server.properties
# broker.id must be unique on each node (an inline comment after the value
# would be read as part of the value, so keep it on its own line)
broker.id=1
listeners=PLAINTEXT://node1:9092
advertised.listeners=PLAINTEXT://node1:9092
log.dirs=/data/kafka-logs
num.partitions=3
default.replication.factor=3
min.insync.replicas=2
zookeeper.connect=node1:2181,node2:2181,node3:2181
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=2
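Only broker.id and the listener hostnames change from node to node; the rest of server.properties is identical. node2, for example, would use:
broker.id=2
listeners=PLAINTEXT://node2:9092
advertised.listeners=PLAINTEXT://node2:9092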
# Start ZooKeeper (on each node)
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
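# Before starting Kafka, verify the ensemble is healthy (assumes nc/netcat is
# installed; run against each node): one node should report Mode: leader,
# the other two Mode: follower
echo srvr | nc node1 2181 | grep Mode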
# Start Kafka (on each node)
bin/kafka-server-start.sh -daemon config/server.properties
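# Confirm all three brokers registered in ZooKeeper (should print [1, 2, 3])
bin/zookeeper-shell.sh node1:2181 ls /brokers/ids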
# Create a test topic
bin/kafka-topics.sh --create --bootstrap-server node1:9092,node2:9092,node3:9092 \
--replication-factor 3 --partitions 3 --topic test-ha
# Describe the topic (shows leader, replicas, and ISR per partition)
bin/kafka-topics.sh --describe --bootstrap-server node1:9092 --topic test-ha
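# Optional HA smoke test: publish a message, read it back through another
# broker, then stop one broker (bin/kafka-server-stop.sh on that node) and
# re-run --describe; new leaders are elected and the topic stays writable,
# since replication-factor=3 with min.insync.replicas=2 tolerates one failure
echo "hello-ha" | bin/kafka-console-producer.sh --bootstrap-server node1:9092 --topic test-ha
bin/kafka-console-consumer.sh --bootstrap-server node2:9092 --topic test-ha --from-beginning --max-messages 1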
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.6</version>
</dependency>
spring:
  kafka:
    bootstrap-servers: node1:9092,node2:9092,node3:9092
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      acks: all
      retries: 3
    consumer:
      group-id: springboot-group
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      auto-offset-reset: earliest
      enable-auto-commit: false
    listener:
      ack-mode: manual_immediate
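acks: all together with retries: 3 ensures acknowledged writes survive a broker failure, but a retried send can still produce duplicates. One optional hardening step, sketched here rather than required by the setup above, is to enable the idempotent producer via pass-through properties:

spring:
  kafka:
    producer:
      properties:
        "[enable.idempotence]": true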
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class KafkaProducerService {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public KafkaProducerService(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    // Asynchronous send with success/failure callbacks
    public void sendMessage(String topic, String message) {
        kafkaTemplate.send(topic, message)
                .addCallback(
                        result -> System.out.println("Message sent: " + message),
                        ex -> System.err.println("Failed to send message: " + ex.getMessage())
                );
    }

    // Send a keyed message; records with the same key always land on the same partition
    public void sendMessageWithKey(String topic, String key, String message) {
        kafkaTemplate.send(topic, key, message);
    }
}
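A minimal way to exercise the service is a small REST endpoint. This controller is hypothetical (not part of the original setup) and simply forwards a request parameter to the test-ha topic created earlier:

import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/kafka")
public class KafkaTestController {

    private final KafkaProducerService producerService;

    public KafkaTestController(KafkaProducerService producerService) {
        this.producerService = producerService;
    }

    // POST /kafka/send?message=hello publishes to the test-ha topic
    @PostMapping("/send")
    public String send(@RequestParam String message) {
        producerService.sendMessage("test-ha", message);
        return "sent";
    }
}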
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

import java.util.List;

@Service
public class KafkaConsumerService {

    @KafkaListener(topics = "test-ha", groupId = "springboot-group")
    public void consume(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            System.out.printf("Received message: key=%s, value=%s, partition=%d, offset=%d%n",
                    record.key(), record.value(), record.partition(), record.offset());
            // Business logic goes here
            // Commit the offset manually, only after successful processing
            ack.acknowledge();
        } catch (Exception e) {
            // On failure, skip acknowledge() so the record can be redelivered
            System.err.println("Error processing message: " + e.getMessage());
        }
    }

    // Batch consumption; requires the batchFactory bean defined below
    @KafkaListener(topics = "batch-topic", groupId = "springboot-group", containerFactory = "batchFactory")
    public void consumeBatch(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        System.out.println("Received batch with " + records.size() + " messages");
        // Batch processing logic goes here
        ack.acknowledge();
    }
}
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConfig {

    // Container factory for batch consumption with manual acknowledgment
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> batchFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        factory.setBatchListener(true); // deliver records to the listener as a List
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    // Custom producer factory (optional; overrides the application.yml settings)
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
        return new DefaultKafkaProducerFactory<>(configProps);
    }
}
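One gap in the consumer above: when processing fails, the offset is never acknowledged, so a poison message can be redelivered indefinitely. A common refinement, sketched here under the assumption that dead-letter topics suit your use case (Spring Kafka 2.8+), is a common error handler that retries a bounded number of times and then publishes the record to a <topic>.DLT topic:

import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

// Add to KafkaConfig: retry a failed record twice (1s apart), then
// publish it to <original-topic>.DLT via the KafkaTemplate
@Bean
public DefaultErrorHandler errorHandler(KafkaTemplate<String, String> template) {
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template);
    return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 2));
}

Wire it into the container factory with factory.setCommonErrorHandler(errorHandler(...)) so the listeners share the same recovery path.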
High-availability recommendations: keep replication.factor >= 3 with min.insync.replicas=2 and acks=all (as configured above) so a single broker failure loses no acknowledged writes, and leave unclean.leader.election.enable at its default of false.
Performance tuning: adjust producer batch.size, linger.ms, and compression.type, and size partition counts to match the consumer parallelism you need.
Error handling: combine bounded retries with a dead-letter topic for poison messages (see the error-handler sketch above), and commit offsets only after successful processing.
Security configuration (recommended for production): replace the PLAINTEXT listeners with SASL authentication and TLS encryption, and restrict access with ACLs.
Monitoring and operations: watch under-replicated partitions and consumer lag, e.g. via JMX metrics or bin/kafka-consumer-groups.sh --bootstrap-server node1:9092 --describe --group springboot-group.
With the configuration and examples above, you can stand up a highly available three-node Kafka cluster and implement reliable message production and consumption in a Spring Boot application.