(1)新建Spring Boot项目,参考以下创建过程;
创建一个Spring Boot项目
(2)pom文件中添加spring-kafka框架
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
核心类:
/**
 * Kafka 消息发送服务:封装 KafkaTemplate,向指定 topic 发送字符串消息。
 */
@Service
public class KafkaService {

    // 使用 <String, String> 泛型:key 和 value 均按字符串序列化发送
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * 向指定主题发送一条消息。
     *
     * @param topic 目标主题(不存在时 Kafka 会自动创建)
     * @param data  消息内容
     */
    public void sendKafkaMsg(String topic, String data) {
        kafkaTemplate.send(topic, data);
    }
}
就这样,发送消息代码就实现了。
这里关键的代码为 kafkaTemplate.send() 方法,传入的参数为 topic(主题)和要发送的数据 data。topic 不需要提前在 Kafka 中创建,因为发送消息时 Kafka 会自动创建你指定的 topic;data 是消息内容。
(1)properties文件配置(也可以改成yml文件进行配置)
server.port=8082
#============== kafka ===================#
kafka.consumer.zookeeper.connect=192.168.71.61:2181,192.168.71.62:2181,192.168.71.63:2181
kafka.consumer.servers=192.168.71.61:9092,192.168.71.62:9092,192.168.71.63:9092
kafka.consumer.enable.auto.commit=true
kafka.consumer.session.timeout=6000
kafka.consumer.auto.commit.interval=100
kafka.consumer.auto.offset.reset=latest
kafka.consumer.topic=test
kafka.consumer.group.id=test
kafka.consumer.concurrency=10
在上面的配置中,server.port=8082 是这个 Spring Boot 应用本身的端口号;Kafka 服务器有 3 台,分别对应 3 个 ip 地址和端口,并在配置中指定了这些 kafka 服务器的地址;
kafka.consumer.enable.auto.commit=true //指定消息被消费之后自动提交偏移量(即消息的编号,表示消费到了哪个位置,消费者每消费完一条消息就会向kafka服务器汇报自己消费到的那个消息的编号,以便于下次继续消费)。
kafka.consumer.group.id=test //消费者组
kafka.consumer.auto.offset.reset=latest //从最近的地方开始消费
(2)定义kafka消费者配置类,并配置监听器
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
/**
*
* @ClassName: KafkaConfig
* @Description: TODO(定义kafka消费者配置类,并配置监听器)
* @author
* @date 2018年5月22日 下午5:38:26
*
*/
@Configuration
@EnableKafka
public class KafkaConfig {
@Value("${kafka.consumer.servers}")
private String servers;
@Value("${kafka.consumer.enable.auto.commit}")
private boolean enableAutoCommit;
@Value("${kafka.consumer.session.timeout}")
private String sessionTimeout;
@Value("${kafka.consumer.auto.commit.interval}")
private String autoCommitInterval;
@Value("${kafka.consumer.group.id}")
private String groupId;
@Value("${kafka.consumer.auto.offset.reset}")
private String autoOffsetReset;
@Value("${kafka.consumer.concurrency}")
private int concurrency;
@Bean
public KafkaListenerContainerFactory> kafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(concurrency);
factory.getContainerProperties().setPollTimeout(1500);
return factory;
}
public ConsumerFactory consumerFactory() {
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
public Map consumerConfigs() {
Map propsMap = new HashMap<>();
propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
return propsMap;
}
@Bean
public KafkaConsumer listener() {
return new KafkaConsumer();
}
}
(3)消费者
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import com.alibaba.fastjson.JSON;
/**
 * Kafka 消息监听类:监听 ${kafka.consumer.topic} 主题,
 * 把收到的 JSON 字符串反序列化为 KafkaMessage 实体。
 */
public class KafkaConsumer {

    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    /**
     * 消费一条消息。value 可能为 null(例如墓碑消息),因此先做判空。
     *
     * @param record 消费到的记录,key/value 均为 String(与 StringDeserializer 配置一致)
     */
    @KafkaListener(topics = "${kafka.consumer.topic}", containerFactory = "kafkaListenerContainerFactory")
    public void listen(ConsumerRecord<String, String> record) {
        String topic = record.topic();
        String value = record.value();
        try {
            if (value != null) {
                // 将kafka里的数据反序列化为实体
                KafkaMessage kafkaMessage = JSON.parseObject(value, KafkaMessage.class);
                // 在这里处理 kafkaMessage;目前仅记录,避免解析结果被悄悄丢弃
                logger.debug("接收到主题{}的消息: {}", topic, kafkaMessage);
            }
        } catch (Exception e) {
            // 异常作为最后一个独立参数传入,SLF4J 才会打印完整堆栈
            logger.error("接收主题为{}的kafka的消息时异常, 消息:{}", topic, value, e);
        }
    }
}