程序运行所需的环境:ZooKeeper 和 Kafka
- 可以参考小编的文章进行部署环境:https://www.jianshu.com/p/941495613738
添加依赖包
compile group: 'org.apache.kafka', name: 'kafka_2.13', version: '2.5.0'
编写生产者测试类:
package com.liu.yue.xin.chen.kafka;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
/**
* kafka生产者
*
* @bk https://home.cnblogs.com/u/huanuan/
* @ https://www.jianshu.com/u/d29cc7d7ca49
* @Author 六月星辰 2020年5月22日
*/
/**
 * Kafka producer demo: publishes a fixed batch of String records to a local broker.
 *
 * @bk https://home.cnblogs.com/u/huanuan/
 * @ https://www.jianshu.com/u/d29cc7d7ca49
 * @Author LiuYueXingChen 2020-05-22
 */
public class KafkaProducerDemo {
    /** Topic all demo records are published to. */
    private final static String TOPIC = "liuyue";
    // Typed <String,String> to match the String serializers configured below;
    // raw types would forfeit compile-time checking of keys/values.
    private final KafkaProducer<String, String> producer;
    private KafkaProducerDemo() {
        Properties props = new Properties();
        // Single local broker.
        props.put("bootstrap.servers", "127.0.0.1:9092");
        // "all": wait for the full in-sync replica set to acknowledge each record.
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(props);
    }
    /**
     * Sends 39 demo records (keys {@code liuyue-1} .. {@code liuyue-39}) to {@link #TOPIC},
     * echoing each payload to stderr, then closes the producer.
     *
     * @bk https://home.cnblogs.com/u/huanuan/
     * @ https://www.jianshu.com/u/d29cc7d7ca49
     * @Author LiuYueXingChen 2020-05-22
     */
    private void produce() {
        try {
            for (int i = 1; i < 40; i++) {
                String key = "liuyue-" + i;
                String data = "Demo kafka message:" + key;
                producer.send(new ProducerRecord<>(TOPIC, key, data));
                System.err.println("values = " + data);
            }
        } finally {
            // close() flushes buffered records; finally guarantees the client is
            // released even if send() throws (the original leaked it on failure).
            producer.close();
        }
    }
    public static void main(String[] args) {
        new KafkaProducerDemo().produce();
    }
}
编写消费者测试类:
package com.liu.yue.xin.chen.kafka;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
/**
* kafka 消费者
*
* @bk https://home.cnblogs.com/u/huanuan/
* @ https://www.jianshu.com/u/d29cc7d7ca49
* @Author 六月星辰 2020年5月22日
*/
/**
 * Kafka consumer demo: subscribes to a topic on a local broker and prints
 * every record it receives, forever.
 *
 * @bk https://home.cnblogs.com/u/huanuan/
 * @ https://www.jianshu.com/u/d29cc7d7ca49
 * @Author LiuYueXingChen 2020-05-22
 */
public class KafkaConsumerDemo {
    /** Topic this demo subscribes to. */
    private final static String TOPIC = "liuyue";
    // Instance field (the original was static but assigned in the instance
    // constructor, so a second instance would silently replace the first's
    // client). Typed <String,String> to match the String deserializers below.
    private final KafkaConsumer<String, String> consumer;
    private KafkaConsumerDemo() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        // Independent consumer-group id for this demo.
        props.put("group.id", "test2");
        // Commit consumed offsets automatically...
        props.put("enable.auto.commit", "true");
        // ...at this interval (ms).
        props.put("auto.commit.interval.ms", "1000");
        // Session timeout before the broker considers this consumer dead.
        props.put("session.timeout.ms", "30000");
        // With no committed offset, start from the earliest available message.
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
    }
    /**
     * Polls {@link #TOPIC} in an endless loop, printing offset, key and value
     * of each record to stderr.
     */
    void consume() {
        consumer.subscribe(Arrays.asList(TOPIC));
        while (true) {
            // poll(long) is deprecated since Kafka 2.0; use the Duration overload.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.err.printf("offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
                System.out.println();
            }
        }
    }
    public static void main(String[] args) {
        new KafkaConsumerDemo().consume();
    }
}
运行生产者测试类
运行消费者测试类:
总结:测试kafka生产和消费都成功了~~~