1. Kafka Introduction
Apache Kafka® is a distributed streaming platform.
Official site: https://kafka.apache.org/
2. Maven Dependencies
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.12</artifactId>
    <version>0.10.2.0</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.2.0</version>
</dependency>
3. Manually Creating a Topic
- Kafka installation steps are omitted here.
- Make sure Kafka and ZooKeeper are installed (prefer a standalone ZooKeeper over the one bundled with Kafka).
1) Start the ZooKeeper service: bin/zkServer.sh start conf/zoo.cfg
2) Start the Kafka service:
Foreground: bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
Background: nohup bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties >logs/catalina.log &
3) Create a topic (a programmatic sketch follows this list): bin/kafka-topics.sh --create --zookeeper localhost:2181/kafka --replication-factor 1 --partitions 1 --topic partition-topic
4) List and describe the topics registered in ZooKeeper:
bin/kafka-topics.sh --list --zookeeper localhost:2181/kafka
bin/kafka-topics.sh --describe --zookeeper localhost:2181/kafka --topic partition-topic
5) Send messages: bin/kafka-console-producer.sh --broker-list localhost:9092 --topic partition-topic
6) Consume messages: bin/kafka-console-consumer.sh --zookeeper localhost:2181/kafka --from-beginning --topic partition-topic
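As referenced in step 3, the topic can also be created from Java. The sketch below uses the pre-0.11 Scala admin API (AdminUtils/ZkUtils) that ships with the kafka_2.12 broker artifact from section 2; the class name TopicCreateDemo, the timeout values, and the localhost:2181/kafka connection string are assumptions matching the setup above, and the exact AdminUtils signature can differ between Kafka versions, so treat this as a sketch rather than a drop-in utility.
package com.zxr.micro.socket.kafka;
import java.util.Properties;
import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.security.JaasUtils;
// A sketch of programmatic topic creation with the 0.10.x Scala admin API.
// Assumes ZooKeeper is reachable at localhost:2181/kafka as configured above.
public class TopicCreateDemo {
    public static void main(String[] args) {
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181/kafka", 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        try {
            // Topic name, partitions and replication factor mirror the CLI command in step 3.
            AdminUtils.createTopic(zkUtils, "partition-topic", 1, 1,
                    new Properties(), RackAwareMode.Enforced$.MODULE$);
        } finally {
            zkUtils.close();
        }
    }
}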
4. Producer
package com.zxr.micro.socket.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.Future;

public class ProducerDemo {

    private static final String TOPIC = "partition-topic";
    private static final String BOOTSTRAP_SERVERS = "127.0.0.1:9092";

    // One shared producer: KafkaProducer is thread-safe and expensive to create,
    // so it should not be re-created for every send.
    private static final KafkaProducer<String, String> PRODUCER = initProducer();

    private static KafkaProducer<String, String> initProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        props.put(ProducerConfig.ACKS_CONFIG, "all");              // wait for all in-sync replicas to acknowledge
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 3000);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(props);
    }

    public static Future<RecordMetadata> sendOne(String key, String value) {
        return PRODUCER.send(new ProducerRecord<>(TOPIC, key, value));
    }

    public static void main(String[] args) throws Exception {
        Future<RecordMetadata> aa = ProducerDemo.sendOne("03", "dd");
        System.out.println(">>>>>>>>>>: " + aa.get().toString());
        Thread.sleep(3000);
        Future<RecordMetadata> bb = ProducerDemo.sendOne("04", "ee");
        System.out.println(">>>>>>>>>>: " + bb.get().toString());
        Thread.sleep(3000);
        Future<RecordMetadata> cc = ProducerDemo.sendOne("03", "ff");
        System.out.println(">>>>>>>>>>: " + cc.get().toString());
        PRODUCER.close();                                          // flush pending records and release resources
    }
}
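send() is asynchronous and returns a Future; blocking on get() as in main() confirms each write but serializes them. The non-blocking form passes a Callback to send(), as sketched below; sendOneAsync is a hypothetical extra method for ProducerDemo and reuses the PRODUCER and TOPIC fields above.
// A minimal sketch of an asynchronous send; a hypothetical addition to ProducerDemo above.
public static void sendOneAsync(String key, String value) {
    PRODUCER.send(new ProducerRecord<>(TOPIC, key, value), (metadata, exception) -> {
        if (exception != null) {
            exception.printStackTrace();                   // delivery failed after the configured retries
        } else {
            System.out.printf("sent to partition %d at offset %d%n",
                    metadata.partition(), metadata.offset());
        }
    });
}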
- Producer Java API
5. Consumer
package com.zxr.micro.socket.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Collections;
import java.util.Properties;

public class ConsumerDemo {

    private static final String TOPIC = "partition-topic";
    private static final String BOOTSTRAP_SERVERS = "127.0.0.1:9092";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 50000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");   // offsets are committed automatically
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(TOPIC));

        while (true) {
            // poll() blocks for up to 100 ms waiting for new records.
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}
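The loop above relies on auto-commit, so offsets can be committed before a polled batch has actually been processed. A manual-commit variant is sketched below; it assumes ENABLE_AUTO_COMMIT_CONFIG is set to "false" and otherwise reuses the same consumer and subscription.
// A minimal sketch of at-least-once consumption with manual offset commits
// (requires ENABLE_AUTO_COMMIT_CONFIG = "false" in the properties above).
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        System.out.printf("offset = %d, key = %s, value = %s%n",
                record.offset(), record.key(), record.value());
    }
    consumer.commitSync();   // commit only after the whole batch has been handled
}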
- Consumer Java API