添加依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.0.0</version>
</dependency>
// 新建一个名为kb09two的topic,分区数为3,副本数为1(topic名与下文代码订阅的一致;3个分区才能让后面的多消费者示例真正并行消费)
kafka-topics.sh --create --zookeeper 192.168.233.133:2181 --topic kb09two --partitions 3 --replication-factor 1
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
/**
 * Minimal Kafka producer demo: sends ten string messages ("hello world230"
 * .. "hello world239") with key "hi" to topic "kb09two".
 */
public class MyProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // Address and port of one broker in the Kafka cluster (bootstrap only;
        // the client discovers the rest of the cluster from it).
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.233.133:9092");
        // Serialize message keys as strings.
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Serialize message values as strings.
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Durability level (0 / 1 / -1): -1 ("all") waits for every in-sync
        // replica to acknowledge the write.
        prop.put(ProducerConfig.ACKS_CONFIG, "-1");
        // try-with-resources: KafkaProducer is AutoCloseable. The original
        // never closed the producer, leaking sockets/threads and risking loss
        // of records still buffered when the JVM exits; close() flushes them.
        try (KafkaProducer<String, String> producer = new KafkaProducer<String, String>(prop)) {
            for (int i = 230; i < 240; i++) {
                // Build the message: (topic, key, value).
                ProducerRecord<String, String> producerRecord =
                        new ProducerRecord<>("kb09two", "hi", "hello world" + i);
                // Asynchronous send; records are batched by the client.
                producer.send(producerRecord);
                try {
                    // Throttle sends so the demo output is easy to follow.
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag (never swallow it) and stop sending.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
        System.out.println("消息发送完成!!!!");
    }
}
/**
 * Minimal Kafka consumer demo: a single consumer in group "G1" polls topic
 * "kb09two" forever and prints offset, key and value of each record.
 */
public class MyConsumer {
    public static void main(String[] args) {
        final Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.233.133:9092");
        // Deserialize message keys as strings.
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Deserialize message values as strings.
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Session timeout used by the broker to declare this consumer dead.
        prop.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // false = manual offset commits; we are responsible for committing below.
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Auto-commit interval; only takes effect when auto commit is enabled,
        // so it is inert here (offsets are stored in Kafka, not ZooKeeper,
        // for this client version).
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Start from the earliest offset when the group has no committed offset.
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "G1");
        // Create the consumer and subscribe to the demo topic.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop);
        consumer.subscribe(Collections.singleton("kb09two"));
        // Group G1 contains exactly one consumer, so it reads all partitions.
        while (true) {
            ConsumerRecords<String, String> poll = consumer.poll(100);
            for (ConsumerRecord<String, String> record : poll) {
                System.out.println(record.offset() + "\t" + record.key() + "\t" + record.value());
            }
            // BUG FIX: auto commit is disabled, but the original never committed
            // offsets, so every restart re-consumed the entire topic. Commit
            // synchronously after the batch has been processed.
            consumer.commitSync();
        }
    }
}
/**
 * Multi-threaded Kafka consumer demo: four consumers in the same group "G2"
 * share the partitions of topic "kb09two". Each record is delivered to
 * exactly one member of the group.
 */
public class MyConsumer {
    public static void main(String[] args) {
        final Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.233.133:9092");
        // Deserialize message keys as strings.
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Deserialize message values as strings.
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Session timeout used by the broker to declare a consumer dead.
        prop.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // false = manual offset commits; each thread commits after processing.
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Auto-commit interval; inert while auto commit is disabled.
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Start from the earliest offset when the group has no committed offset.
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // All four threads join the same group, G2.
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "G2");
        // One KafkaConsumer per thread (KafkaConsumer itself is NOT thread-safe,
        // so each thread must own its own instance). Threads beyond the
        // partition count stay idle.
        for (int i = 0; i < 4; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop);
                    consumer.subscribe(Collections.singleton("kb09two"));
                    while (true) {
                        ConsumerRecords<String, String> poll = consumer.poll(100);
                        for (ConsumerRecord<String, String> record : poll) {
                            System.out.println(Thread.currentThread().getName() + "\t" + record.offset() + "\t" + record.key() + "\t" + record.value());
                        }
                        // BUG FIX: auto commit is disabled, but the original never
                        // committed offsets; commit after each processed batch so
                        // restarts do not re-consume everything.
                        consumer.commitSync();
                    }
                }
            }).start();
        }
    }
}