4.1 环境准备
1)在eclipse中创建一个java工程
2)在工程的根目录创建一个lib文件夹
3)解压kafka安装包,将安装包libs目录下的jar包拷贝到工程的lib目录下,并build path。
4)启动zk和kafka集群,在kafka集群中打开一个消费者
[itstar@bigdata11 kafka]$ bin/kafka-console-consumer.sh --zookeeper
bigdata11:2181 --topic first
4.2 Kafka生产者Java API
4.2.1 创建生产者(过时的API)
package com.itstar.kafka;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class OldProducer {
@SuppressWarnings("deprecation")
public static void main(String[] args) {
Properties properties = new Properties();
properties.put("metadata.broker.list", "bigdata11:9092");
properties.put("request.required.acks", "1");
properties.put("serializer.class", "kafka.serializer.StringEncoder");
Producer
ProducerConfig(properties));
KeyedMessage String>("first", "hello world"); producer.send(message ); } } package com.itstar.kafka; import java.util.Properties; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; public class NewProducer { public static void main(String[] args) { Properties props = new Properties(); // Kafka服务端的主机名和端口号 props.put("bootstrap.servers", "bigdata12:9092"); // 等待所有副本节点的应答 props.put("acks", "all"); // 消息发送最大尝试次数 props.put("retries", 0); // 一批消息处理大小 props.put("batch.size", 16384); // 请求延时 props.put("linger.ms", 1); // 发送缓存区内存大小 props.put("buffer.memory", 33554432); // key序列化 props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // value序列化 props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); KafkaProducer for (int i = 0; i < 50; i++) { producer.send(new ProducerRecord Integer.toString(i), "hello world-" + i)); } producer.close(); } } package com.itstar.kafka; import java.util.Properties; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; public class CallBackProducer { public static void main(String[] args) { Properties props = new Properties(); // Kafka服务端的主机名和端口号 props.put("bootstrap.servers", "bigdata12:9092"); // 等待所有副本节点的应答 props.put("acks", "all"); // 消息发送最大尝试次数 props.put("retries", 0); // 一批消息处理大小 props.put("batch.size", 16384); // 增加服务端请求延时 props.put("linger.ms", 1); // 发送缓存区内存大小 props.put("buffer.memory", 33554432); // key序列化 props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // value序列化 props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); KafkaProducer (props); for (int i = 0; i < 50; i++) { kafkaProducer.send(new ProducerRecord "hello" + i), new Callback() { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { if (metadata != null) { System.out.println(metadata.partition() + "---" + metadata.offset()); } } }); } kafkaProducer.close(); } } 0)需求:将所有数据存储到topic的第0号分区上 1)定义一个类实现Partitioner接口,重写里面的方法(过时API) package com.itstar.kafka; import java.util.Map; import kafka.producer.Partitioner; public class CustomPartitioner implements Partitioner { public CustomPartitioner() { super(); } @Override public int partition(Object key, int numPartitions) { // 控制分区 return 0; } } 2)自定义分区(新API) package com.itstar.kafka; import java.util.Map; import org.apache.kafka.clients.producer.Partitioner; import org.apache.kafka.common.Cluster; public class CustomPartitioner implements Partitioner { @Override public void configure(Map } @Override public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { // 控制分区 return 0; } @Override public void close() { } } 3)在代码中调用 package com.itstar.kafka; import java.util.Properties; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; public class PartitionerProducer { public static void main(String[] args) { Properties props = new Properties(); // Kafka服务端的主机名和端口号 props.put("bootstrap.servers", "bigdata12:9092"); // 等待所有副本节点的应答 props.put("acks", "all"); // 消息发送最大尝试次数 props.put("retries", 0); // 一批消息处理大小 props.put("batch.size", 16384); // 增加服务端请求延时 props.put("linger.ms", 1); // 发送缓存区内存大小 props.put("buffer.memory", 33554432); // key序列化 props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // value序列化 props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // 自定义分区 props.put("partitioner.class", "com.itstar.kafka.CustomPartitioner"); Producer producer.send(new ProducerRecord "itstar")); producer.close(); } } 4)测试 (1)在bigdata11上监控/opt/module/kafka/logs/目录下fifirst主题3个分区的log日志动态变化情况 [itstar@bigdata11 first-0]$ tail -f 00000000000000000000.log [itstar@bigdata11 first-1]$ tail -f 00000000000000000000.log [itstar@bigdata11 first-2]$ tail -f 00000000000000000000.log (2)发现数据都存储到指定的分区了。 0)在控制台创建发送者 [itstar@bigdata13 kafka]$ bin/kafka-console-producer.sh --broker-list bigdata11:9092 --topic first >hello world 1)创建消费者(过时API) package com.itstar.kafka.consume; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import kafka.consumer.Consumer; import kafka.consumer.ConsumerConfig; import kafka.consumer.ConsumerIterator; import kafka.consumer.KafkaStream; import kafka.javaapi.consumer.ConsumerConnector; public class CustomConsumer { @SuppressWarnings("deprecation") public static void main(String[] args) { Properties properties = new Properties(); properties.put("zookeeper.connect", "bigdata11:2181"); properties.put("group.id", "g1"); properties.put("zookeeper.session.timeout.ms", "500"); properties.put("zookeeper.sync.time.ms", "250"); properties.put("auto.commit.interval.ms", "1000"); // 创建消费者连接器 ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(properties)); HashMap topicCount.put("first", 1); Map consumer.createMessageStreams(topicCount); KafkaStream ConsumerIterator while (it.hasNext()) { System.out.println(new String(it.next().message())); } } } 2)官方提供案例(自动维护消费情况)(新API) package com.itstar.kafka.consume; import java.util.Arrays; import java.util.Properties; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; public class CustomNewConsumer { public static void main(String[] args) { Properties props = new Properties(); // 定义kakfa 服务的地址,不需要将所有broker指定上 props.put("bootstrap.servers", "bigdata11:9092"); // 制定consumer group props.put("group.id", "test"); // 是否自动确认offset props.put("enable.auto.commit", "true"); // 自动确认offset的时间间隔 props.put("auto.commit.interval.ms", "1000"); // key的序列化类 props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // value的序列化类 props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // 定义consumer KafkaConsumer // 消费者订阅的topic, 可同时订阅多个 consumer.subscribe(Arrays.asList("first", "second","third")); while (true) { // 读取数据,读取超时时间为100ms ConsumerRecords for (ConsumerRecord System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value()); } } }4.2.2 创建生产者(新API)
4.2.3 创建生产者带回调函数(新API)
4.2.4 自定义分区生产者
4.3 Kafka消费者Java API