I have verified this code against Kafka kafka_2.10-0.10.0.1. It also passed testing against kafka_2.11-1.0.1, but I don't recommend this code for those brokers; for brokers at 0.10.1.X and later, use code written against the newer consumer API instead (see the sketch at the end of this post).
Note: do not run this against Kafka 0.10.1.1, or both the client and the server will report errors:
Client error: Connection with /xx.xx.x.xx disconnected
Server error: Caused by: java.lang.IllegalArgumentException: Invalid version for API key 3: 2
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.0.1</version>
</dependency>
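A note on this dependency: the kafka_2.10 core artifact ships the legacy kafka.javaapi classes (SimpleConsumer, TopicMetadataRequest, and friends) used below, and it transitively pulls in kafka-clients, which provides the new-style KafkaConsumer, so this single dependency covers both APIs in the code.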
import java.util.*;

import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaOffsetMonitor_0_10_0_1 {

    private static final Logger logger = LoggerFactory.getLogger(KafkaOffsetMonitor_0_10_0_1.class);

    private static Properties properties = new Properties();

    static {
        // Auto-commit is disabled: this tool only reads committed offsets and must never move them
        properties.put("enable.auto.commit", "false");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("session.timeout.ms", "30000");
        properties.put("auto.offset.reset", "earliest");
        properties.put("key.deserializer", StringDeserializer.class.getName());
        properties.put("value.deserializer", StringDeserializer.class.getName());
    }
    public static void main(String[] args) {
        String brokerStr = args[0]; // Kafka broker addresses without ports, e.g. 192.168.0.1,192.168.0.2
        String topic = args[1];     // topic name
        String group = args[2];     // consumer group name
        logger.info("brokerStr:" + brokerStr);
        logger.info("topic:" + topic);
        logger.info("group:" + group);
        int port = 9092;
        List<String> brokers = Arrays.asList(brokerStr.split(","));
        // Build the bootstrap.servers string: host1:9092,host2:9092,...
        StringBuilder sb = new StringBuilder();
        for (String broker : brokers) {
            sb.append(broker).append(":").append(port).append(",");
        }
        String servers = sb.substring(0, sb.length() - 1); // drop the trailing comma
        KafkaOffsetMonitor_0_10_0_1 monitor = new KafkaOffsetMonitor_0_10_0_1();
        Map<Integer, PartitionMetadata> partitionMetadataMap = monitor.getPartitionsMetadata(brokers, port, topic);
        // One consumer is enough for all partitions; there is no need to create and close one per partition
        KafkaConsumer<String, String> kafkaConsumer = monitor.getKafkaConsumer(group, topic, servers);
        long logSize = 0L;
        long sumOffset = 0L;
        for (Map.Entry<Integer, PartitionMetadata> entry : partitionMetadataMap.entrySet()) {
            String host = entry.getValue().leader().host();
            int partitionId = entry.getKey();
            String clientName = "Client-" + topic + "-" + partitionId;
            // Kafka exposes two constants for offset lookups:
            // kafka.api.OffsetRequest.EarliestTime() finds the beginning of the data in the log;
            // kafka.api.OffsetRequest.LatestTime() points just past the newest message (the log-end offset)
            long partitionOffset = monitor.getPartitionOffset(host, port, topic, partitionId, OffsetRequest.LatestTime(), clientName);
            logSize += partitionOffset;
            long committedOffset = monitor.getCommittedOffset(kafkaConsumer, topic, partitionId);
            sumOffset += committedOffset;
        }
        kafkaConsumer.close();
        long lag = logSize - sumOffset;
        logger.info("====================kafka=========================");
        logger.info("topic:" + topic);
        logger.info("logSize:" + logSize);
        logger.info("sumOffset:" + sumOffset);
        logger.info("lag:" + lag);
        logger.info("==================================================");
    }
    /**
     * Create a Kafka consumer (new consumer API).
     *
     * @param group   consumer group name
     * @param topic   topic name
     * @param servers Kafka bootstrap server list
     * @return a consumer bound to the given group
     */
    public KafkaConsumer<String, String> getKafkaConsumer(String group, String topic, String servers) {
        properties.put("bootstrap.servers", servers);
        properties.put("group.id", group);
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        // Subscribing is not strictly required for committed(), but it ties the consumer to the topic
        kafkaConsumer.subscribe(Arrays.asList(topic));
        return kafkaConsumer;
    }
    /**
     * Get the committed offset of one partition for the consumer group
     * (high-level / new consumer API).
     *
     * @param kafkaConsumer Kafka consumer
     * @param topic         topic name
     * @param partitionId   partition id
     * @return the committed offset, or 0 if the group has never committed for this partition
     */
    public long getCommittedOffset(KafkaConsumer<String, String> kafkaConsumer, String topic, int partitionId) {
        TopicPartition topicPartition = new TopicPartition(topic, partitionId);
        OffsetAndMetadata committed = kafkaConsumer.committed(topicPartition);
        if (committed == null) {
            // No offset has ever been committed for this group/partition
            return 0L;
        }
        return committed.offset();
    }
    /**
     * Get the log-end offset of one partition of the topic
     * (low-level SimpleConsumer API).
     *
     * @param host        partition leader host
     * @param port        port
     * @param topic       topic name
     * @param partitionId partition id
     * @param whichTime   offset lookup time, e.g. kafka.api.OffsetRequest.LatestTime()
     * @param clientName  client name
     * @return the requested offset, or 0 on error
     */
    public long getPartitionOffset(String host, int port, String topic, int partitionId, long whichTime, String clientName) {
        // Open a connection to the partition leader
        SimpleConsumer simpleConsumer = new SimpleConsumer(host, port, 100000, 64 * 1024, clientName);
        try {
            // Build the request: one (topic, partition) entry asking for a single offset
            TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
            Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
            requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
            kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
            // Send the request
            OffsetResponse response = simpleConsumer.getOffsetsBefore(request);
            // SimpleConsumer does not handle broker errors; the caller must check for them
            if (response.hasError()) {
                logger.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partitionId));
                return 0L;
            }
            long[] offsets = response.offsets(topic, partitionId);
            return offsets[0];
        } finally {
            // Close the connection even on the error path (the original leaked it there)
            simpleConsumer.close();
        }
    }
    /**
     * Get the partition metadata for a topic
     * (low-level SimpleConsumer API).
     *
     * @param brokers Kafka broker host list
     * @param port    port
     * @param topic   topic name
     * @return partition id -> partition metadata
     */
    public Map<Integer, PartitionMetadata> getPartitionsMetadata(List<String> brokers, int port, String topic) {
        // Keying by partition id deduplicates the PartitionMetadata returned by each broker
        Map<Integer, PartitionMetadata> result = new HashMap<>();
        for (String broker : brokers) {
            // Open a connection
            SimpleConsumer simpleConsumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "Client-PartitionsMetadata-" + broker);
            try {
                // Build and send the metadata request for this single topic
                TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
                TopicMetadataResponse response = simpleConsumer.send(request);
                // Collect the partition metadata from the response
                for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                        result.put(partitionMetadata.partitionId(), partitionMetadata);
                    }
                }
            } finally {
                simpleConsumer.close();
            }
        }
        return result;
    }
}
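As noted at the top, brokers at 0.10.1.X and later are better served by the new consumer API alone: endOffsets(), added in kafka-clients 0.10.1.0, replaces the whole SimpleConsumer machinery above, including leader discovery. Below is a minimal sketch of the same lag computation under that assumption; the class name KafkaLagSketch and the argument layout are mine, not part of the original code, and the property setup mirrors the class above.

import java.util.*;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

// A minimal sketch, assuming a kafka-clients 0.10.1.0+ dependency; not production code
public class KafkaLagSketch {
    public static void main(String[] args) {
        String servers = args[0]; // e.g. 192.168.0.1:9092,192.168.0.2:9092
        String topic = args[1];
        String group = args[2];
        Properties props = new Properties();
        props.put("bootstrap.servers", servers);
        props.put("group.id", group);
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Discover partitions without any SimpleConsumer metadata requests
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo info : consumer.partitionsFor(topic)) {
                partitions.add(new TopicPartition(topic, info.partition()));
            }
            // endOffsets() returns the log-end offset of every partition in one call
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            long logSize = 0L;
            long sumOffset = 0L;
            for (TopicPartition tp : partitions) {
                logSize += endOffsets.get(tp);
                OffsetAndMetadata committed = consumer.committed(tp);
                sumOffset += committed == null ? 0L : committed.offset();
            }
            System.out.println("lag = " + (logSize - sumOffset));
        }
    }
}

Because the consumer routes the list-offsets requests itself, this version never needs to know which broker leads which partition, which is exactly the bookkeeping that getPartitionsMetadata() and getPartitionOffset() exist to do above.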