I have tested this code successfully; the Kafka broker version was kafka_2.10-0.8.2.1. The code uses the legacy Scala clients (SimpleConsumer, BlockingChannel, and the kafka.javaapi request/response classes), which ship in the kafka_2.10 core artifact:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.0.1</version>
</dependency>
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.OffsetMetadataAndError;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetFetchRequest;
import kafka.javaapi.OffsetFetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.network.BlockingChannel;
public class KafkaOffsetMonitor_0_8_2_1 {
private static final Logger logger = LoggerFactory.getLogger(KafkaOffsetMonitor_0_8_2_1.class);
public static void main(String[] args) {
String brokerStr = args[0]; // comma-separated kafka broker addresses without ports, e.g. 192.168.0.1,192.168.0.2
String topic = args[1]; // topic name
String group = args[2]; // consumer group name
logger.info("brokerStr" + ":" + brokerStr);
logger.info("topic" + ":" + topic);
logger.info("group" + ":" + group);
List<String> brokers = Arrays.asList(brokerStr.split(","));
int port = 9092;
String clientId = "Client-" + topic;
int correlationId = 0;
KafkaOffsetMonitor_0_8_2_1 monitor = new KafkaOffsetMonitor_0_8_2_1();
Map<Integer, PartitionMetadata> partitionMetadataMap = monitor.getPartitionsMetadata(brokers, port, topic);
List<TopicAndPartition> topicAndPartitions = new ArrayList<>();
for (Map.Entry<Integer, PartitionMetadata> entry : partitionMetadataMap.entrySet()) {
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, entry.getKey());
topicAndPartitions.add(topicAndPartition);
}
Map<TopicAndPartition, OffsetMetadataAndError> offsets = monitor.getCommittedOffset(brokers.get(0), port, group, topicAndPartitions, correlationId, clientId);
long logSize = 0L;
long sumOffset = 0L;
long lag = 0L;
for (Map.Entry<Integer, PartitionMetadata> entry : partitionMetadataMap.entrySet()) {
Integer partitionId = entry.getKey();
String host = entry.getValue().leader().host();
String clientName = "Client-" + topic + "-" + partitionId;
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
OffsetMetadataAndError committedOffsetMetadata = offsets.get(topicAndPartition);
// skip partitions for which this broker could not serve the committed offset
if (committedOffsetMetadata.error() != ErrorMapping.NotCoordinatorForConsumerCode()) {
long committedOffset = committedOffsetMetadata.offset();
// offset() returns -1 when the group has never committed an offset for this partition
if (committedOffset >= 0) {
sumOffset += committedOffset;
}
}
// Kafka provides two special time constants:
// kafka.api.OffsetRequest.EarliestTime() finds the beginning of the data in the log
// kafka.api.OffsetRequest.LatestTime() finds the offset right after the newest message (the log-end offset)
long partitionOffset = monitor.getPartitionOffset(host, port, topic, partitionId, OffsetRequest.LatestTime(), clientName);
logSize += partitionOffset;
}
lag = logSize - sumOffset;
logger.info("====================kafka=========================");
logger.info("topic:" + topic);
logger.info("logSize:" + logSize);
logger.info("sumOffset:" + sumOffset);
logger.info("lag:" + lag);
logger.info("==================================================");
}
/**
* Fetches the offset of the given partition of a topic at the given time
* (with LatestTime() this is the log-end offset, i.e. the log size).
* Uses the low-level API.
*
* @param host ip of the partition leader
* @param port port
* @param topic topic name
* @param partitionId partition id
* @param whichTime timestamp to look up, e.g. kafka.api.OffsetRequest.LatestTime()
* @param clientName client name
* @return the requested offset, or 0L on error
*/
public long getPartitionOffset(String host, int port, String topic, int partitionId, long whichTime, String clientName) {
// create the connection
SimpleConsumer simpleConsumer = new SimpleConsumer(host, port, 100000, 64 * 1024, clientName);
// build the request
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
// send the request
OffsetResponse response = simpleConsumer.getOffsetsBefore(request);
/**
* SimpleConsumer does not handle broker errors; callers must check for them themselves
*/
if (response.hasError()) {
logger.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partitionId));
simpleConsumer.close();
return 0L;
}
long[] offsets = response.offsets(topic, partitionId);
simpleConsumer.close();
return offsets[0];
}
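// A minimal convenience sketch (not part of the original code): the earliest offset still
// retained in the log can be fetched the same way by passing EarliestTime(); the difference
// between the latest and earliest offsets is the number of messages currently in the partition.
public long getEarliestPartitionOffset(String host, int port, String topic, int partitionId, String clientName) {
return getPartitionOffset(host, port, topic, partitionId, OffsetRequest.EarliestTime(), clientName);
}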
/**
* Fetches all committed offsets of the consumer group for the given partitions.
* (Note: this applies to 0.8.x and earlier; against 0.9+ the returned offsets are
* negative, because offsets are no longer committed to ZooKeeper. A version-1 sketch
* follows this method.)
* Uses the low-level API.
*
* @param broker kafka broker ip
* @param port port
* @param group consumer group
* @param topicAndPartitions list of TopicAndPartition to fetch offsets for
* @param correlationId correlation id (a number chosen by the client to uniquely identify this request; the server writes the same correlationId into the response so the client can match responses to requests)
* @param clientId client id
* @return
*/
public Map<TopicAndPartition, OffsetMetadataAndError> getCommittedOffset(String broker, int port, String group, List<TopicAndPartition> topicAndPartitions, int correlationId, String clientId) {
// create the connection
BlockingChannel channel = new BlockingChannel(broker, port, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
channel.connect();
// build the request
OffsetFetchRequest offsetFetchRequest = new OffsetFetchRequest(group, topicAndPartitions,
(short) 0, // by default bind to version 0 so that it fetches from ZooKeeper
correlationId,
clientId);
// send the request
channel.send(offsetFetchRequest.underlying());
// read the response
OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
Map<TopicAndPartition, OffsetMetadataAndError> offsets = fetchResponse.offsets();
if (channel.isConnected()) {
channel.disconnect();
}
return offsets;
}
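/**
* A hypothetical sketch, NOT verified like the rest of this class: against 0.9+ brokers,
* committed offsets live in the internal __consumer_offsets topic instead of ZooKeeper, so
* the fetch has to use request version 1 (or higher) and has to be sent to the group's offset
* coordinator. This assumes the 0.10.x client classes kafka.api.GroupCoordinatorRequest and
* kafka.javaapi.GroupCoordinatorResponse (renamed from ConsumerMetadataRequest/-Response in 0.9).
*/
public Map<TopicAndPartition, OffsetMetadataAndError> getKafkaCommittedOffset(String broker, int port, String group, List<TopicAndPartition> topicAndPartitions, int correlationId, String clientId) {
BlockingChannel channel = new BlockingChannel(broker, port, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
channel.connect();
// step 1: ask any broker for the group's offset coordinator
channel.send(new kafka.api.GroupCoordinatorRequest(group, kafka.api.GroupCoordinatorRequest.CurrentVersion(), correlationId, clientId));
kafka.javaapi.GroupCoordinatorResponse coordinatorResponse = kafka.javaapi.GroupCoordinatorResponse.readFrom(channel.receive().payload());
kafka.cluster.BrokerEndPoint coordinator = coordinatorResponse.coordinator();
// step 2: reconnect to the coordinator if it is a different broker
if (!(coordinator.host().equals(broker) && coordinator.port() == port)) {
channel.disconnect();
channel = new BlockingChannel(coordinator.host(), coordinator.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
channel.connect();
}
// step 3: version 1 makes the coordinator read from Kafka storage instead of ZooKeeper
OffsetFetchRequest fetchRequest = new OffsetFetchRequest(group, topicAndPartitions, (short) 1, correlationId, clientId);
channel.send(fetchRequest.underlying());
OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
Map<TopicAndPartition, OffsetMetadataAndError> offsets = fetchResponse.offsets();
channel.disconnect();
return offsets;
}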
/**
* Fetches the partition metadata of the topic.
* Uses the low-level API.
*
* @param brokers list of kafka broker ips
* @param port port
* @param topic topic name
* @return
*/
public Map<Integer, PartitionMetadata> getPartitionsMetadata(List<String> brokers, int port, String topic) {
// use a map to deduplicate PartitionMetadata across brokers
Map<Integer, PartitionMetadata> result = new HashMap<>();
for (String broker : brokers) {
// create the connection
SimpleConsumer simpleConsumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "Client-PartitionsMetadata-" + broker);
// build the request
List<String> topics = Collections.singletonList(topic);
TopicMetadataRequest request = new TopicMetadataRequest(topics);
// send the request
TopicMetadataResponse response = simpleConsumer.send(request);
// read the response
for (TopicMetadata topicMetadata : response.topicsMetadata()) {
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
result.put(partitionMetadata.partitionId(), partitionMetadata);
}
}
simpleConsumer.close();
}
return result;
}
}
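For reference, the monitor takes three positional arguments (broker list, topic, consumer group) and logs the total log size, the total committed offset, and the lag, computed as lag = logSize - sumOffset. A hypothetical invocation (the broker addresses, topic, and group below are placeholders):
// placeholder arguments; substitute your own broker list, topic, and consumer group
KafkaOffsetMonitor_0_8_2_1.main(new String[] {"192.168.0.1,192.168.0.2", "my-topic", "my-group"});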