kafka_2.10-0.10.1.X and later

1 Applies to kafka_2.10-0.10.1.X and later

I have tested this code myself against Kafka version kafka_2.10-0.10.1.1.

2 Dependency

Note: the Kafka version must be 0.10.1.1 or later. The code below relies on KafkaConsumer#endOffsets, which was only added in the 0.10.1.0 client (KIP-79), so with older versions both the client and the broker report errors.

Client-side error: Connection with /xx.xx.x.xx disconnected

Broker-side error: Caused by: java.lang.IllegalArgumentException: Invalid version for API key 3: 2
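
A minimal probe (broker address and topic name below are placeholders) that exercises that same endOffsets call and should reproduce this class of error against a pre-0.10.1 broker:

  import java.util.Collections;
  import java.util.Properties;
  import org.apache.kafka.clients.consumer.KafkaConsumer;
  import org.apache.kafka.common.TopicPartition;
  import org.apache.kafka.common.serialization.StringDeserializer;

  public class EndOffsetsProbe {
      public static void main(String[] args) {
          Properties props = new Properties();
          props.put("bootstrap.servers", "192.168.0.1:9092"); // placeholder broker address
          props.put("key.deserializer", StringDeserializer.class.getName());
          props.put("value.deserializer", StringDeserializer.class.getName());
          // endOffsets() exists only in clients >= 0.10.1.0 and requires a broker
          // that understands the matching request versions.
          try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
              System.out.println(consumer.endOffsets(
                      Collections.singleton(new TopicPartition("my-topic", 0))));
          }
      }
  }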

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.1.1</version>
</dependency>
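
Once the dependency is in place, a quick sanity check that the 0.10.1.1 client jar actually resolved on the classpath (and was not shadowed by an older transitive version) is to print the client version. Note that AppInfoParser is an internal Kafka utility class, so treat this as a debugging aid rather than a stable API:

  import org.apache.kafka.common.utils.AppInfoParser;

  public class ClientVersionCheck {
      public static void main(String[] args) {
          // Prints the version of the kafka-clients jar on the classpath,
          // expected to be "0.10.1.1" if the dependency above resolved correctly.
          System.out.println(AppInfoParser.getVersion());
      }
  }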

3 Code

  import org.apache.kafka.clients.consumer.KafkaConsumer;
  import org.apache.kafka.clients.consumer.OffsetAndMetadata;
  import org.apache.kafka.common.PartitionInfo;
  import org.apache.kafka.common.TopicPartition;
  import org.apache.kafka.common.serialization.StringDeserializer;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;
  import java.util.Map;
  import java.util.Properties;

  public class KafkaOffsetMonitor_0_10_1_1 {
    private static final Logger logger = LoggerFactory.getLogger(KafkaOffsetMonitor_0_10_1_1.class);

    private static Properties properties = new Properties();

    static {
        // Read-only monitoring: never auto-commit offsets from this tool,
        // so the monitored group's committed offsets are left untouched.
        properties.put("enable.auto.commit", "false");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("session.timeout.ms", "30000");
        properties.put("auto.offset.reset", "earliest");
        properties.put("key.deserializer", StringDeserializer.class.getName());
        properties.put("value.deserializer", StringDeserializer.class.getName());
    }

    public static void main(String[] args) {

        if (args.length < 3) {
            System.err.println("usage: KafkaOffsetMonitor_0_10_1_1 <brokers> <topic> <group>");
            System.exit(1);
        }
        String brokerStr = args[0]; // Kafka broker addresses without port, e.g. 192.168.0.1,192.168.0.2
        String topic = args[1];     // topic name
        String group = args[2];     // consumer group name
        logger.info("brokerStr: " + brokerStr);
        logger.info("topic: " + topic);
        logger.info("group: " + group);

        List<String> brokers = Arrays.asList(brokerStr.split(","));
        int port = 9092; // the default Kafka port is assumed for every broker

        StringBuilder sb = new StringBuilder();
        for (String broker : brokers) {
            sb.append(broker).append(":").append(port).append(",");
        }

        String servers = sb.substring(0, sb.length() - 1); // drop the trailing comma

        KafkaOffsetMonitor_0_10_1_1 monitor = new KafkaOffsetMonitor_0_10_1_1();

        KafkaConsumer<String, String> kafkaConsumer = monitor.getKafkaConsumer(group, topic, servers);

        List<PartitionInfo> partitionInfoList = monitor.getPartitionsMetadata(kafkaConsumer, topic);


        long logSize = 0L;   // sum of log-end offsets over all partitions
        long sumOffset = 0L; // sum of committed offsets over all partitions
        long lag;
        for (PartitionInfo partitionInfo : partitionInfoList) {
            long totalOffset = monitor.getPartitionOffset(kafkaConsumer, topic, partitionInfo.partition());
            logSize += totalOffset;
            long committedOffset = monitor.getCommittedOffset(kafkaConsumer, topic, partitionInfo.partition());
            sumOffset += committedOffset;
        }

        if (kafkaConsumer != null) {
            kafkaConsumer.close();
        }


        lag = logSize - sumOffset;
        logger.info("====================kafka=========================");
        logger.info("topic: " + topic);
        logger.info("logSize: " + logSize);
        logger.info("sumOffset: " + sumOffset);
        logger.info("lag: " + lag);
        logger.info("==================================================");
    }

    /**
     * Create a Kafka consumer for the given group.
     *
     * @param group   Kafka consumer group
     * @param topic   topic name
     * @param servers Kafka bootstrap server list
     * @return a KafkaConsumer subscribed to the topic
     */
    public KafkaConsumer<String, String> getKafkaConsumer(String group, String topic, String servers) {

        properties.put("bootstrap.servers", servers);
        properties.put("group.id", group);
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList(topic));
        return kafkaConsumer;
    }

    /**
     * Get a partition's committed offset by partitionId.
     * Uses the high-level (new consumer) API.
     *
     * @param kafkaConsumer Kafka consumer
     * @param topic         topic name
     * @param partitionId   partition id
     * @return the committed offset, or 0 if the group has never committed for this partition
     */
    public long getCommittedOffset(KafkaConsumer<String, String> kafkaConsumer, String topic, int partitionId) {

        TopicPartition topicPartition = new TopicPartition(topic, partitionId);
        OffsetAndMetadata committed = kafkaConsumer.committed(topicPartition);

        // committed() returns null when the group has no committed offset for this partition
        if (committed == null) {
            return 0L;
        }
        return committed.offset();
    }


    /**
     * Get the total log-end offset (logSize) across all partitions of a topic.
     * Uses the high-level (new consumer) API.
     * Not called from main above, which sums per-partition offsets instead;
     * kept as a convenience for callers that only need the topic total.
     *
     * @param kafkaConsumer Kafka consumer
     * @param topic         topic name
     * @return the sum of end offsets over all partitions
     */
    public long getTotalOffset(KafkaConsumer<String, String> kafkaConsumer, String topic) {

        long result = 0L;

        List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);

        List<TopicPartition> topicPartitions = new ArrayList<>();
        for (PartitionInfo partitionInfo : partitionInfoList) {
            topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }

        // endOffsets() (added in 0.10.1.0) returns the log-end offset per partition
        Map<TopicPartition, Long> map = kafkaConsumer.endOffsets(topicPartitions);

        for (Map.Entry<TopicPartition, Long> entry : map.entrySet()) {
            result += entry.getValue();
        }

        return result;
    }

    /**
     * Get the log-end offset (logSize) of a single partition by partitionId.
     * Uses the high-level (new consumer) API.
     *
     * @param kafkaConsumer Kafka consumer
     * @param topic         topic name
     * @param partitionId   partition id
     * @return the partition's log-end offset
     */
    public long getPartitionOffset(KafkaConsumer<String, String> kafkaConsumer, String topic, int partitionId) {

        long result = 0L;

        TopicPartition topicPartition = new TopicPartition(topic, partitionId);

        Map<TopicPartition, Long> map = kafkaConsumer.endOffsets(Collections.singleton(topicPartition));

        if (map != null && !map.isEmpty()) {
            for (Map.Entry<TopicPartition, Long> entry : map.entrySet()) {
                result += entry.getValue();
            }
        }
        return result;
    }


    /**
     * Get partition metadata for a topic.
     * Uses the high-level (new consumer) API.
     *
     * @param kafkaConsumer Kafka consumer
     * @param topic         topic name
     * @return metadata for every partition of the topic
     */
    public List<PartitionInfo> getPartitionsMetadata(KafkaConsumer<String, String> kafkaConsumer, String topic) {

        return kafkaConsumer.partitionsFor(topic);
    }
  }
