Kafka learning: testing the Java API

1. Configuration

package com.storm.storm_my.kafka;

/**
 * 
 * @author Peng.Li
 *
 */
public class KafkaConfigApiConstant {

    /**
     * Configuration keys.
     */
    public interface KafkaPropertiesKeys {

        public static final String ZK_CONNECT = "zookeeper.connect";
        public static final String ZK_CONNECTIONTIMEOUT_MS = "zookeeper.connection.timeout.ms";
        public static final String ZK_SESSION_TIMEOUT_MS = "zookeeper.session.timeout.ms";
        public static final String ZK_SYNC_TIME_MS = "zookeeper.sync.time.ms";
        // How often consumed offsets are auto-committed to ZooKeeper. Default: 60 * 1000
        public static final String AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms";
        // After consuming a certain number of messages, the consumer automatically commits offsets to ZooKeeper.
        // Note: offsets are not committed once per message; they are held locally (in memory) and committed periodically. Default: true
        public static final String AUTO_COMMIT_ENABLE = "auto.commit.enable";
        public static final String TOPIC = "topic";
        public static final String SERIALIZER_CLASS = "serializer.class";
        public static final String METADATA_BROKER_LIST = "metadata.broker.list";
        public static final String GROUP_ID = "group.id";
        public static final String FETCH_MESSAGE_MAX_BYTES = "fetch.message.max.bytes";
        // Maximum number of message chunks buffered on the consumer side
        public static final String QUEUED_MAX_MESSAGE_CHUNKS = "queued.max.message.chunks";
        public static final String PARTITIONER_CLASS = "partitioner.class";
        public static final String FETCH_SIZE = "fetch.size";
        /**
         * Maximum size of a single message the producer may send
         */
        public static final String MAX_MESSAGE_SIZE = "max.message.size";
    }

    /**
     * Configuration values.
     */
    public interface KafkaPropertiesValues {
        // 1. ZooKeeper settings
        public final static String ZK_CONNECT = "192.168.14.100:2181,192.168.14.105:2181,192.168.14.102:2181";
        public final static int ZK_CONNECTIONTIMEOUT_MS = 6000;
        public final static int ZK_SESSION_TIMEOUT_MS = 6000;
        public final static int ZK_SYNC_TIME_MS = 6000;
        public final static int AUTO_COMMIT_INTERVAL_MS = 1000;
        public final static boolean AUTO_COMMIT_ENABLE = true;

        // 2. Shared settings
        public final static String TOPIC = "lp_topic";
        public final static String GROUP_ID = "lp_group1";

        // 3. Kafka consumer settings
        public static final int FETCH_MESSAGE_MAX_BYTES = 2 * 1024;
        public static final int FETCH_SIZE = 2 * 1024;
        // Number of message chunks the consumer buffers ahead of consumption
        public final static int QUEUED_MAX_MESSAGE_CHUNKS = 10;
        // 4. Kafka producer settings
        public final static String METADATA_BROKER_LIST = "192.168.14.100:9092,192.168.14.105:9092,192.168.14.102:9092";
        public final static String PARTITIONER_CLASS = "kafka.producer.DefaultPartitioner";
        public static final String SERIALIZER_CLASS = "kafka.serializer.StringEncoder";
        public static final int MAX_MESSAGE_SIZE = 1024 * 1024;

    }
}
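
Both examples assume the topic lp_topic already exists. Below is a minimal bootstrap sketch (not part of the original post), assuming Kafka 0.8.1+ where kafka.admin.AdminUtils and the kafka.utils.ZKStringSerializer$ companion object are available on the classpath:

package com.storm.storm_my.kafka;

import java.util.Properties;

import org.I0Itec.zkclient.ZkClient;

import kafka.admin.AdminUtils;
import kafka.utils.ZKStringSerializer$;

public class TopicBootstrap {

    public static void main(String[] args) {
        // Kafka's admin helpers expect ZKStringSerializer, not ZkClient's default serializer.
        ZkClient zkClient = new ZkClient(KafkaConfigApiConstant.KafkaPropertiesValues.ZK_CONNECT, 6000, 6000, ZKStringSerializer$.MODULE$);
        // Topic name, 1 partition, replication factor 1, no per-topic overrides.
        AdminUtils.createTopic(zkClient, KafkaConfigApiConstant.KafkaPropertiesValues.TOPIC, 1, 1, new Properties());
        zkClient.close();
    }
}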

2. Kafka consumer

package com.storm.storm_my.kafka;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import com.storm.storm_my.kafka.KafkaConfigApiConstant.KafkaPropertiesValues;
import com.storm.storm_my.kafka.KafkaConfigApiConstant.KafkaPropertiesKeys;

/**
 * 
 * @author Peng.Li
 *
 */
public class KafkaConsumer implements Runnable {

    private ConsumerConnector consumer;

    private String topic;

    /**
     * @param topic the topic to consume from
     */
    public KafkaConsumer(String topic) {
        consumer = Consumer.createJavaConsumerConnector(this.newConsumerConfig());
        this.topic = topic;
    }

    /**
     * @return the consumer configuration
     */
    public ConsumerConfig newConsumerConfig() {

        Properties props = new Properties();
        props.put(KafkaPropertiesKeys.ZK_CONNECT, KafkaPropertiesValues.ZK_CONNECT);
        props.put(KafkaPropertiesKeys.ZK_CONNECTIONTIMEOUT_MS, String.valueOf(KafkaPropertiesValues.ZK_CONNECTIONTIMEOUT_MS));
        props.put(KafkaPropertiesKeys.ZK_SESSION_TIMEOUT_MS, String.valueOf(KafkaPropertiesValues.ZK_SESSION_TIMEOUT_MS));
        props.put(KafkaPropertiesKeys.ZK_SYNC_TIME_MS, String.valueOf(KafkaPropertiesValues.ZK_SYNC_TIME_MS));
        props.put(KafkaPropertiesKeys.AUTO_COMMIT_ENABLE, String.valueOf(KafkaPropertiesValues.AUTO_COMMIT_ENABLE));
        props.put(KafkaPropertiesKeys.TOPIC, KafkaPropertiesValues.TOPIC);
        props.put(KafkaPropertiesKeys.GROUP_ID, KafkaPropertiesValues.GROUP_ID);
        props.put(KafkaPropertiesKeys.FETCH_MESSAGE_MAX_BYTES, String.valueOf(KafkaPropertiesValues.FETCH_MESSAGE_MAX_BYTES));
        props.put(KafkaPropertiesKeys.QUEUED_MAX_MESSAGE_CHUNKS, String.valueOf(KafkaPropertiesValues.QUEUED_MAX_MESSAGE_CHUNKS));
        props.put(KafkaPropertiesKeys.FETCH_SIZE, String.valueOf(KafkaPropertiesValues.FETCH_SIZE));

        return new ConsumerConfig(props);
    }

    @Override
    public void run() {
        // Ask for a single stream on this topic; hasNext() blocks until a message arrives.
        Map<String, Integer> topicMap = new HashMap<String, Integer>();
        topicMap.put(topic, Integer.valueOf(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> topicOutputStreamMap = consumer.createMessageStreams(topicMap);
        KafkaStream<byte[], byte[]> topicStream = topicOutputStreamMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> streamIt = topicStream.iterator();
        while (streamIt.hasNext()) {
            System.out.println("Receive -> [" + new String(streamIt.next().message(), StandardCharsets.UTF_8) + "]");
            try {
                Thread.sleep(1000 * 3);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop consuming.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

}
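
run() requests a single stream, so consumption is single-threaded. As a sketch (not in the original post), the same high-level consumer API can fan out across threads by requesting several streams and giving each its own worker; the thread count of 3 below is an illustrative assumption and should normally match the topic's partition count:

    // Alternative run() body for KafkaConsumer; also needs imports of
    // java.util.concurrent.ExecutorService and java.util.concurrent.Executors.
    int threads = 3; // assumed for illustration; align with the partition count
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, Integer.valueOf(threads));
    Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic = consumer.createMessageStreams(topicMap);
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    for (final KafkaStream<byte[], byte[]> stream : streamsByTopic.get(topic)) {
        pool.execute(new Runnable() {
            @Override
            public void run() {
                // Each stream delivers a disjoint subset of the topic's partitions.
                for (ConsumerIterator<byte[], byte[]> it = stream.iterator(); it.hasNext();) {
                    System.out.println("Receive -> [" + new String(it.next().message(), StandardCharsets.UTF_8) + "]");
                }
            }
        });
    }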

3. Kafka producer

package com.storm.storm_my.kafka;

import java.util.Properties;

import com.storm.storm_my.kafka.KafkaConfigApiConstant.KafkaPropertiesValues;
import com.storm.storm_my.kafka.KafkaConfigApiConstant.KafkaPropertiesKeys;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * 
 * @author Peng.Li
 *
 */
public class KafkaProducer implements Runnable {

    private String topic;
    private Producer<String, String> producer;

    /**
     * @param topic the topic to send messages to
     */
    public KafkaProducer(String topic) {
        this.topic = topic;
        producer = new Producer<String, String>(this.newProducerConfig());
    }

    /**
     * Build the configuration the producer needs.
     * @return the producer configuration
     */
    public ProducerConfig newProducerConfig() {

        Properties producerProperties = new Properties();
        // ZooKeeper settings (carried over from the consumer; the 0.8 producer itself only requires metadata.broker.list)
        producerProperties.put(KafkaPropertiesKeys.ZK_CONNECT, KafkaPropertiesValues.ZK_CONNECT);
        producerProperties.put(KafkaPropertiesKeys.ZK_CONNECTIONTIMEOUT_MS, String.valueOf(KafkaPropertiesValues.ZK_CONNECTIONTIMEOUT_MS));
        producerProperties.put(KafkaPropertiesKeys.ZK_SESSION_TIMEOUT_MS, String.valueOf(KafkaPropertiesValues.ZK_SESSION_TIMEOUT_MS));
        producerProperties.put(KafkaPropertiesKeys.ZK_SYNC_TIME_MS, String.valueOf(KafkaPropertiesValues.ZK_SYNC_TIME_MS));
        producerProperties.put(KafkaPropertiesKeys.AUTO_COMMIT_ENABLE, String.valueOf(KafkaPropertiesValues.AUTO_COMMIT_ENABLE));
        producerProperties.put(KafkaPropertiesKeys.TOPIC, KafkaPropertiesValues.TOPIC);
        // Producer settings
        producerProperties.put(KafkaPropertiesKeys.SERIALIZER_CLASS, KafkaPropertiesValues.SERIALIZER_CLASS);
        producerProperties.put(KafkaPropertiesKeys.METADATA_BROKER_LIST, KafkaPropertiesValues.METADATA_BROKER_LIST);
        producerProperties.put(KafkaPropertiesKeys.PARTITIONER_CLASS, KafkaPropertiesValues.PARTITIONER_CLASS);
        producerProperties.put(KafkaPropertiesKeys.MAX_MESSAGE_SIZE, String.valueOf(KafkaPropertiesValues.MAX_MESSAGE_SIZE));

        return new ProducerConfig(producerProperties);

    }

    @Override
    public void run() {
        int offsetNo = 1;
        while (true) {
            String msg = "Message_" + offsetNo;
            System.out.println("Send->[" + msg + "]");
            // No key is given, so the producer picks the partition itself.
            producer.send(new KeyedMessage<String, String>(topic, msg));
            offsetNo++;
            try {
                Thread.sleep(1000 * 3);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop producing.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}
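
Because run() sends messages without a key, the configured partitioner.class is never consulted. A short sketch (not in the original post; the key "user-42" is made up) of sending a keyed message inside run(), so that DefaultPartitioner hashes the key to choose the partition:

    // KeyedMessage also has a (topic, key, message) constructor; with
    // kafka.serializer.StringEncoder the String key is serialized like the payload.
    String key = "user-42"; // hypothetical partitioning key
    producer.send(new KeyedMessage<String, String>(topic, key, msg));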


4. Consumer client


package com.storm.storm_my.kafka.client;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.storm.storm_my.kafka.KafkaConfigApiConstant;
import com.storm.storm_my.kafka.KafkaConsumer;

/**
 * 
 * @author Peng.Li
 *
 */
public class KafkaConsumerClient {

    public static void main(String[] args) {
        KafkaConsumer consumerRunnable = new KafkaConsumer(KafkaConfigApiConstant.KafkaPropertiesValues.TOPIC);
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.execute(consumerRunnable);
    }

}
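
As written, the client runs until the process is killed. A sketch of a cleaner exit (assumptions: KafkaConsumer is given a shutdown() method delegating to ConsumerConnector.shutdown(), which the class above does not have, and both local variables in main() are declared final so the anonymous class can capture them):

    // Added inside main(), after submitting the consumer:
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            consumerRunnable.shutdown(); // hypothetical delegate to consumer.shutdown()
            executorService.shutdown();  // stop accepting new tasks
        }
    });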


5. Producer client

package com.storm.storm_my.kafka.client;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.storm.storm_my.kafka.KafkaProducer;
import com.storm.storm_my.kafka.KafkaConfigApiConstant;

/**
 * 
 * @author Peng.Li
 *
 */

public class KafkaProducerClient {

    public static void main(String[] args) {
        KafkaProducer producerRunnable = new KafkaProducer(KafkaConfigApiConstant.KafkaPropertiesValues.TOPIC);
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.execute(producerRunnable);
    }

}


Result: the producer console prints Send->[Message_1], Send->[Message_2], ... at three-second intervals, and the consumer console echoes the matching Receive -> [Message_N] lines. (The original output screenshots are not reproduced here.)