Kafka producer and consumer code examples
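
These examples use the legacy Scala-client APIs (kafka.javaapi.producer.Producer and kafka.javaapi.consumer.ConsumerConnector) that shipped with Kafka 0.8.x, so they assume a dependency on the kafka_2.10 (or matching Scala build) artifact of that era; these classes have been removed from recent Kafka releases.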

1. Producer
import java.util.Properties; 
   
import kafka.javaapi.producer.Producer; 
import kafka.producer.KeyedMessage; 
import kafka.producer.ProducerConfig; 
   
public class MyProducer {   
     
        public static void main(String[] args) {   
            Properties props = new Properties();   
            props.setProperty("metadata.broker.list","localhost:9092");   
            props.setProperty("serializer.class","kafka.serializer.StringEncoder");   
            props.put("request.required.acks","1");   
            ProducerConfig config = new ProducerConfig(props);   
            // Create the producer
            Producer<String, String> producer = new Producer<String, String>(config);
            // Build the message: topic "mykafka", payload "test-kafka"
            KeyedMessage<String, String> data = new KeyedMessage<String, String>("mykafka", "test-kafka");
            try {
                int i = 1;
                while (i < 100) {
                    // Send the message (the counter must be incremented, otherwise the loop never terminates)
                    producer.send(data);
                    i++;
                }
            } catch (Exception e) {   
                e.printStackTrace();   
            }   
            producer.close();   
        }   
}
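
The producer above sends the same message ("test-kafka") to the topic "mykafka" 99 times. To try it you need a broker listening on localhost:9092 and the topic created; a minimal sketch using the standard Kafka 0.8.x command-line scripts (the install path, partition count, and replication factor below are assumptions, adjust them to your cluster):

# Create the topic (ZooKeeper assumed on localhost:2181)
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic mykafka

# Optionally watch the topic with the old console consumer to verify that messages arrive
bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic mykafka --from-beginning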

2. Consumer
import java.util.HashMap; 
import java.util.List;   
import java.util.Map;   
import java.util.Properties;   
     
import kafka.consumer.ConsumerConfig;   
import kafka.consumer.ConsumerIterator;   
import kafka.consumer.KafkaStream;   
import kafka.javaapi.consumer.ConsumerConnector;  
   
public class MyConsumer extends Thread {
    // Consumer connector
    private final ConsumerConnector consumer;
    // Topic to consume from
    private final String topic;

    public MyConsumer(String topic) {
        consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }
     
    // Build the consumer configuration
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
//        props.put("zookeeper.connect","localhost:2181,10.XX.XX.XX:2181,10.XX.XX.XX:2181");
        // ZooKeeper connection string (host:port).
        // The 'zookeeper.connect' string identifies where to find one instance of ZooKeeper in your cluster.
        // Kafka uses ZooKeeper to store the offsets of messages consumed for a specific topic and partition by this Consumer Group.
        props.put("zookeeper.connect", "localhost:2181");

        // Consumer group id: the 'group.id' string defines the Consumer Group this process is consuming on behalf of.
        props.put("group.id", "0");

        // ZooKeeper session timeout.
        // 'zookeeper.session.timeout.ms' is how many milliseconds Kafka will wait for
        // ZooKeeper to respond to a request (read or write) before giving up and continuing to consume messages.
        props.put("zookeeper.session.timeout.ms", "10000");

        // 'zookeeper.sync.time.ms' is the number of milliseconds a ZooKeeper 'follower' can be behind the master before an error occurs.
        props.put("zookeeper.sync.time.ms", "200");

        // 'auto.commit.interval.ms' is how often updates to the consumed offsets are written to ZooKeeper.
        // Note that since the commit frequency is time based instead of # of messages consumed,
        // if an error occurs between updates to ZooKeeper, on restart you will get replayed messages.
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }
     
    public void run() {
        // Map of topic -> number of streams (threads) to consume it with
        Map<String, Integer> topicMap = new HashMap<String, Integer>();
        topicMap.put(topic, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streamMap = consumer.createMessageStreams(topicMap);

        KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        System.out.println("*********Results********");
        while (true) {
            if (it.hasNext()) {
                // Print the received message
                System.err.println(Thread.currentThread() + " get data: " + new String(it.next().message()));
            } 
            try {   
                Thread.sleep(1000);   
            } catch (InterruptedException e) {   
                e.printStackTrace();   
            }   
        }   
    }  
    
    
    public static void main(String[] args) {   
        MyConsumer consumerThread = new MyConsumer("mykafka");   
        consumerThread.start();   
    }   
}
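
Running MyConsumer starts a single thread that prints every message it receives from "mykafka". Note that ConsumerConnector (and the producer API above) belong to the old ZooKeeper-based clients; current Kafka versions ship org.apache.kafka.clients instead. A minimal sketch of an equivalent consumer on the new API, assuming the kafka-clients 2.x library and the same broker/topic/group names as above:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class MyNewConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // New clients talk to the brokers directly; no ZooKeeper connection is configured here
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "0");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singletonList("mykafka"));
        while (true) {
            // poll(Duration) requires kafka-clients 2.0+; older clients use poll(long)
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(Thread.currentThread() + " get data: " + record.value());
            }
        }
    }
}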
