Java: Connecting to Kafka to Read and Write Data Streams

Reading

import org.apache.kafka.clients.consumer.*;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class jkafka_demo {
	public static void main(String[] args) {
		try {
			Properties props = new Properties();
			// Addresses of the brokers in the Kafka cluster
			props.put("bootstrap.servers", "192.168.16.152:9092,192.168.16.153:9092,192.168.16.154:9092");
			props.put("group.id", "groupIdName");
			// Offsets are committed automatically in the background
			props.put("enable.auto.commit", "true");
			props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
			props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
			KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
			consumer.subscribe(Collections.singletonList("topicName"));
			while (true) {
				// Poll for new records, waiting at most 100 ms
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
				for (ConsumerRecord<String, String> record : records) {
					System.out.printf("consumer:>>>>> offset = %d, key = %s, value = %s%n",
							record.offset(), record.key(), record.value());
				}
			}
		} catch (Exception ex) {
			ex.printStackTrace();
			System.out.println("Error while reading from Kafka: " + ex.getMessage());
		}
	}
}
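The consumer above relies on enable.auto.commit=true, so offsets are committed periodically in the background; if the process dies between a commit and the processing of the records it covers, messages can be lost or re-read. For at-least-once processing you can disable auto-commit and commit offsets yourself after each batch. A minimal sketch, reusing the placeholder broker addresses, topic, and group id from above (the class name ManualCommitDemo is mine):

import org.apache.kafka.clients.consumer.*;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class ManualCommitDemo {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.16.152:9092,192.168.16.153:9092,192.168.16.154:9092");
		props.put("group.id", "groupIdName");
		// Disable auto-commit; offsets are committed explicitly below
		props.put("enable.auto.commit", "false");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
			consumer.subscribe(Collections.singletonList("topicName"));
			while (true) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
				for (ConsumerRecord<String, String> record : records) {
					// Process the record before its offset is committed
					System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
				}
				// Synchronously commit the offsets of everything just processed
				consumer.commitSync();
			}
		}
	}
}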

Writing

import cn.test.bean.Article;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.Future;


public class writeToKafka {

    public static String writeKafka() {
        Properties props = new Properties();
        // Broker addresses (host:port) of the Kafka cluster
        props.put("bootstrap.servers", "192.168.16.152:9092,192.168.16.153:9092,192.168.16.154:9092");
        /* acks controls how many acknowledgments the producer requires before a send is
           considered successful: 0 = do not wait for any acknowledgment, 1 = wait for the
           leader only, -1/all = the whole ISR set must acknowledge. */
        props.put("acks", "all");
        /* With retries > 0, a failed send is automatically retried up to that many times. */
        props.put("retries", 0);
        /* buffer.memory, batch.size, and linger.ms together control the buffer size and
           send delay; see the producer configuration section of the official docs. */
        props.put("batch.size", 16384);
        /* key.serializer and value.serializer specify how the user-supplied key and value
           are serialized. */
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        Producer<String, String> producer = new KafkaProducer<>(props);

        Article article = new Article(1, "title", "author");
        JSONObject jsonObject = (JSONObject) JSONObject.toJSON(article);
        try {
            // send() is asynchronous; the Future completes with the record's metadata
            Future<RecordMetadata> future =
                    producer.send(new ProducerRecord<>("topic_name", jsonObject.toString()));
            RecordMetadata metadata = future.get(); // block until the broker acknowledges
            return metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset();
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        } finally {
            producer.flush();
            producer.close();
        }
    }
}
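The method above blocks on future.get() for every record, which gives up most of the throughput the producer's batching can provide. send() also accepts a callback that fires once the broker acknowledges the record (or the send fails after any retries), so you can publish asynchronously and still catch errors. A minimal sketch, reusing the placeholder topic_name and broker addresses from above (the class name and generated keys/values are illustrative):

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class AsyncWriteDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.16.152:9092,192.168.16.153:9092,192.168.16.154:9092");
        props.put("acks", "all");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 10; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("topic_name", "key-" + i, "value-" + i);
                // The callback runs on the producer's I/O thread once the send completes
                producer.send(record, (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace(); // send failed after any retries
                    } else {
                        System.out.printf("sent to %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                });
            }
            producer.flush(); // wait for all in-flight sends before closing
        }
    }
}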

 
