Kafka producer and multi-threaded consumer (multiple topics) demo

Producer code

package com.cg.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

//On partitions: the number of partitions is fixed when the topic is created.
//Kafka then automatically balances the partitions of a topic across the consumers in the same group; the consumer code only needs to specify the group it belongs to.
public class ProducerThread {

	private static final String TOPIC = "test"; //"test" or "test1"
	private static final String BROKER_LIST = "192.168.80.132:9092"; //for a cluster: "192.168.80.132:9092,192.168.80.133:9092,192.168.80.134:9092"
	private static KafkaProducer<String,String> producer = null;
	
	static{
		Properties configs = initConfig();
		producer = new KafkaProducer<String,String>(configs);
	}
	
	private static Properties initConfig() {
		//Step 1: producer settings; most of these affect throughput and reliability
		Properties properties = new Properties();
		//1 connect to the brokers
		properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKER_LIST);
		//2 acks
		// -1: every follower partition in the ISR list must write the message before it counts as a success
		//  0: fire and forget
		//  1: a successful write to the leader partition is enough
		properties.put(ProducerConfig.ACKS_CONFIG, "-1");
		//3 retry count: retries fix most transient failures; 5-10 is common in production
		properties.put(ProducerConfig.RETRIES_CONFIG, 5);
		//4 how long to wait between retries
		properties.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 2000);
		//5 to raise throughput, pick a compression type such as lz4
		properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "none");
		//6 send buffer size, default 32 MB
		properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
		//7 batch size, default 16 KB (raised to ~316 KB here); tune it to the size of a single message
		properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 323840);
		//8 a batch that is not full is still sent after this many milliseconds
		properties.put(ProducerConfig.LINGER_MS_CONFIG, 200);
		//9 maximum size of a single message, default 1 MB; usually raised in production, otherwise large messages fail
		properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1048576);
		//10 how long to wait for a broker response before the request is considered timed out (default is 30000; 500 ms is tight)
		properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 500);
		//11 key and value serializers
		properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
		properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
		return properties;
	}
	
	public static void main(String[] args) {
		try {
			for (int i = 0; i < 9; i++) {
				String message = "{'name':'zhang','age':'99','sala':'2322'}";
				ProducerRecord<String, String> record = new ProducerRecord<String, String>(TOPIC,message);
				producer.send(record,new Callback() {
					
					@Override
					public void onCompletion(RecordMetadata metadata, Exception exception) {
						if (null == exception) {
							System.out.println("perfect!");
						}
						if (null != metadata) {
							System.out.println("offset:" + metadata.offset() + "; partition:" + metadata.partition());
						}
					}
				}).get(); //.get() blocks until the broker responds, making each send synchronous
				
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			producer.close();
		}
	}
	
}
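
The comments above assume the topics already exist with the desired partition counts. Below is a minimal sketch of creating them programmatically with the kafka-clients AdminClient API; the class name TopicAdmin, the partition count of 3, and the replication factor of 1 are assumptions, so adjust them to your cluster:

package com.cg.kafka;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class TopicAdmin {

	public static void main(String[] args) throws Exception {
		Properties props = new Properties();
		props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.80.132:9092");
		//try-with-resources closes the AdminClient when done
		try (AdminClient admin = AdminClient.create(props)) {
			//3 partitions, replication factor 1 -- assumed values for a single-broker setup
			NewTopic test = new NewTopic("test", 3, (short) 1);
			NewTopic test1 = new NewTopic("test1", 3, (short) 1);
			//blocks until the broker confirms both topics were created
			admin.createTopics(Arrays.asList(test, test1)).all().get();
		}
	}

}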

Multi-threaded consumer code

package com.cg.kafka;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * On partitions: the number of partitions is fixed when the topic is created.
 * Kafka then automatically balances the partitions of a topic across the consumers
 * in the same group; the consumer code only needs to specify the group it belongs to.
 * @author Administrator
 */
public class ConsumerThread {
	
	public static final Logger LOG = LoggerFactory.getLogger(ConsumerThread.class);
	private static final String TOPIC = "test,test1";
	private static final String BROKER_LIST = "192.168.80.132:9092"; //for a cluster: "192.168.80.132:9092,192.168.80.133:9092,192.168.80.134:9092"
	private static KafkaConsumer<String, String> kafkaConsumer = null;
	
	static {
		Properties properties = initConfig();
		kafkaConsumer = new KafkaConsumer<String,String>(properties);
		kafkaConsumer.subscribe(Arrays.asList(TOPIC.split(",")));
	}
	
	private static Properties initConfig() {
		//Step 1: consumer settings
		Properties properties = new Properties();
		//connect to the brokers
		properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKER_LIST);
		//the consumer group this consumer belongs to
		properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
		properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "test");
		//how often the consumer sends heartbeats to the group coordinator
		properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);
		//how long the coordinator waits without a heartbeat before declaring the consumer dead
		properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10*1000);
		//maximum number of records returned by one poll, default 500
		properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
		//-1 would keep idle socket connections open instead of recycling them,
		//but with this setting enabled the consumer stopped receiving messages, so it stays commented out
//		properties.put(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, -1);
		//commit offsets automatically
		properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
		//auto-commit interval
		properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
		//after a restart with no committed offset, start from the latest offset of each partition
		properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
		//key and value deserializers
		properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		return properties;
	}
	
	//worker pool that processes records off the polling thread
	ExecutorService threadPool = Executors.newFixedThreadPool(5);
	
	public void startListening() {
		try {
			while (true) {
				//Duration overload of poll (Kafka 2.0+); older clients use the long overload poll(3000)
				ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(3000));
				consumerRecords.forEach(r -> {
					//hand each record to the pool as a Runnable task
					threadPool.execute(new ConsumerTask(r));
				});
			}
			}
		} catch (Exception e) {
			LOG.error("failed to consume messages", e);
			kafkaConsumer.close();
		}
	}
	
	public static void main(String[] args) {
		ConsumerThread consumerThread = new ConsumerThread();
		consumerThread.startListening();
	}
	
	
	
}
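
One caveat with the demo above: auto-commit runs on the polling thread, so an offset can be committed before a pooled worker has finished (or even started) processing its record; if that worker then fails, the record is lost. A minimal at-least-once sketch, not part of the original demo: disable auto-commit, process each batch on the polling thread, and commit only afterwards (it also needs an import of org.apache.kafka.clients.consumer.ConsumerRecord):

		//in initConfig(): turn off auto-commit
		properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

		//alternative startListening() loop: process synchronously, then commit
		while (true) {
			ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(3000));
			for (ConsumerRecord<String, String> r : records) {
				new ConsumerTask(r).run(); //run on this thread instead of the pool
			}
			kafkaConsumer.commitSync();    //commit only after the whole batch is processed
		}

This trades the parallelism of the thread pool for the guarantee that a committed offset always lies behind fully processed records.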

ConsumerTask implements the Runnable interface

package com.cg.kafka;


import org.apache.kafka.clients.consumer.ConsumerRecord;

import com.alibaba.fastjson.JSONObject;


public class ConsumerTask implements Runnable {
	
	private ConsumerRecord<String,String> record;

	public ConsumerTask(ConsumerRecord<String,String> record) {
		this.record = record;
	}
	
	@Override
	public void run() {
		//fastjson is lenient enough to parse the single-quoted JSON produced above
		JSONObject jsonObject = JSONObject.parseObject(record.value());
		System.out.println("thread-" + Thread.currentThread().getName()
				+ " consumed message: " + jsonObject.toJSONString()
				+ " ----- partition: " + record.partition() + " ------ topic: " + record.topic());
	}

}
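
The consumer demo never exits its poll loop. A hedged sketch of a graceful shutdown, as a hypothetical addition to ConsumerThread above: register a JVM shutdown hook that calls KafkaConsumer.wakeup(), which makes the blocked poll() throw a WakeupException; the existing catch (Exception e) block then closes the consumer.

		//hypothetical addition to ConsumerThread, e.g. at the top of startListening()
		Runtime.getRuntime().addShutdownHook(new Thread(() -> {
			kafkaConsumer.wakeup();  //the blocked poll() throws WakeupException
			threadPool.shutdown();   //finish queued tasks, accept no new ones
		}));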
