kafka常见命令,kafka生产者消费者代码

常见命令

启动集群（在 node01、node02、node03 三台节点上分别执行，可加 -daemon 参数后台启动）
kafka-server-start.sh /opt/yjx/kafka_2.12-0.11.0.3/config/server.properties
//创建主题
[root@node01 ~]# kafka-topics.sh --zookeeper node01:2181 --create --replication-factor 2 --partitions 3 --topic userlog
[root@node01 ~]# kafka-topics.sh --zookeeper node01:2181 --create --replication-factor 2 --partitions 6 --topic studentlog
[root@node01 ~]# kafka-topics.sh --zookeeper node01:2181 --create --replication-factor 2 --partitions 6 --topic baidu
//查看所有主题
[root@node01 ~]# kafka-topics.sh --zookeeper node01:2181 --list
//查看主题
[root@node01 ~]# kafka-topics.sh --zookeeper node01:2181,node02:2181,node03:2181 --describe --topic userlog
//创建生产者
[root@node01 ~]# kafka-console-producer.sh --broker-list node01:9092,node02:9092,node03:9092 --topic userlog
//创建消费者
[root@node01 ~]# kafka-console-consumer.sh --zookeeper node01:2181,node02:2181,node03:2181 --from-beginning --topic userlog

生产者

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
* 创建生产者线程类
*/
public class Hello01Producer extends Thread {
//创建Kafka的生产者
private Producer<String, String> producer;
	/**
	* 创建构造器
	*/
	public Hello01Producer(String producerName) {
		//设置线程的名字
		super.setName(producerName);
		//创建配置文件列表
		```Properties properties = new Properties();
		// kafka地址,多个地址用逗号分割
		properties.put("bootstrap.servers",	"192.168.88.101:9092,192.168.88.102:9092,192.168.88.103:9092");
		//设置写出数据的格式
		properties.put("key.serializer",	"org.apache.kafka.common.serialization.StringSerializer");
		properties.put("value.serializer","org.apache.kafka.common.serialization.StringSerializer");
		//写出的应答方式
		properties.put("acks", "all");
		//错误重试
		properties.put("retries", 1);
		//批量写出
		properties.put("batch.size", 16384);
		//创建生产者对象
		producer = new KafkaProducer<String, String>(properties);
	}
	@Override
	public void run() {
		//初始化一个计数器
		int count = 0;
		System.out.println("Hello01Producer.run--开始发送数据");
		//迭代發送消息
		while (count < 100) {
			count++;
			String key = "yjx01-" + count;
			String value = Thread.currentThread().getName() + "--" +
			count;
			//封装消息对象
			ProducerRecord<String, String> producerRecord = new
			ProducerRecord<>("baidu", key, value);
			//发送消息到服务器
			producer.send(producerRecord);
			//打印消息
			//System.out.println("Producer.run--" + key + "--" + value);
			//每个1秒发送1条
			//try {
				// Thread.sleep(1000);
				//} catch (InterruptedException e) {
				// e.printStackTrace();
			//}
		}
		//强行将数据写出
		producer.close();
		System.out.println("Hello01Producer.run--发送数据完毕");
	}
	public static void main(String[] args) {
		Hello01Producer producer = new Hello01Producer("优极限01");
		producer.start();
	}
}

消费者

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
/**
 * Consumer thread: subscribes to the "baidu" topic and prints every record,
 * committing offsets synchronously after each partition's batch.
 */
public class Hello01Consumer extends Thread {
	//Kafka consumer instance, created once in the constructor
	private KafkaConsumer<String, String> consumer;
	/**
	* Constructor: configures and creates the Kafka consumer.
	*
	* @param cname used as this thread's name
	*/
	public Hello01Consumer(String cname) {
		super.setName(cname);
		//consumer configuration
		Properties properties = new Properties();
		//kafka broker addresses (NOT ZooKeeper -- the new consumer talks to brokers directly)
		properties.put("bootstrap.servers", "192.168.88.101:9092,192.168.88.102:9092,192.168.88.103:9092");
		//consumer group name
		properties.put("group.id", "yjx-bigdata");
		//deserializers for message key and value
		properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		//on first consume, start from the earliest offset
		properties.put("auto.offset.reset", "earliest");
		//BUGFIX: the correct property name is "enable.auto.commit" -- the old
		//"auto.commit.enable" key is ignored by the bootstrap-servers consumer.
		//Disabled here because run() commits offsets manually via commitSync();
		//leaving auto-commit on would race with the manual commits.
		//(Also removed "zookeeper.session.timeout.ms", which this consumer ignores.)
		properties.put("enable.auto.commit", "false");
		//create the consumer
		consumer = new KafkaConsumer<>(properties);
	}
	@Override
	public void run() {
		try {
			//subscribe to the topic; runs until the thread is killed
			consumer.subscribe(Arrays.asList("baidu"));
			while (true) {
				//poll with a 100 ms timeout
				ConsumerRecords<String, String> records = consumer.poll(100);
				for (TopicPartition partition : records.partitions()) {
					List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
					for (ConsumerRecord<String, String> record : partitionRecords) {
						System.out.println(record.topic() + ":" +
						record.partition() + ":" + record.offset() + ":" + record.key() + ":" +
						record.value());
					}
					//synchronously commit offsets after processing each partition's batch
					consumer.commitSync();
				}
			}
		} finally {
			//release sockets and group membership on any exit path
			consumer.close();
		}
	}
	public static void main(String[] args) {
		Hello01Consumer consumer01 = new Hello01Consumer("马龙YYDS");
		consumer01.start();
	}
}

你可能感兴趣的:(大数据,中间件,kafka,java,分布式)