Kafka Getting Started Tutorial

Installing and Running Kafka

  • Download version 0.10.1.0 from the official site (requires Java 1.8)
  • Start ZooKeeper
    bin/zookeeper-server-start.sh config/zookeeper.properties
  • Start the broker
    bin/kafka-server-start.sh config/server.properties
  • Create a topic
    bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
  • Start a console consumer
    bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
  • Start a console producer
    bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
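
With both consoles running, each line typed into the producer terminal should appear in the consumer terminal. To double-check that the topic was created (an extra verification step, not part of the original list), you can describe it:

    bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test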

Testing with Java Code

pom.xml dependencies


<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.1.0</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-streams</artifactId>
    <version>0.10.1.0</version>
</dependency>

producer-demo1

import java.util.Properties;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class KafkaProducerDemo1 {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);
        char c = 'a';
        // Sends a single record ("a" -> "aa"); raise the loop bound to send more keys.
        for (int i = 1; i < 2; i++) {
            String k = String.valueOf(c);
            String v = k + k;
            producer.send(new ProducerRecord<>("test", k, v));
            c++;
        }
        producer.close();
    }
}
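
send() is asynchronous and the demo above ignores its result. If you want to know whether each record actually reached the broker, send() also accepts a Callback; a minimal sketch against the same topic (not part of the original demo):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

// Inside the loop above: asynchronous send with per-record error reporting.
producer.send(new ProducerRecord<>("test", k, v), new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            exception.printStackTrace(); // the send failed
        } else {
            System.out.println("sent to partition " + metadata.partition()
                    + " at offset " + metadata.offset());
        }
    }
});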

consumer-demo1

Creates a new group id on every run, so with auto.offset.reset set to earliest the consumer reads messages from the very first offset.

import java.io.UnsupportedEncodingException;
import java.util.Properties;
import java.util.Arrays;
import java.util.UUID;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.ConsumerRecord;


public class KafkaConsumerDemo1 {
    public static void main(String[] args) throws InterruptedException, UnsupportedEncodingException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        try {
            consumer.subscribe(Arrays.asList("test"));

            while(true) {
                ConsumerRecords<String, String> records = consumer.poll(100); // poll with a 100 ms timeout
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.offset() + ": " + record.value());
                }
                Thread.sleep(1000);
            }
        } finally {
            consumer.close();
        }
    }
}
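
The poll loop above only ends when the process is killed, so the finally block is never reached cleanly. A common pattern, sketched here under the assumption of the same consumer instance, is to call wakeup() from a JVM shutdown hook and catch the resulting WakeupException in the loop:

import org.apache.kafka.common.errors.WakeupException;

final Thread mainThread = Thread.currentThread();
Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
        consumer.wakeup(); // makes a blocked poll() throw WakeupException
        try {
            mainThread.join(); // wait for the poll loop to finish closing
        } catch (InterruptedException ignored) {
        }
    }
});

try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        // ... process records ...
    }
} catch (WakeupException e) {
    // expected during shutdown; fall through to close()
} finally {
    consumer.close();
}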

consumer-demo2

Uses a fixed group id and reads messages starting from a specified offset, by assigning the partition manually and seeking to it.

import java.io.UnsupportedEncodingException;
import java.util.Properties;
import java.util.Arrays;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

public class KafkaConsumerDemo2 {
    public static void main(String[] args) throws InterruptedException, UnsupportedEncodingException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group_test_1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        try {
            String topic = "test";
            long offset = 10;
            // Manually assign partition 0 (no group rebalancing) and jump to the target offset.
            TopicPartition partition0 = new TopicPartition(topic, 0);
            consumer.assign(Arrays.asList(partition0));
            consumer.seek(partition0, offset);

            while(true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.offset() + ": " + record.value());
                }
                consumer.commitSync(); // synchronously commit the offsets returned by the last poll
                Thread.sleep(1000);
            }
        } finally {
            consumer.close();
        }
    }
}
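
The offset does not have to be hard-coded; the consumer API can also jump to either end of an assigned partition. A short sketch using the same partition0 as above:

// Re-read the partition from its very first available message...
consumer.seekToBeginning(Arrays.asList(partition0));
// ...or skip history and read only messages produced from now on:
consumer.seekToEnd(Arrays.asList(partition0));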

consumer-demo3

Uses a fixed group id and manages offset positions itself, committing offsets per partition after processing each batch.

import java.io.UnsupportedEncodingException;
import java.util.Properties;
import java.util.Arrays;
import java.util.List;
import java.util.Collections;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class KafkaConsumerDemo3 {
    public static void main(String[] args) throws InterruptedException, UnsupportedEncodingException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group_test_1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        try {
            consumer.subscribe(Arrays.asList("test"));

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        System.out.println(record.offset() + ": " + record.value());
                    }
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    // Commit lastOffset + 1: the committed offset is the position of the next message to read.
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        } finally {
            consumer.close();
        }
    }
}
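
When offsets live outside Kafka entirely (a database, a file), the usual companion to manual commits is a ConsumerRebalanceListener that saves positions when partitions are revoked and seeks to the stored positions when they are assigned. A minimal sketch; saveOffsetToStore and readOffsetFromStore are hypothetical helpers you would implement against your own storage:

import java.util.Collection;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;

consumer.subscribe(Arrays.asList("test"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            saveOffsetToStore(partition, consumer.position(partition)); // hypothetical helper
        }
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            consumer.seek(partition, readOffsetFromStore(partition));   // hypothetical helper
        }
    }
});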

kafka-streaming-demo1

import java.util.Map;
import java.util.HashMap;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.common.serialization.Serdes;

public class KafkaStreamingDemo1 {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-stream-processing-application");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        StreamsConfig config = new StreamsConfig(props);

        KStreamBuilder builder = new KStreamBuilder();
        // Read from "test", keep only records whose value is "b", and print them to stdout.
        KStream<String, String> stream = builder.stream("test");
        stream.filter((k, v) -> v.equals("b")).print();

        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.start();
    }
}
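
The demo leaves the topology running forever. A small sketch of the usual cleanup, assuming the streams instance above:

// Close the topology (and its internal producers/consumers) when the JVM exits.
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
    @Override
    public void run() {
        streams.close();
    }
}));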
