Using sarama, a Go Package for Kafka (Part 1: Examples)

As of this writing, there are two main Go packages for working with Kafka on GitHub:

  • Shopify/sarama (5.7k stars)
  • confluentinc/confluent-kafka-go (2k stars)

sarama is written in pure Go, while confluent-kafka-go wraps the C client's API.

Terminology:

  • Broker

    A Kafka cluster consists of one or more servers; each such server is called a broker.

  • Topic

    Every message published to a Kafka cluster belongs to a category called a topic. (Physically, messages of different topics are stored separately. Logically, a topic's messages may live on one or more brokers, but producers and consumers only need to name the topic; they never have to care where the data is actually stored.)

  • Partition

    A partition is a physical concept; each topic consists of one or more partitions (the sketch after this list creates a topic with three of them).

  • Producer

    Publishes messages to Kafka brokers.

  • Consumer

    A message consumer: a client that reads messages from Kafka brokers.

  • Consumer Group

    Each consumer belongs to a specific consumer group (a group name can be set per consumer; if none is given, the consumer falls into the default group).
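
To make the terms concrete, here is a minimal sketch that creates the my_topic used in the examples below, with 3 partitions and a replication factor of 2, via sarama's ClusterAdmin API. It assumes the three-broker cluster from section b; adjust the addresses if yours differ.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_2_0 // the CreateTopics API needs Kafka >= 0.10.1

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// 3 partitions spread over the 3 brokers, each kept on 2 of them
	err = admin.CreateTopic("my_topic", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 2,
	}, false)
	if err != nil {
		log.Fatal(err)
	}
}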

Environment:

  go version:      go1.13.5 linux/amd64
  sarama version:  github.com/Shopify/sarama v1.26.1
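
To pin that sarama version with Go modules, the go.mod would contain something like this (the module path is a placeholder):

module example.com/kafka-demo

go 1.13

require github.com/Shopify/sarama v1.26.1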

a. Set up a ZooKeeper cluster:

https://blog.csdn.net/luslin1711/article/details/105701958

b. Set up a Kafka cluster

Using docker-compose:

version: '3'
services:
    broker1:
        image: wurstmeister/kafka
        restart: always
        container_name: broker1
        network_mode: host
        environment:
          KAFKA_BROKER_ID: 1
          KAFKA_ZOOKEEPER_CONNECT: 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183,127.0.0.1:2184
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9192
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9192
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
    broker2:
        image: wurstmeister/kafka
        restart: always
        container_name: broker2
        network_mode: host
        environment:
          KAFKA_BROKER_ID: 2
          KAFKA_ZOOKEEPER_CONNECT: 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183,127.0.0.1:2184
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9292
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9292
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
    broker3:
        image: wurstmeister/kafka
        restart: always
        container_name: broker3
        network_mode: host
        environment:
          KAFKA_BROKER_ID: 3
          KAFKA_ZOOKEEPER_CONNECT: 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183,127.0.0.1:2184
          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9392
          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9392
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
    kafka-manager:
        image: sheepkiller/kafka-manager:latest
        network_mode: host
        depends_on:
          - broker1
          - broker2
          - broker3
        environment:
          ZK_HOSTS: 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183,127.0.0.1:2184
          APPLICATION_SECRET: letmein
          KM_ARGS: -Djava.net.preferIPv4Stack=true
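
Once the containers are up, a quick connectivity check is to ask any broker for the cluster metadata. A minimal sketch, using the broker addresses configured above:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Brokers() lists the brokers the client learned about from metadata
	for _, b := range client.Brokers() {
		log.Printf("broker id=%d addr=%s", b.ID(), b.Addr())
	}
	topics, err := client.Topics()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("topics:", topics)
}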

Simple examples:

producer

package main

import (
	"github.com/Shopify/sarama"
	"log"
	"os"
	"os/signal"
	"sync"
)

func main() {
	config := sarama.NewConfig()

	config.Producer.Return.Successes = true
	config.Producer.Partitioner = sarama.NewRandomPartitioner

	client, err := sarama.NewClient([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, config)
	if err != nil {
		panic(err)
	}
	// only defer Close once the client has been created successfully
	defer client.Close()
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}

	// Trap SIGINT to trigger a graceful shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var (
		wg                          sync.WaitGroup
		enqueued, successes, errors int
	)

	wg.Add(1)
	// start a goroutine to count successful deliveries
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	// start a goroutine to count and log errors
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			log.Println(err)
			errors++
		}
	}()

ProducerLoop:
	for {
		message := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("testing 123")}
		select {
		case producer.Input() <- message:
			enqueued++

		case <-signals:
			producer.AsyncClose() // Trigger a shutdown of the producer.
			break ProducerLoop
		}
	}

	wg.Wait()

	log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
}
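
The example above uses the asynchronous producer, which batches messages and reports results over channels. When you want a blocking send that returns the partition and offset directly, sarama also offers a SyncProducer; a minimal sketch with the same brokers and topic:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// the SyncProducer requires Return.Successes so SendMessage can report results
	config.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("testing 123")}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}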

consumer

package main

import (
	"github.com/Shopify/sarama"
	"log"
	"os"
	"os/signal"
)

func main() {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	client, err := sarama.NewClient([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, config)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()
	// get the list of partition IDs for the topic
	partitions, err := consumer.Partitions("my_topic")
	if err != nil {
		panic(err)
	}

	for _, partitionId := range partitions {
		// create a PartitionConsumer for every partition
		partitionConsumer, err := consumer.ConsumePartition("my_topic", partitionId, sarama.OffsetNewest)
		if err != nil {
			panic(err)
		}

		// interface values are already reference-like, so pass the value itself
		go func(pc sarama.PartitionConsumer) {
			defer pc.Close()
			// Messages() blocks until a message arrives or the consumer is closed
			for message := range pc.Messages() {
				value := string(message.Value)
				log.Printf("partition: %d; offset: %d; value: %s\n", message.Partition, message.Offset, value)
			}
		}(partitionConsumer)
	}
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	// block until interrupted
	<-signals
}
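
Note that the partition consumers above start at sarama.OffsetNewest, so they only see messages produced after the program starts; to replay everything the brokers still retain, pass sarama.OffsetOldest (or an absolute offset) to ConsumePartition instead.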

Consuming with a consumer group

package main

import (
	"context"
	"fmt"
	"github.com/Shopify/sarama"
	"os"
	"os/signal"
	"sync"
)
type consumerGroupHandler struct {
	name string
}

func (consumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error   { return nil }
func (consumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (h consumerGroupHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		fmt.Printf("%s message topic:%q partition:%d offset:%d value:%s\n", h.name, msg.Topic, msg.Partition, msg.Offset, string(msg.Value))
		// mark the message as processed so its offset is committed for the group
		sess.MarkMessage(msg, "")
	}
	return nil
}

// handleErrors drains a group's Errors() channel. It is only useful when
// config.Consumer.Return.Errors is true, and is not wired up in main below.
func handleErrors(group sarama.ConsumerGroup, wg *sync.WaitGroup) {
	wg.Done()
	for err := range group.Errors() {
		fmt.Println("ERROR", err)
	}
}

func consume(group sarama.ConsumerGroup, wg *sync.WaitGroup, name string) {
	fmt.Println(name + " started")
	wg.Done()
	ctx := context.Background()
	for {
		topics := []string{"my_topic"}
		handler := consumerGroupHandler{name: name}
		// Consume blocks for the lifetime of a session and returns when a
		// rebalance happens, so it has to be called again in a loop.
		err := group.Consume(ctx, topics, handler)
		if err != nil {
			panic(err)
		}
	}
}

func main() {
	var wg sync.WaitGroup
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = false
	// consumer groups require Kafka >= 0.10.2, so the version must be set
	config.Version = sarama.V0_10_2_0
	client, err := sarama.NewClient([]string{"localhost:9192", "localhost:9292", "localhost:9392"}, config)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// "c1", "c2" and "c3" are three different group IDs, so each group
	// receives every message of the topic; consumers sharing one group ID
	// would split the partitions among themselves instead.
	group1, err := sarama.NewConsumerGroupFromClient("c1", client)
	if err != nil {
		panic(err)
	}
	group2, err := sarama.NewConsumerGroupFromClient("c2", client)
	if err != nil {
		panic(err)
	}
	group3, err := sarama.NewConsumerGroupFromClient("c3", client)
	if err != nil {
		panic(err)
	}
	defer group1.Close()
	defer group2.Close()
	defer group3.Close()

	wg.Add(3)
	go consume(group1, &wg, "c1")
	go consume(group2, &wg, "c2")
	go consume(group3, &wg, "c3")
	// wait until all three consumers have started
	wg.Wait()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
}
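
Two consumer-group settings are worth knowing about beyond this example: Consumer.Offsets.Initial decides where a group starts when it has no committed offset yet, and Consumer.Group.Rebalance.Strategy decides how partitions are assigned to members. A minimal sketch (newGroupConfig is just an illustrative helper name):

package kafkaconfig

import "github.com/Shopify/sarama"

func newGroupConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_2_0
	// where a group starts when it has no committed offset yet
	// (the default is sarama.OffsetNewest, i.e. only new messages)
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	// how partitions are assigned to group members
	// (the default is the range strategy)
	config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	return config
}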
