This post covers the Go Kafka API using the sarama and sarama-cluster packages, with support for offset tracking. Both producing and consuming start by filling in a configuration struct. Under light concurrency the synchronous producer is fine, but under heavy concurrency you should use the asynchronous producer.
go get github.com/Shopify/sarama
The library requires Kafka 0.8 or later. It supports Kafka's high-level and low-level APIs, but not the commonly needed automatic consumer rebalancing and offset tracking, so it is usually combined with the cluster package:
go get github.com/bsm/sarama-cluster
This requires Kafka 0.9 or later. The code samples come from the official repositories, where you can find more information.
The asynchronous producer:
// Build the configuration
config := sarama.NewConfig()
// Wait for the response until all in-sync replicas have persisted the message
config.Producer.RequiredAcks = sarama.WaitForAll
// Random partitioning
config.Producer.Partitioner = sarama.NewRandomPartitioner
// Whether to deliver success and failure responses; only meaningful when RequiredAcks above is not NoResponse.
config.Producer.Return.Successes = true
config.Producer.Return.Errors = true
// Set the Kafka version in use; below V0_10_0_0 the message timestamp has no effect. Must be set on both consumer and producer.
config.Version = sarama.V0_11_0_0
// Create an asynchronous producer from the configuration
producer, e := sarama.NewAsyncProducer([]string{"IP:9092", "IP:9092", "IP:9092"}, config)
if e != nil {
    panic(e)
}
defer producer.AsyncClose()
var value string
for {
    value = "this is a message"
    // Read the actual payload to send
    fmt.Scanln(&value)
    fmt.Println(value)
    // Build a fresh message (topic, key, value) on each iteration; reusing one
    // ProducerMessage across sends is unsafe because sends are batched.
    msg := &sarama.ProducerMessage{
        Topic: "test_topic",
        Key:   sarama.StringEncoder("test"),
        // Convert the string payload to a byte slice
        Value: sarama.ByteEncoder(value),
    }
    // Send through the input channel
    producer.Input() <- msg
    // Wait for whichever response channel delivers data.
    select {
    case suc := <-producer.Successes():
        fmt.Println("offset: ", suc.Offset, "timestamp: ", suc.Timestamp.String(), "partitions: ", suc.Partition)
    case fail := <-producer.Errors():
        fmt.Println("err: ", fail.Err)
    }
}
The synchronous producer:
config := sarama.NewConfig()
// config.Producer.RequiredAcks = sarama.WaitForAll
// config.Producer.Partitioner = sarama.NewRandomPartitioner
config.Producer.Return.Successes = true
config.Producer.Timeout = 5 * time.Second
p, err := sarama.NewSyncProducer(strings.Split("localhost:9092", ","), config)
if err != nil {
    fmt.Println("NewSyncProducer error=", err)
    panic(err)
}
defer p.Close()
v := "sync: " + strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Intn(10000))
fmt.Fprintln(os.Stdout, v)
msg := &sarama.ProducerMessage{
    Topic: "test_topic",
    Value: sarama.ByteEncoder(v),
}
if _, _, err := p.SendMessage(msg); err != nil {
    fmt.Println("SendMessage error=", err)
    panic(err)
}
groupID := "group-1"
topic := "test_topic"
config := cluster.NewConfig()
config.Group.Return.Notifications = true
config.Consumer.Offsets.CommitInterval = 1 * time.Second
config.Consumer.Offsets.Initial = sarama.OffsetNewest // start from the newest offset
c, err := cluster.NewConsumer(strings.Split("localhost:9092", ","), groupID, strings.Split(topic, ","), config)
if err != nil {
    panic(err)
}
defer c.Close()
go func(c *cluster.Consumer) {
    errors := c.Errors()
    noti := c.Notifications()
    for {
        select {
        case err := <-errors:
            panic(err)
        case <-noti:
        }
    }
}(c)
for msg := range c.Messages() {
    fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
    c.MarkOffset(msg, "") // MarkOffset does not write to Kafka immediately; offsets not yet committed can be lost if the process crashes
}
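Because MarkOffset only stages offsets until the next periodic commit, it can be worth flushing them explicitly on shutdown. A minimal sketch, reusing the consumer c from above (CommitOffsets is sarama-cluster's manual commit):
// Synchronously flush all marked offsets to Kafka so that a clean shutdown
// does not lose the consumer's position.
if err := c.CommitOffsets(); err != nil {
    fmt.Println("commit offsets failed: ", err)
}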
The client can be used to obtain consumers and producers, and also to look up Kafka broker information, topic information, and the offsets within each topic.
config := sarama.NewConfig()
config.Version = sarama.V0_10_0_0
client, err := sarama.NewClient([]string{"IP:9092", "IP:9092", "IP:9092"}, config)
if err != nil {
    panic("client create error")
}
defer client.Close()
// Fetch the set of topic names
topics, err := client.Topics()
if err != nil {
    panic("get topics err")
}
for _, e := range topics {
    fmt.Println(e)
}
// Fetch the set of brokers
brokers := client.Brokers()
// Print each broker's address
for _, broker := range brokers {
    fmt.Println(broker.Addr())
}
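To read the offsets mentioned above, here is a minimal sketch that reuses the client from this block, assuming a topic named test_topic (Partitions and GetOffset are both part of sarama's Client interface):
// List the topic's partitions, then ask each one for the next offset it
// would assign, i.e. its current end.
partitions, err := client.Partitions("test_topic")
if err != nil {
    panic(err)
}
for _, p := range partitions {
    offset, err := client.GetOffset("test_topic", p, sarama.OffsetNewest)
    if err != nil {
        panic(err)
    }
    fmt.Printf("partition %d newest offset: %d\n", p, offset)
}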
The Config struct bundles consumer, producer, and client settings; set only the fields you need. The listing below is essentially what sarama.NewConfig() fills in by default:
c := sarama.NewConfig()
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = sarama.WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = sarama.NewHashPartitioner // partitioner that picks which topic partition a message goes to
c.Producer.Retry.Max = 3 // number of retries
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true // whether to deliver error responses; failures are put on the Errors channel, read them from there
// Fetch size settings
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Retry.Backoff = 2 * time.Second // wait time before retrying after a failure
c.Consumer.MaxWaitTime = 250 * time.Millisecond // maximum wait time
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false // whether to deliver error responses; failures are put on the Errors channel, read them from there
c.Consumer.Offsets.CommitInterval = 1 * time.Second // how often to commit updated offsets
c.Consumer.Offsets.Initial = sarama.OffsetNewest // where to start when there is no committed offset; the default is OffsetNewest, use OffsetOldest to start from the beginning
c.ClientID = "sarama" // sarama's internal default client ID
c.ChannelBufferSize = 256 // channel buffer size
c.Version = sarama.V0_8_2_0 // Kafka version; if not set, the minimum supported version is assumed, and newer features may not work
c.MetricRegistry = metrics.NewRegistry() // metrics registry (github.com/rcrowley/go-metrics)
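Since NewConfig already fills in these defaults, a common pattern is to override only the fields you care about and let sarama check the result; a minimal sketch (Config.Validate is part of sarama and is also run internally by the constructors):
config := sarama.NewConfig()
// Override only what differs from the defaults.
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Return.Successes = true
// Validate reports inconsistent settings before any client or producer is built.
if err := config.Validate(); err != nil {
    panic(err)
}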
When a topic has multiple partitions, the partitioner decides which partition a message is sent to. sarama ships several partitioner constructors, any of which can be assigned to config.Producer.Partitioner:
sarama.NewManualPartitioner // manual selection: uses the Partition field set on the message itself
sarama.NewRandomPartitioner // picks a partition number at random
sarama.NewRoundRobinPartitioner // round-robin: cycles through all partitions in order
sarama.NewHashPartitioner // hashes the message Key to choose the partition
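For example, a minimal sketch of pinning messages to one partition with the manual partitioner (partition 0 is just an illustration):
config := sarama.NewConfig()
// Assign the constructor itself, not its return value.
config.Producer.Partitioner = sarama.NewManualPartitioner
msg := &sarama.ProducerMessage{
    Topic:     "test_topic",
    Partition: 0, // the manual partitioner delivers to exactly this partition
    Value:     sarama.ByteEncoder("pinned message"),
}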
ProducerMessage
// ProducerMessage is the collection of elements passed to the producer to send a message.
type ProducerMessage struct {
    Topic string // the Kafka topic
    Key Encoder // used for partition selection, together with NewHashPartitioner; determines which partition stores the message
    Value Encoder // the message payload
    Headers []RecordHeader // key-value pairs carried from producer to consumer
    Metadata interface{} // arbitrary caller data; sarama passes it through to the response channels
    // The fields below are filled in once the broker's response comes back.
    Offset int64 // offset of the newly published message
    Partition int32 // partition the message was stored in
    Timestamp time.Time // timestamp stored with the message on the server
    retries int
    flags flagSet
}
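As an illustration, a sketch of attaching a header to an outgoing message; headers require Kafka 0.11+ (config.Version = sarama.V0_11_0_0), and the trace-id key here is made up:
msg := &sarama.ProducerMessage{
    Topic: "test_topic",
    Value: sarama.ByteEncoder("payload"),
    Headers: []sarama.RecordHeader{
        {Key: []byte("trace-id"), Value: []byte("abc-123")}, // hypothetical header
    },
}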
ConsumerMessage
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
    Key, Value []byte // message key and stored value
    Topic string // topic the message was consumed from
    Partition int32 // partition the message was consumed from
    Offset int64 // position of the message within the partition
    Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
    BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
    Headers []*RecordHeader // only set if kafka is version 0.11+
}
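On the consuming side the same headers come back as []*RecordHeader; a minimal sketch, assuming msg is a *sarama.ConsumerMessage as in the examples below:
// Iterate over the headers attached to a consumed message (Kafka 0.11+).
for _, h := range msg.Headers {
    fmt.Printf("header %s=%s\n", h.Key, h.Value)
}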
package main
import (
    "fmt"
    "github.com/Shopify/sarama"
    "time"
)
func main() {
    var address = []string{"127.0.0.1:9092"}
    var topic string = "test"
    asyncProducer(address, topic)
    time.Sleep(3 * time.Second)
}
// Asynchronous producer
func asyncProducer(address []string, topic string) {
    config := sarama.NewConfig()
    // Wait for the response until all in-sync replicas have persisted the message
    config.Producer.RequiredAcks = sarama.WaitForAll
    // Send messages to random partitions
    config.Producer.Partitioner = sarama.NewRandomPartitioner
    // Whether to deliver success and failure responses; only meaningful when RequiredAcks above is not NoResponse.
    config.Producer.Return.Successes = true
    config.Producer.Return.Errors = true
    // Set the read/write timeout to 2 seconds (default is 10 seconds)
    config.Producer.Timeout = 2 * time.Second
    // Maximum number of send retries
    config.Producer.Retry.Max = 3
    // Set the Kafka version in use; below V0_10_0_0 the message timestamp has no effect. Must be set on both consumer and producer.
    // Note: if a version is set but it is wrong, Kafka returns rather odd errors and messages cannot be sent.
    //config.Version = sarama.V0_10_0_1
    fmt.Println("start to make a producer")
    // Create an asynchronous producer from the configuration
    producer, e := sarama.NewAsyncProducer(address, config)
    if e != nil {
        fmt.Println("fail to make a producer, error=", e)
        return
    }
    defer producer.AsyncClose()
    // Loop and check which response channel has delivered data.
    fmt.Println("start goroutine to get response")
    go func(p sarama.AsyncProducer) {
        for {
            select {
            case suc := <-p.Successes():
                if suc != nil {
                    fmt.Printf("succeed, offset=%d, timestamp=%s, partitions=%d\n", suc.Offset, suc.Timestamp.String(), suc.Partition)
                }
            case fail := <-p.Errors():
                if fail != nil {
                    fmt.Printf("error= %v\n", fail.Err)
                }
            }
        }
    }(producer)
    // Send messages
    strKey := "key"
    srcValue := "async: this is a message, index=%d"
    for i := 0; i < 5; i++ {
        time.Sleep(500 * time.Millisecond)
        value := fmt.Sprintf(srcValue, i)
        // The message and its target topic.
        // Note: msg must be a freshly constructed variable here; otherwise, because sends are batched, every message would carry the same content.
        msg := &sarama.ProducerMessage{
            Topic: topic,
        }
        // Set the message key
        msg.Key = sarama.StringEncoder(strKey)
        // Set the message value, converting the string to a byte slice
        msg.Value = sarama.ByteEncoder(value)
        // Send through the input channel
        producer.Input() <- msg
    }
}
package main
import (
    "fmt"
    "github.com/Shopify/sarama"
    "log"
    "os"
    "time"
)
func main() {
    var address = []string{"127.0.0.1:9092"}
    topic := "test"
    syncProducer(address, topic)
    time.Sleep(2 * time.Second)
}
// Synchronous production mode
func syncProducer(address []string, topic string) {
    config := sarama.NewConfig()
    config.Producer.Return.Successes = true
    config.Producer.Timeout = 3 * time.Second
    p, err := sarama.NewSyncProducer(address, config)
    if err != nil {
        log.Printf("sarama.NewSyncProducer err, message=%s \n", err)
        return
    }
    defer p.Close()
    strKey := "key"
    srcValue := "sync: this is a message, index=%d"
    for i := 0; i < 5; i++ {
        value := fmt.Sprintf(srcValue, i)
        msg := &sarama.ProducerMessage{
            Key:   sarama.StringEncoder(strKey),
            Topic: topic,
            Value: sarama.ByteEncoder(value),
        }
        part, offset, err := p.SendMessage(msg)
        if err != nil {
            log.Printf("send message(%s) err=%v \n", value, err)
        } else {
            fmt.Fprintf(os.Stdout, "%s sent successfully, partition=%d, offset=%d \n", value, part, offset)
        }
    }
}
package main
import (
    "fmt"
    "github.com/Shopify/sarama"
    "github.com/bsm/sarama-cluster"
    "log"
    "os"
    "os/signal"
    "sync"
    "syscall"
)
func main() {
    var Address = []string{"127.0.0.1:9092"}
    topic := []string{"test"}
    var wg = &sync.WaitGroup{}
    wg.Add(2)
    // Broadcast-style consumption: consumer 1
    go clusterConsumer(wg, Address, topic, "group-1")
    // Broadcast-style consumption: consumer 2 (a different group ID, so each group receives every message)
    go clusterConsumer(wg, Address, topic, "group-2")
    wg.Wait()
}
// Consumer with brokers-cluster support
func clusterConsumer(wg *sync.WaitGroup, brokers, topics []string, groupId string) {
    defer wg.Done()
    config := cluster.NewConfig()
    config.Consumer.Return.Errors = true
    config.Group.Return.Notifications = true
    config.Consumer.Offsets.Initial = sarama.OffsetNewest
    // init consumer
    consumer, err := cluster.NewConsumer(brokers, groupId, topics, config)
    if err != nil {
        log.Printf("%s: sarama-cluster.NewConsumer err, message=%s \n", groupId, err)
        return
    }
    defer consumer.Close()
    // trap termination signals to trigger a shutdown; per signal.Notify's docs
    // the channel must be buffered, and SIGKILL cannot be trapped
    signals := make(chan os.Signal, 1)
    signal.Notify(signals,
        syscall.SIGHUP,
        syscall.SIGINT,
        syscall.SIGTERM,
        syscall.SIGQUIT,
    )
    // consume errors
    go func() {
        for err := range consumer.Errors() {
            log.Printf("groupId=%s, Error= %s\n", groupId, err.Error())
        }
    }()
    // consume notifications
    go func() {
        for ntf := range consumer.Notifications() {
            log.Printf("groupId=%s, Rebalanced Info= %+v \n", groupId, ntf)
        }
    }()
    // consume messages, watch signals
    var successes int
Loop:
    for {
        select {
        case msg, ok := <-consumer.Messages():
            if ok {
                fmt.Fprintf(os.Stdout, "groupId=%s, topic=%s, partition=%d, offset=%d, key=%s, value=%s\n", groupId, msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
                consumer.MarkOffset(msg, "") // mark message as processed
                successes++
            }
        case <-signals:
            break Loop
        }
    }
    fmt.Fprintf(os.Stdout, "%s consume %d messages \n", groupId, successes)
}
References:
http://pastebin.com/9ZsnP2eU
https://github.com/Shopify/sarama
https://github.com/bsm/sarama-cluster