Installation environment: Ubuntu 20.04; network in bridged mode; docker-compose already installed.
The cluster built in this article uses no authentication.
2. Create docker-compose.yaml
version: '2'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper
    restart: always
    ports:
      - "2181:2181"
  kafka1:
    image: wurstmeister/kafka
    container_name: kafka1
    restart: always
    ports:
      - "9092:9092"
    environment:
      - KAFKA_BROKER_ID=0
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<VM-IP>:9092
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
  kafka2:
    image: wurstmeister/kafka
    container_name: kafka2
    restart: always
    ports:
      - "9093:9093"
    environment:
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<VM-IP>:9093
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9093
  kafka3:
    image: wurstmeister/kafka
    container_name: kafka3
    restart: always
    ports:
      - "9094:9094"
    environment:
      - KAFKA_BROKER_ID=2
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<VM-IP>:9094
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9094
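With the file saved, the cluster can be started in the background and checked with the standard docker-compose commands (run from the directory containing docker-compose.yaml):
docker-compose up -d   # start zookeeper and the three brokers
docker ps              # all four containers should show as Up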
Parameter notes: KAFKA_BROKER_ID must be unique for each broker; KAFKA_ZOOKEEPER_CONNECT points every broker at the zookeeper service; KAFKA_LISTENERS is the address the broker binds to inside its container, while KAFKA_ADVERTISED_LISTENERS is the address advertised to clients, so replace <VM-IP> with the VM's bridged-mode IP.
Inside each container, the /opt/kafka/config directory holds the various configuration files, and the command-line tools live under /opt/kafka/bin. Log in to a broker container first (e.g. docker exec -it kafka1 bash), then:
cd /opt/kafka/bin/
Create a topic named first with 3 partitions and 2 replicas. Note: the replication factor cannot exceed the number of brokers (the partition count can), otherwise creation will fail.
./kafka-topics.sh --create --topic first --zookeeper zookeeper:2181 --partitions 3 --replication-factor 2
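The same topic can also be created programmatically. A minimal sketch using sarama's ClusterAdmin API; the 192.168.123.123:9092 broker address is an assumption matching the Go examples further below:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_1_0_0 // admin APIs need a broker protocol version set

	admin, err := sarama.NewClusterAdmin([]string{"192.168.123.123:9092"}, config)
	if err != nil {
		fmt.Println("admin connect err:", err)
		return
	}
	defer admin.Close()

	// 3 partitions, 2 replicas: mirrors the kafka-topics.sh command above.
	err = admin.CreateTopic("first", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 2,
	}, false)
	if err != nil {
		fmt.Println("create topic err:", err)
		return
	}
	fmt.Println("topic first created")
}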
List the topics (the zookeeper:2181 address should match the one used in the compose file):
./kafka-topics.sh --list --zookeeper zookeeper:2181
Show the details of the topic first:
./kafka-topics.sh --describe --topic first --zookeeper zookeeper:2181
Notes: in the describe output, Leader is the broker currently serving reads and writes for a partition, Replicas lists the brokers holding copies of it, and Isr is the set of replicas currently in sync with the leader.
Create a producer and send messages to the topic:
./kafka-console-producer.sh --topic first --broker-list <VM-IP>:9092
Log in to the kafka2 or kafka3 container, then create a consumer to receive messages from the topic:
./kafka-console-consumer.sh --topic first --bootstrap-server <VM-IP>:9092 --from-beginning
Note: --from-beginning reads from the earliest message; without this flag the consumer starts at the latest offset (only newly produced messages are received).
Other commands
Delete a topic. The topic is not removed immediately; it is first marked for deletion, which can be seen under /data/kafka1/data:
./kafka-topics.sh --delete --topic first --zookeeper zookeeper:2181
Check the number of messages in a topic (--time -1 returns each partition's latest offset; --time -2 would return the earliest):
./kafka-run-class.sh kafka.tools.GetOffsetShell --topic second --time -1 --broker-list <VM-IP>:9092
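The same count can be computed from a client. A rough sketch with sarama's Client.GetOffset, summing latest minus earliest offsets per partition; the topic name and broker address are assumptions taken from the other examples in this article:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"192.168.123.123:9092"}, sarama.NewConfig())
	if err != nil {
		fmt.Println("client connect err:", err)
		return
	}
	defer client.Close()

	partitions, err := client.Partitions("first")
	if err != nil {
		fmt.Println("get partitions err:", err)
		return
	}

	var total int64
	for _, p := range partitions {
		newest, err := client.GetOffset("first", p, sarama.OffsetNewest) // next offset to be written
		if err != nil {
			fmt.Println("get newest offset err:", err)
			return
		}
		oldest, err := client.GetOffset("first", p, sarama.OffsetOldest) // earliest retained offset
		if err != nil {
			fmt.Println("get oldest offset err:", err)
			return
		}
		total += newest - oldest
	}
	fmt.Println("messages in topic first:", total)
}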
List all consumer groups:
./kafka-consumer-groups.sh --bootstrap-server <VM-IP>:9092 --list
Change the number of partitions of a topic (the count can only be increased, never decreased):
./kafka-topics.sh --alter --topic al-test --partitions 2 --zookeeper zookeeper:2181
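Programmatically, the same increase can be done with sarama's ClusterAdmin.CreatePartitions; a sketch under the same connection assumptions as the CreateTopic example above, with the topic name and count taken from the command:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_1_0_0

	admin, err := sarama.NewClusterAdmin([]string{"192.168.123.123:9092"}, config)
	if err != nil {
		fmt.Println("admin connect err:", err)
		return
	}
	defer admin.Close()

	// The count is the new total number of partitions, not an increment;
	// passing nil lets the broker choose the replica assignments.
	if err := admin.CreatePartitions("al-test", 2, nil, false); err != nil {
		fmt.Println("create partitions err:", err)
		return
	}
	fmt.Println("partition count of al-test is now 2")
}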
Producer and consumer
Producer:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// A kafka client built on the third-party sarama library.
func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll          // wait for the leader and all in-sync replicas to ack
	config.Producer.Partitioner = sarama.NewRandomPartitioner // pick a random partition for each message
	config.Producer.Return.Successes = true                   // successfully delivered messages are returned on the Successes channel

	// Build a message.
	msg := &sarama.ProducerMessage{}
	msg.Topic = "first"
	msg.Value = sarama.StringEncoder("this is a test")

	// Connect to kafka.
	client, err := sarama.NewSyncProducer([]string{"192.168.123.123:9092", "192.168.123.123:9093", "192.168.123.123:9094"}, config)
	if err != nil {
		fmt.Println("producer closed, err:", err)
		return
	}
	defer client.Close()

	// Send the message.
	pid, offset, err := client.SendMessage(msg)
	if err != nil {
		fmt.Println("send msg failed, err:", err)
		return
	}
	fmt.Printf("pid:%v offset:%v\n", pid, offset)
}
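For higher throughput, sarama's asynchronous producer avoids blocking on every send. A minimal sketch under the same broker assumptions as the synchronous example above:
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true // must be true to read from Successes()
	config.Producer.Return.Errors = true

	producer, err := sarama.NewAsyncProducer([]string{"192.168.123.123:9092"}, config)
	if err != nil {
		fmt.Println("producer connect err:", err)
		return
	}
	defer producer.Close()

	// Sends go through the Input channel; acks and failures come back asynchronously.
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "first",
		Value: sarama.StringEncoder("this is an async test"),
	}

	select {
	case msg := <-producer.Successes():
		fmt.Printf("delivered to partition:%d offset:%d\n", msg.Partition, msg.Offset)
	case err := <-producer.Errors():
		fmt.Println("delivery failed, err:", err)
	}
}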
Consumer:
package main

import (
	"fmt"
	"sync"

	"github.com/Shopify/sarama"
)

var wg sync.WaitGroup

func main() {
	consumer, err := sarama.NewConsumer([]string{"192.168.123.123:9092", "192.168.123.123:9093", "192.168.123.123:9094"}, nil)
	if err != nil {
		fmt.Println("consumer connect err:", err)
		return
	}
	defer consumer.Close()

	// Get the list of partitions of the topic.
	partitions, err := consumer.Partitions("first")
	if err != nil {
		fmt.Println("get partitions failed, err:", err)
		return
	}

	for _, p := range partitions {
		// sarama.OffsetNewest: consume from the current offset; sarama.OffsetOldest: consume from the oldest offset.
		partitionConsumer, err := consumer.ConsumePartition("first", p, sarama.OffsetOldest)
		if err != nil {
			fmt.Println("partitionConsumer err:", err)
			continue
		}
		wg.Add(1)
		go func() {
			for m := range partitionConsumer.Messages() {
				fmt.Printf("key: %s, text: %s, offset: %d\n", string(m.Key), string(m.Value), m.Offset)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
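The partition-level consumer above does not remember its position across restarts. The usual production pattern is a consumer group, which commits offsets to kafka and balances partitions across instances. A minimal sketch, assuming the topic and brokers from the examples above and a hypothetical group name first-group:
package main

import (
	"context"
	"fmt"

	"github.com/Shopify/sarama"
)

// handler implements sarama.ConsumerGroupHandler.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for m := range claim.Messages() {
		fmt.Printf("topic:%s partition:%d offset:%d value:%s\n", m.Topic, m.Partition, m.Offset, string(m.Value))
		sess.MarkMessage(m, "") // record the offset so the group resumes here after a restart
	}
	return nil
}

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_1_0_0                      // consumer groups need a recent protocol version
	config.Consumer.Offsets.Initial = sarama.OffsetOldest // where to start when the group has no committed offset

	group, err := sarama.NewConsumerGroup([]string{"192.168.123.123:9092"}, "first-group", config)
	if err != nil {
		fmt.Println("consumer group connect err:", err)
		return
	}
	defer group.Close()

	for {
		// Consume blocks for one session and returns on rebalance; loop to rejoin.
		if err := group.Consume(context.Background(), []string{"first"}, handler{}); err != nil {
			fmt.Println("consume err:", err)
			return
		}
	}
}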