How can we create a configuration for gobench with -benchmem – IDEs Support (IntelliJ Platform) | JetBrains
本机进行watermill-benchmark
使用 apifox 自动化测试上报固定数量的消息
启动watermill-pub/sub的 benchmark 函数
// BenchmarkPubSub measures end-to-end throughput of the watermill
// pub/sub pipeline: messages are consumed from Kafka, pushed through the
// unpack/init/analyze middleware chain, and re-published to the
// analyzer topics.
//
// The outer loop deliberately runs once (not b.N times): a single run
// already processes a fixed batch of messages, and the timer is stopped
// during router setup so only router execution is measured.
func BenchmarkPubSub(b *testing.B) {
	for i := 0; i < 1; i++ {
		b.StopTimer()
		logger := watermillzap.NewLogger(log.Logger)
		publisher, subscriber := consumer.NewPubSub(logger)
		router, err := message.NewRouter(message.RouterConfig{}, logger)
		if err != nil {
			log.Logger.Fatal("create router error", zap.Error(err))
		}
		router.AddPlugin(plugin.SignalsHandler)
		router.AddMiddleware(
			middleware.InstantAck,
			middleware.Recoverer,
		)
		router.AddMiddleware(consumer.UnpackKafkaMessage, consumer.InitPerformanceEvent, consumer.AnalyzeEvent)
		router.AddHandler("crash", "to_analyzer__0.PERF_CRASH", subscriber, "solar-dev.PERF_CRASH", publisher, consumer.CrashHandler)
		router.AddHandler("lag", "to_analyzer__0.PERF_LAG", subscriber, "solar-dev.PERF_LAG", publisher, consumer.LagHandler)
		// Watchdog: close the router once the fixed message budget has
		// been published. It uses its own error variable — the original
		// assigned to the outer `err`, racing with the Run call below.
		// NOTE(review): the loop is still a busy-wait that burns a full
		// core until the budget is reached; a ticker or a channel signal
		// from the publisher would be preferable. Reading
		// kafka.PublishCount without synchronization is also racy —
		// tolerable for a benchmark watchdog, but confirm PublishCount
		// is an atomic counter.
		go func() {
			for {
				if kafka.PublishCount >= 10000 && router.IsRunning() {
					closeErr := router.Close()
					fmt.Printf("router close err:%v\n", closeErr)
					break
				}
			}
		}()
		b.StartTimer()
		if err = router.Run(context.Background()); err != nil {
			log.Logger.Error("router run error", zap.Error(err))
		}
	}
}
单 topic
Event number | ms | ns/op | B/op | allocs/op |
---|---|---|---|---|
crash 10 | 698 | 0.01306 | 0 | 0 |
crash 100 | 701 | 0.01373 | 0 | 0 |
crash 1000 | 1143 | 1164375900 | 131129392 | 815484 |
crash 10000 | 6174 | 6160307300 | 1300089088 | 8179332 |
双 topic
Event number | ms | ns/op | B/op | allocs/op |
---|---|---|---|---|
crash & lag 10 | 689 | 0.01486 | 0 | 0 |
crash & lag 100 | 718 | 0.01438 | 0 | 0 |
crash & lag 1000 | 1661 | 1677269200 | 269949888 | 1797754 |
crash & lag 10000 | 11697 | 11573685900 | 2684430704 | 17945041 |
profile/internal/watermill/pubsub/consumer_stage.go
// crashHandler re-serializes an analyzed crash event and wraps it in a
// fresh outgoing message bound for Kafka, opening a producer span that
// the publisher ends once the write completes.
//
// On JSON marshal failure the event status is recorded, both the write
// span and the root span are closed, and the error is returned.
// NOTE(review): the status set here is StatusUnmarshalError even though
// the failing call is Marshal — confirm no marshal-specific constant
// exists before relying on the status value.
func crashHandler(msg *message.Message) ([]*message.Message, error) {
	data := GetDataFromMsg(msg)

	// Start the producer-side span under the event's root span context.
	writeKafkaCtx, span := otelclient.ConsumerTracer.Start(data.RootSpanCtx, "crashHandler",
		trace.WithSpanKind(trace.SpanKindProducer))
	setSpanAttributes(span, data)
	data.WriteKafkaSpan = span

	payload, marshalErr := json.Marshal(data.Event)
	if marshalErr != nil {
		data.Status = state.StatusUnmarshalError
		handlerErr(writeKafkaCtx, "marshal error", marshalErr)
		data.WriteKafkaSpan.End()
		data.RootSpan.End()
		return nil, marshalErr
	}

	// Build the outgoing message keyed by backend ID and tagged with the
	// event ID header so downstream consumers can correlate it.
	outMsg := message.NewMessage(data.Event.BackendID, payload)
	outMsg.Metadata.Set(watermillkafka.HeaderKey, data.Event.ID)
	log.Logger.Info("[4-crashHandler]", zap.String("topic", connector.GetTopic(data.Event.Category)), zap.String("id", data.Event.ID), zap.String("msg", string(payload)))
	SetDataInMsg(outMsg, data)
	return message.Messages{outMsg}, nil
}
profile/internal/watermill/watermillkafka/publisher.go
// Publish publishes message to Kafka.
//
// Publish is blocking and wait for ack from Kafka.
// When one of messages delivery fails - function is interrupted.
func (p *Publisher) Publish(topic string, msgs ...*message.Message) error {
	if p.closed {
		return errors.New("publisher closed")
	}

	// Reused across iterations; per-message keys are overwritten below.
	fields := make(watermill.LogFields, 4)
	fields["topic"] = topic

	for _, msg := range msgs {
		fields["message_uuid"] = msg.UUID
		p.logger.Trace("Sending message to Kafka", fields)

		kafkaMsg, marshalErr := p.config.Marshaler.Marshal(topic, msg)
		if marshalErr != nil {
			return errors.Wrapf(marshalErr, "cannot marshal message %s", msg.UUID)
		}

		// todo: add otel-trace and log about sendMessage
		data := pubsub.GetDataFromMsg(msg)
		partition, offset, sendErr := p.producer.SendMessage(kafkaMsg)
		if sendErr != nil {
			// Close both spans before bailing out so the trace is not
			// left dangling for the failed message.
			log.Logger.ErrorContext(msg.Context(), "send message to kafka error", zap.Error(sendErr))
			data.WriteKafkaSpan.End()
			data.RootSpan.End()
			return errors.Wrapf(sendErr, "cannot produce message %s", msg.UUID)
		}

		log.Logger.Info("[4-WriteKafka] write kafka success",
			zap.String("topic", connector.GetTopic(data.Event.Category)),
			zap.String("id", data.Event.ID), zap.Any("msg", data.Event),
			zap.String("profile_root_span_id", data.RootSpan.SpanContext().SpanID().String()))
		data.WriteKafkaSpan.End()

		fields["kafka_partition"] = partition
		fields["kafka_partition_offset"] = offset
		p.logger.Trace("Message sent to Kafka", fields)
		data.RootSpan.End()
	}

	return nil
}