Previous post: Go Message Middleware NSQ Series (4): apps/nsq_to_file source code reading
1. Topic/Channel
Topic/Channel is an implementation of the publish/subscribe model: a Topic corresponds to publishing, and a Channel corresponds to subscribing.
A Channel is the abstraction through which consumers subscribe to a particular Topic. For every message sent to a Topic, nsqd delivers a copy to each of that Topic's Channels, while within a single Channel each message is delivered only once: if a Channel has multiple consumers, one of them is picked to receive the message. This delivery scheme can be used to load-balance work across consumers.
When delivering messages to consumers, a Channel maintains two queues: an inFlight queue holding messages that have been sent but not yet acknowledged as successfully delivered, and a deferred queue holding messages whose delivery must be delayed.
Messages in the inFlight queue can fail by timing out, and messages in the deferred queue must be redelivered once their scheduled time arrives. Giving every message in both queues its own timer would be far too expensive, so nsq scans the queues periodically instead.
The scan work is split across multiple workers, which, much like multithreading, raises throughput. For the scanning strategy nsq borrows Redis's probabilistic expiration algorithm and also adjusts the number of workers dynamically; together these optimizations balance efficiency against resource usage.
A Channel belongs to a specific Topic and can be thought of as one level below it; a single Topic can have zero or more Channels.
Like Topics, Channels come in permanent and ephemeral variants: a permanent Channel is destroyed only by explicit deletion, while an ephemeral Channel is destroyed when its last consumer disconnects. The sketch below makes the delivery semantics concrete.
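To keep it concrete, here is a minimal, self-contained sketch (hypothetical types, not the real nsqd implementation): a published message is copied to every channel of the topic, and within each channel only one consumer receives it.
package main

import "fmt"

type message string

// consumer is just a Go channel the subscriber reads from.
type consumer chan message

// channel holds the consumers subscribed to it and picks one per message.
type channel struct {
	name      string
	consumers []consumer
	next      int
}

func (c *channel) deliver(m message) {
	if len(c.consumers) == 0 {
		return // nsqd would hold the message until a consumer connects
	}
	// only one consumer per channel receives each message
	// (round-robin here; nsqd effectively picks whichever consumer is ready)
	c.consumers[c.next%len(c.consumers)] <- m
	c.next++
}

// topic fans a published message out to every channel.
type topic struct {
	channels []*channel
}

func (t *topic) publish(m message) {
	for _, c := range t.channels {
		c.deliver(m) // each channel gets its own copy
	}
}

func main() {
	worker1, worker2 := make(consumer, 1), make(consumer, 1)
	orders := &channel{name: "orders-worker", consumers: []consumer{worker1, worker2}}
	audit := &channel{name: "audit", consumers: []consumer{make(consumer, 2)}}
	t := &topic{channels: []*channel{orders, audit}}

	t.publish("order#1")
	t.publish("order#2")

	// the two messages are load-balanced across worker1 and worker2,
	// while the audit channel received copies of both
	fmt.Println(<-worker1, <-worker2)
}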
2. What this read of the nsqd source can teach you
- how sync.Once and sync.WaitGroup are used
- how HTTP handlerFunc decorators are composed to separate business logic from logging, stats, version output and other concerns: a taste of functional programming in Go (see http_api/api_response.go and the sketch after this list)
- atomic.Value
- how exitFunc() inside nsqd.Main() handles errors
- runtime.ReadMemStats() for gathering memory statistics (stats.go)
- and more
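To give a feel for the decorator point above, here is a condensed sketch of the pattern with simplified signatures (the real nsq code in http_api/api_response.go also threads httprouter.Params and negotiates response versions):
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// APIHandler is the plain business-logic signature.
type APIHandler func(w http.ResponseWriter, req *http.Request) (interface{}, error)

// Decorator wraps one APIHandler and returns another, so cross-cutting
// concerns (logging, stats, response formatting) stay out of the handlers.
type Decorator func(APIHandler) APIHandler

// Log times each request and logs method, path and duration.
func Log(next APIHandler) APIHandler {
	return func(w http.ResponseWriter, req *http.Request) (interface{}, error) {
		start := time.Now()
		resp, err := next(w, req)
		log.Printf("%s %s took %s", req.Method, req.URL.Path, time.Since(start))
		return resp, err
	}
}

// PlainText writes the handler's result as a plain-text body.
func PlainText(next APIHandler) APIHandler {
	return func(w http.ResponseWriter, req *http.Request) (interface{}, error) {
		resp, err := next(w, req)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return nil, err
		}
		fmt.Fprint(w, resp)
		return resp, nil
	}
}

// Decorate applies the decorators in order and adapts the result to http.HandlerFunc.
func Decorate(f APIHandler, ds ...Decorator) http.HandlerFunc {
	decorated := f
	for _, d := range ds {
		decorated = d(decorated)
	}
	return func(w http.ResponseWriter, req *http.Request) {
		decorated(w, req)
	}
}

func pingHandler(w http.ResponseWriter, req *http.Request) (interface{}, error) {
	return "OK", nil
}

func main() {
	http.HandleFunc("/ping", Decorate(pingHandler, Log, PlainText))
	log.Fatal(http.ListenAndServe(":8080", nil))
}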
3. apps/nsqd is the program entry point; the nsqd directory contains the main implementation
nsqd receives, stores and delivers messages to clients. It can be deployed standalone or as multiple nodes. It listens on two ports: one serving clients (4150) and one serving the HTTP API (4151); an HTTPS listener (4152) can also be enabled via configuration.
/*
type Service interface {
// Init is called before the program/service is started and after it's
// determined if the program is running as a Windows Service. This method must
// be non-blocking.
Init(Environment) error
// Start is called after Init. This method must be non-blocking.
Start() error
// Stop is called in response to syscall.SIGINT, syscall.SIGTERM, or when a
// Windows Service is stopped.
Stop() error
}
*/
// program wraps the nsqd instance and implements the go-svc Service interface shown above
type program struct {
once sync.Once
nsqd *nsqd.NSQD
}
func main() {
prg := &program{}
// svc.Run comes from the go-svc compatibility package:
// it calls prg.Init() first, then prg.Start(), and calls Stop() on error or on shutdown
if err := svc.Run(prg, syscall.SIGINT, syscall.SIGTERM); err != nil {
logFatal("%s", err)
}
}
func (p *program) Init(env svc.Environment) error {
if env.IsWindowsService() {
dir := filepath.Dir(os.Args[0])
return os.Chdir(dir)
}
return nil
}
func (p *program) Start() error {
opts := nsqd.NewOptions()
flagSet := nsqdFlagSet(opts)
// ... omitted
// resolve each Option from command-line flags, config file values and defaults, in descending priority
options.Resolve(opts, flagSet, cfg)
nsqd, err := nsqd.New(opts)
p.nsqd = nsqd
// load previously persisted metadata (topics/channels)
err = p.nsqd.LoadMetadata()
// serialize and persist the latest metadata
err = p.nsqd.PersistMetadata()
go func() {
// main entry point of the nsqd server
// starts the required TCP server, HTTP server, queueScanLoop and lookupLoop
// optionally starts the HTTPS server and statsdLoop, depending on configuration
err := p.nsqd.Main()
if err != nil {
p.Stop()
os.Exit(1)
}
}()
return nil
}
func (p *program) Stop() error {
p.once.Do(func() {
// shut down all listeners (TCP, HTTP, HTTPS),
// then call PersistMetadata() to save the latest metadata,
// close all topics and channels,
// and exit once all cleanup work has finished
p.nsqd.Exit()
})
return nil
}
// 1. Command line flag
// 2. Deprecated command line flag
// 3. Config file value
// 4. Get() value (if Getter)
// 5. Options struct default value
The nsqd program starts in main(); the most important piece is program.Start(), which does the following:
- Resolve each Option from command-line flags, config file values and defaults, in descending priority (the order listed above).
- Call nsqd.LoadMetadata(), which loads the persisted metadata (topics and channels) from datapath/nsq.data. If there is no previously saved data, this is treated as a brand-new start. While the metadata is being loaded, channels are not fetched via lookupd; they are created from the metadata that was read, with each topic stored in the topics map and each channel in its channels map. A topic marked as paused stays blocked inside its messagePump, and a paused channel stops its consumers from receiving messages. Finally each topic's start method is called, and the asynchronous messagePump() receives the start notification and leaves its blocked state.
- Call nsqd.PersistMetadata(), which packs the topics, channels and version into a map, serializes it to a JSON string, writes it to a temporary file, waits for the write to reach disk via file.Sync() (which calls syscall.Fsync), and finally renames the temporary file over datapath/nsq.data (see the sketch after this list).
- Run nsqd.Main(), the real entry point, in a separate goroutine.
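As a sketch of the persist step (a hypothetical helper, not nsqd's actual function), the write-temp-file / fsync / rename pattern looks roughly like this:
package main

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
)

// persistJSON writes v to path atomically: serialize to JSON, write it to a
// temporary file, fsync that file, then rename it over the target so readers
// never observe a partially written metadata file.
func persistJSON(path string, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	tmpPath := fmt.Sprintf("%s.%d.tmp", path, rand.Int()) // unique temp name
	f, err := os.OpenFile(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // file.Sync() -> syscall.Fsync, wait for the data to hit disk
		f.Close()
		return err
	}
	f.Close()
	// atomically replace the previous metadata file
	return os.Rename(tmpPath, path)
}

func main() {
	// illustrative payload only; the real metadata holds topics, channels and the version
	meta := map[string]interface{}{
		"version": "x.y.z",
		"topics":  []string{"orders"},
	}
	if err := persistJSON("nsq.data", meta); err != nil {
		fmt.Println("persist failed:", err)
	}
}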
The nsqd.Main() method:
func (n *NSQD) Main() error {
ctx := &context{n}
exitCh := make(chan error)
var once sync.Once
exitFunc := func(err error) {
once.Do(func() {
if err != nil {
n.logf(LOG_FATAL, "%s", err)
}
exitCh <- err
})
}
// start the required TCP server, HTTP server, queueScanLoop and lookupLoop;
// the HTTPS server and statsdLoop are optional and depend on configuration
// protocol.TCPServer runs the TCP service and keeps accepting new client connections via accept()
// tcpServer implements the handler interface and reads the protocol magic header to pick a version (e.g. V1, V2); this keeps protocol upgrades loosely coupled and the code simple, maintainable and readable
// each protocol version is implemented in its own file, e.g. protocol_v1.go and protocol_v2.go
tcpServer := &tcpServer{ctx: ctx}
n.waitGroup.Wrap(func() {
exitFunc(protocol.TCPServer(n.tcpListener, tcpServer, n.logf))
})
httpServer := newHTTPServer(ctx, false, n.getOpts().TLSRequired == TLSRequired)
n.waitGroup.Wrap(func() {
exitFunc(http_api.Serve(n.httpListener, httpServer, "HTTP", n.logf))
})
if n.tlsConfig != nil && n.getOpts().HTTPSAddress != "" {
httpsServer := newHTTPServer(ctx, true, true)
n.waitGroup.Wrap(func() {
exitFunc(http_api.Serve(n.httpsListener, httpsServer, "HTTPS", n.logf))
})
}
n.waitGroup.Wrap(n.queueScanLoop)
n.waitGroup.Wrap(n.lookupLoop)
if n.getOpts().StatsdAddress != "" {
n.waitGroup.Wrap(n.statsdLoop)
}
err := <-exitCh
return err
}
nsqd.Main() starts four main goroutines (tcpServer, httpServer, queueScanLoop, lookupLoop); the HTTPS server and statsdLoop are optional and depend on configuration.
- tcpServer starts the TCP service and blocks in Accept() waiting for client connections. Each accepted connection is handled by a new goroutine, which reads 4 bytes from the stream to determine the protocol version (sketched below) and then calls prot.IOLoop(clientConn). Inside IOLoop() the server processes the commands the client sends, and it also starts a messagePump goroutine that pushes heartbeats and messages back to the client.
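The version handshake in tcpServer's connection handler looks roughly like this (condensed from nsqd/tcp.go with error handling and logging trimmed; details can differ between nsq versions). The IOLoop it then hands the connection to follows.
func (p *tcpServer) Handle(clientConn net.Conn) {
// the client sends a 4-byte magic header (e.g. "  V2") so the server knows
// which protocol version to speak for the rest of the connection
buf := make([]byte, 4)
if _, err := io.ReadFull(clientConn, buf); err != nil {
clientConn.Close()
return
}
var prot protocol.Protocol
switch string(buf) {
case "  V2":
prot = &protocolV2{ctx: p.ctx}
default:
// unknown magic: tell the client and drop the connection
protocol.SendFramedResponse(clientConn, frameTypeError, []byte("E_BAD_PROTOCOL"))
clientConn.Close()
return
}
// everything after the handshake happens inside the protocol's IOLoop
if err := prot.IOLoop(clientConn); err != nil {
clientConn.Close()
}
}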
func (p *protocolV2) IOLoop(conn net.Conn) error {
// start messagePump in its own goroutine, but block here until it signals readiness, so the client's other fields are fully initialized first
messagePumpStartedChan := make(chan bool)
go p.messagePump(client, messagePumpStartedChan)
<-messagePumpStartedChan
for {
if client.HeartbeatInterval > 0 {
client.SetReadDeadline(time.Now().Add(client.HeartbeatInterval * 2))
} else {
client.SetReadDeadline(zeroTime)
}
// ReadSlice does not allocate new space for the data each request
// ie. the returned slice is only valid until the next call to it
line, err = client.Reader.ReadSlice('\n')
if err != nil {
if err == io.EOF {
err = nil
} else {
err = fmt.Errorf("failed to read command - %s", err)
}
break
}
// trim the '\n'
line = line[:len(line)-1]
// optionally trim the '\r'
if len(line) > 0 && line[len(line)-1] == '\r' {
line = line[:len(line)-1]
}
params := bytes.Split(line, separatorBytes)
p.ctx.nsqd.logf(LOG_DEBUG, "PROTOCOL(V2): [%s] %s", client, params)
// dispatch the protocol command
var response []byte
response, err = p.Exec(client, params)
// on error, send an error frame back to the client
if err != nil {
// ... omitted
sendErr := p.Send(client, frameTypeError, []byte(err.Error()))
// ... omitted
}
if response != nil {
err = p.Send(client, frameTypeResponse, response)
// ... omitted
}
}
}
// Exec dispatches a client command to its handler
func (p *protocolV2) Exec(client *clientV2, params [][]byte) ([]byte, error) {
if bytes.Equal(params[0], []byte("IDENTIFY")) {
return p.IDENTIFY(client, params)
}
err := enforceTLSPolicy(client, p, params[0])
if err != nil {
return nil, err
}
switch {
case bytes.Equal(params[0], []byte("FIN")):
return p.FIN(client, params)
case bytes.Equal(params[0], []byte("RDY")):
return p.RDY(client, params)
case bytes.Equal(params[0], []byte("REQ")):
return p.REQ(client, params)
case bytes.Equal(params[0], []byte("PUB")):
return p.PUB(client, params)
case bytes.Equal(params[0], []byte("MPUB")):
return p.MPUB(client, params)
case bytes.Equal(params[0], []byte("DPUB")):
return p.DPUB(client, params)
case bytes.Equal(params[0], []byte("NOP")):
return p.NOP(client, params)
case bytes.Equal(params[0], []byte("TOUCH")):
return p.TOUCH(client, params)
case bytes.Equal(params[0], []byte("SUB")):
return p.SUB(client, params)
case bytes.Equal(params[0], []byte("CLS")):
return p.CLS(client, params)
case bytes.Equal(params[0], []byte("AUTH")):
return p.AUTH(client, params)
}
return nil, protocol.NewFatalClientErr(nil, "E_INVALID", fmt.Sprintf("invalid command %s", params[0]))
}
- newHTTPServer starts the HTTP API service: publishing messages, viewing statistics, creating/deleting/pausing/emptying topics and channels, updating configuration, and debugging/profiling endpoints.
func newHTTPServer(ctx *context, tlsEnabled bool, tlsRequired bool) *httpServer {
log := http_api.Log(ctx.nsqd.logf)
router := httprouter.New()
router.HandleMethodNotAllowed = true
router.PanicHandler = http_api.LogPanicHandler(ctx.nsqd.logf)
router.NotFound = http_api.LogNotFoundHandler(ctx.nsqd.logf)
router.MethodNotAllowed = http_api.LogMethodNotAllowedHandler(ctx.nsqd.logf)
s := &httpServer{
ctx: ctx,
tlsEnabled: tlsEnabled,
tlsRequired: tlsRequired,
router: router,
}
// routes use the httprouter web framework
// each path handler is wrapped decorator-style,
// separating business logic from stats, version output and other cross-cutting concerns
router.Handle("GET", "/ping", http_api.Decorate(s.pingHandler, log, http_api.PlainText))
router.Handle("GET", "/info", http_api.Decorate(s.doInfo, log, http_api.V1))
// v1 negotiate
router.Handle("POST", "/pub", http_api.Decorate(s.doPUB, http_api.V1))
router.Handle("POST", "/mpub", http_api.Decorate(s.doMPUB, http_api.V1))
router.Handle("GET", "/stats", http_api.Decorate(s.doStats, log, http_api.V1))
// only v1
router.Handle("POST", "/topic/create", http_api.Decorate(s.doCreateTopic, log, http_api.V1))
router.Handle("POST", "/topic/delete", http_api.Decorate(s.doDeleteTopic, log, http_api.V1))
router.Handle("POST", "/topic/empty", http_api.Decorate(s.doEmptyTopic, log, http_api.V1))
router.Handle("POST", "/topic/pause", http_api.Decorate(s.doPauseTopic, log, http_api.V1))
router.Handle("POST", "/topic/unpause", http_api.Decorate(s.doPauseTopic, log, http_api.V1))
router.Handle("POST", "/channel/create", http_api.Decorate(s.doCreateChannel, log, http_api.V1))
router.Handle("POST", "/channel/delete", http_api.Decorate(s.doDeleteChannel, log, http_api.V1))
router.Handle("POST", "/channel/empty", http_api.Decorate(s.doEmptyChannel, log, http_api.V1))
router.Handle("POST", "/channel/pause", http_api.Decorate(s.doPauseChannel, log, http_api.V1))
router.Handle("POST", "/channel/unpause", http_api.Decorate(s.doPauseChannel, log, http_api.V1))
router.Handle("GET", "/config/:opt", http_api.Decorate(s.doConfig, log, http_api.V1))
router.Handle("PUT", "/config/:opt", http_api.Decorate(s.doConfig, log, http_api.V1))
// debug
router.HandlerFunc("GET", "/debug/pprof/", pprof.Index)
router.HandlerFunc("GET", "/debug/pprof/cmdline", pprof.Cmdline)
router.HandlerFunc("GET", "/debug/pprof/symbol", pprof.Symbol)
router.HandlerFunc("POST", "/debug/pprof/symbol", pprof.Symbol)
router.HandlerFunc("GET", "/debug/pprof/profile", pprof.Profile)
router.Handler("GET", "/debug/pprof/heap", pprof.Handler("heap"))
router.Handler("GET", "/debug/pprof/goroutine", pprof.Handler("goroutine"))
router.Handler("GET", "/debug/pprof/block", pprof.Handler("block"))
router.Handle("PUT", "/debug/setblockrate", http_api.Decorate(setBlockRateHandler, log, http_api.PlainText))
router.Handler("GET", "/debug/pprof/threadcreate", pprof.Handler("threadcreate"))
return s
}
- queueScanLoop has two jobs (the random channel selection it relies on is sketched below):
1. Every QueueScanInterval (100ms by default), pick up to QueueScanSelectionCount (20 by default) random channels and hand them to queueScanWorker() goroutines, which process each channel's InFlightQueue (messages being delivered) and DeferredQueue (delayed messages), requeueing timed-out or due messages and reporting the channel as dirty if any work was done.
2. Every QueueScanRefreshInterval (5s by default), resize the pool of queueScanWorker() goroutines, growing or shrinking it based on the number of channels.
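The random, duplicate-free channel selection relies on a helper like util.UniqRands; the sketch below shows the idea as a partial Fisher-Yates shuffle (not necessarily the exact nsq implementation). The real resizePool and queueScanLoop code follows.
package main

import (
	"fmt"
	"math/rand"
)

// uniqRands returns `quantity` unique random indices in [0, maxval),
// by shuffling only the first `quantity` positions of an index slice.
func uniqRands(quantity int, maxval int) []int {
	if quantity > maxval {
		quantity = maxval
	}
	idx := make([]int, maxval)
	for i := range idx {
		idx[i] = i
	}
	for i := 0; i < quantity; i++ {
		// pick a random element from the not-yet-chosen tail and swap it into position i
		j := i + rand.Intn(maxval-i)
		idx[i], idx[j] = idx[j], idx[i]
	}
	return idx[:quantity]
}

func main() {
	// e.g. pick up to 20 of the existing channels to scan
	fmt.Println(uniqRands(20, 5))  // only 5 channels exist: prints a permutation of 0..4
	fmt.Println(uniqRands(3, 100)) // 3 unique indices out of 100
}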
// resizePool adjusts the size of the pool of queueScanWorker goroutines
//
// 1 <= pool <= min(num * 0.25, QueueScanWorkerPoolMax)
//
func (n *NSQD) resizePool(num int, workCh chan *Channel, responseCh chan bool, closeCh chan int) {
idealPoolSize := int(float64(num) * 0.25)
if idealPoolSize < 1 {
idealPoolSize = 1
} else if idealPoolSize > n.getOpts().QueueScanWorkerPoolMax {
idealPoolSize = n.getOpts().QueueScanWorkerPoolMax
}
for {
if idealPoolSize == n.poolSize {
break
} else if idealPoolSize < n.poolSize {
// contract
closeCh <- 1
n.poolSize--
} else {
// expand
n.waitGroup.Wrap(func() {
n.queueScanWorker(workCh, responseCh, closeCh)
})
n.poolSize++
}
}
}
// queueScanWorker receives work (in the form of a channel) from queueScanLoop
// and processes the deferred and in-flight queues
func (n *NSQD) queueScanWorker(workCh chan *Channel, responseCh chan bool, closeCh chan int) {
for {
select {
case c := <-workCh:
now := time.Now().UnixNano()
dirty := false
// process messages currently in flight (delivered but not yet acknowledged)
if c.processInFlightQueue(now) {
dirty = true
}
// process deferred (delayed) messages that are now due
if c.processDeferredQueue(now) {
dirty = true
}
// report back so queueScanLoop can compute the dirty ratio
responseCh <- dirty
case <-closeCh:
return
}
}
}
func (n *NSQD) queueScanLoop() {
workCh := make(chan *Channel, n.getOpts().QueueScanSelectionCount)
responseCh := make(chan bool, n.getOpts().QueueScanSelectionCount)
closeCh := make(chan int)
workTicker := time.NewTicker(n.getOpts().QueueScanInterval)
refreshTicker := time.NewTicker(n.getOpts().QueueScanRefreshInterval)
// resize the worker pool once up front
channels := n.channels()
n.resizePool(len(channels), workCh, responseCh, closeCh)
for {
select {
case <-workTicker.C: // fires every QueueScanInterval (100ms by default)
if len(channels) == 0 {
continue
}
case <-refreshTicker.C: // every QueueScanRefreshInterval (5s by default): resize the worker pool
channels = n.channels()
n.resizePool(len(channels), workCh, responseCh, closeCh)
continue
case <-n.exitChan:
goto exit
}
// QueueScanSelectionCount defaults to 20 entries
num := n.getOpts().QueueScanSelectionCount
if num > len(channels) {
num = len(channels)
}
loop:
// pick unique, random channels to scan
for _, i := range util.UniqRands(num, len(channels)) {
workCh <- channels[i]
}
numDirty := 0
for i := 0; i < num; i++ {
if <-responseCh {
numDirty++
}
}
// if the dirty ratio exceeds QueueScanDirtyPercent (25% by default), scan another batch immediately
if float64(numDirty)/float64(num) > n.getOpts().QueueScanDirtyPercent {
goto loop
}
}
exit:
n.logf(LOG_INFO, "QUEUESCAN: closing")
close(closeCh)
workTicker.Stop()
refreshTicker.Stop()
}
- lookupLoop() connects to the configured lookupd (service discovery) addresses, sends a heartbeat every 15s to keep those connections alive, and listens for topic, channel and option-change notifications.
func (n *NSQD) lookupLoop() {
var lookupPeers []*lookupPeer
var lookupAddrs []string
// whether the lookupd addresses need to be (re)connected
connect := true
hostname, err := os.Hostname()
if err != nil {
n.logf(LOG_FATAL, "failed to get hostname - %s", err)
os.Exit(1)
}
// for announcements, lookupd determines the host automatically
// 15s heartbeat to keep the connection with every lookupd alive
ticker := time.Tick(15 * time.Second)
for {
if connect {
// connect to each lookupd address from the configuration, record the peers, then set connect back to false
for _, host := range n.getOpts().NSQLookupdTCPAddresses {
if in(host, lookupAddrs) {
continue
}
n.logf(LOG_INFO, "LOOKUP(%s): adding peer", host)
lookupPeer := newLookupPeer(host, n.getOpts().MaxBodySize, n.logf,
connectCallback(n, hostname))
lookupPeer.Command(nil) // start the connection
lookupPeers = append(lookupPeers, lookupPeer)
lookupAddrs = append(lookupAddrs, host)
}
n.lookupPeers.Store(lookupPeers)
connect = false
}
select {
case <-ticker:
// send a heartbeat and read a response (read detects closed conns)
for _, lookupPeer := range lookupPeers {
n.logf(LOG_DEBUG, "LOOKUPD(%s): sending heartbeat", lookupPeer)
cmd := nsq.Ping()
_, err := lookupPeer.Command(cmd)
if err != nil {
n.logf(LOG_ERROR, "LOOKUPD(%s): %s - %s", lookupPeer, cmd, err)
}
}
// a topic/channel update notification was received
case val := <-n.notifyChan:
var cmd *nsq.Command
var branch string
switch val.(type) {
case *Channel: // a channel was created or removed
// notify all nsqlookupds that a new channel exists, or that it's removed
branch = "channel"
channel := val.(*Channel)
if channel.Exiting() == true {
cmd = nsq.UnRegister(channel.topicName, channel.name)
} else {
cmd = nsq.Register(channel.topicName, channel.name)
}
case *Topic: // a topic was created or removed
// notify all nsqlookupds that a new topic exists, or that it's removed
branch = "topic"
topic := val.(*Topic)
if topic.Exiting() == true {
cmd = nsq.UnRegister(topic.name, "")
} else {
cmd = nsq.Register(topic.name, "")
}
}
// the command was built above; now send it to every lookupd peer
for _, lookupPeer := range lookupPeers {
n.logf(LOG_INFO, "LOOKUPD(%s): %s %s", lookupPeer, branch, cmd)
_, err := lookupPeer.Command(cmd)
if err != nil {
n.logf(LOG_ERROR, "LOOKUPD(%s): %s - %s", lookupPeer, cmd, err)
}
}
// the set of configured lookupd addresses changed; drop removed peers and reconnect
case <-n.optsNotificationChan:
var tmpPeers []*lookupPeer
var tmpAddrs []string
for _, lp := range lookupPeers {
if in(lp.addr, n.getOpts().NSQLookupdTCPAddresses) {
tmpPeers = append(tmpPeers, lp)
tmpAddrs = append(tmpAddrs, lp.addr)
continue
}
n.logf(LOG_INFO, "LOOKUP(%s): removing peer", lp)
lp.Close()
}
lookupPeers = tmpPeers
lookupAddrs = tmpAddrs
connect = true
case <-n.exitChan:
goto exit
}
}
exit:
n.logf(LOG_INFO, "LOOKUP: closing")
}
- statsdLoop periodically pushes nsqd's message statistics, memory usage, etc. over UDP to the configured statsd address; paired with Graphite and Grafana this enables much richer monitoring.
func (n *NSQD) statsdLoop() {
var lastMemStats memStats
var lastStats []TopicStats
// StatsdInterval ticker (60s by default)
interval := n.getOpts().StatsdInterval
ticker := time.NewTicker(interval)
for {
select {
case <-n.exitChan: // shutting down, stop pushing stats
goto exit
case <-ticker.C:
// destination statsd address and metric name prefix
addr := n.getOpts().StatsdAddress
prefix := n.getOpts().StatsdPrefix
// push the stats over UDP
conn, err := net.DialTimeout("udp", addr, time.Second)
if err != nil {
n.logf(LOG_ERROR, "failed to create UDP socket to statsd(%s)", addr)
continue
}
sw := writers.NewSpreadWriter(conn, interval-time.Second, n.exitChan)
// StatsdUDPPacketSize: 508
bw := writers.NewBoundaryBufferedWriter(sw, n.getOpts().StatsdUDPPacketSize)
client := statsd.NewClient(bw, prefix)
n.logf(LOG_INFO, "STATSD: pushing stats to %s", addr)
// collect stats for all topics
stats := n.GetStats("", "", false)
for _, topic := range stats {
// try to find the topic in the last collection
lastTopic := TopicStats{}
for _, checkTopic := range lastStats {
if topic.TopicName == checkTopic.TopicName {
lastTopic = checkTopic
break
}
}
// topic-level metrics
diff := topic.MessageCount - lastTopic.MessageCount
stat := fmt.Sprintf("topic.%s.message_count", topic.TopicName)
client.Incr(stat, int64(diff))
diff = topic.MessageBytes - lastTopic.MessageBytes
stat = fmt.Sprintf("topic.%s.message_bytes", topic.TopicName)
client.Incr(stat, int64(diff))
stat = fmt.Sprintf("topic.%s.depth", topic.TopicName)
client.Gauge(stat, topic.Depth)
stat = fmt.Sprintf("topic.%s.backend_depth", topic.TopicName)
client.Gauge(stat, topic.BackendDepth)
for _, item := range topic.E2eProcessingLatency.Percentiles {
stat = fmt.Sprintf("topic.%s.e2e_processing_latency_%.0f", topic.TopicName, item["quantile"]*100.0)
// We can cast the value to int64 since a value of 1 is the
// minimum resolution we will have, so there is no loss of
// accuracy
client.Gauge(stat, int64(item["value"]))
}
// channel-level metrics
for _, channel := range topic.Channels {
// try to find the channel in the last collection
lastChannel := ChannelStats{}
for _, checkChannel := range lastTopic.Channels {
if channel.ChannelName == checkChannel.ChannelName {
lastChannel = checkChannel
break
}
}
diff := channel.MessageCount - lastChannel.MessageCount
stat := fmt.Sprintf("topic.%s.channel.%s.message_count", topic.TopicName, channel.ChannelName)
client.Incr(stat, int64(diff))
stat = fmt.Sprintf("topic.%s.channel.%s.depth", topic.TopicName, channel.ChannelName)
client.Gauge(stat, channel.Depth)
stat = fmt.Sprintf("topic.%s.channel.%s.backend_depth", topic.TopicName, channel.ChannelName)
client.Gauge(stat, channel.BackendDepth)
stat = fmt.Sprintf("topic.%s.channel.%s.in_flight_count", topic.TopicName, channel.ChannelName)
client.Gauge(stat, int64(channel.InFlightCount))
stat = fmt.Sprintf("topic.%s.channel.%s.deferred_count", topic.TopicName, channel.ChannelName)
client.Gauge(stat, int64(channel.DeferredCount))
diff = channel.RequeueCount - lastChannel.RequeueCount
stat = fmt.Sprintf("topic.%s.channel.%s.requeue_count", topic.TopicName, channel.ChannelName)
client.Incr(stat, int64(diff))
diff = channel.TimeoutCount - lastChannel.TimeoutCount
stat = fmt.Sprintf("topic.%s.channel.%s.timeout_count", topic.TopicName, channel.ChannelName)
client.Incr(stat, int64(diff))
stat = fmt.Sprintf("topic.%s.channel.%s.clients", topic.TopicName, channel.ChannelName)
client.Gauge(stat, int64(channel.ClientCount))
for _, item := range channel.E2eProcessingLatency.Percentiles {
stat = fmt.Sprintf("topic.%s.channel.%s.e2e_processing_latency_%.0f", topic.TopicName, channel.ChannelName, item["quantile"]*100.0)
client.Gauge(stat, int64(item["value"]))
}
}
}
lastStats = stats
// memory statistics
if n.getOpts().StatsdMemStats {
// runtime.ReadMemStats(&ms)
ms := getMemStats()
client.Gauge("mem.heap_objects", int64(ms.HeapObjects))
client.Gauge("mem.heap_idle_bytes", int64(ms.HeapIdleBytes))
client.Gauge("mem.heap_in_use_bytes", int64(ms.HeapInUseBytes))
client.Gauge("mem.heap_released_bytes", int64(ms.HeapReleasedBytes))
client.Gauge("mem.gc_pause_usec_100", int64(ms.GCPauseUsec100))
client.Gauge("mem.gc_pause_usec_99", int64(ms.GCPauseUsec99))
client.Gauge("mem.gc_pause_usec_95", int64(ms.GCPauseUsec95))
client.Gauge("mem.next_gc_bytes", int64(ms.NextGCBytes))
client.Incr("mem.gc_runs", int64(ms.GCTotalRuns-lastMemStats.GCTotalRuns))
lastMemStats = ms
}
bw.Flush()
sw.Flush()
conn.Close()
}
}
exit:
ticker.Stop()
n.logf(LOG_INFO, "STATSD: closing")
}
That completes the entire nsqd startup flow!