(Prometheus) Registering custom monitoring metrics with node_exporter: a quick demonstration

I. To register custom monitoring data with node_exporter through a collector, follow these steps:

1. Create a custom collector. A collector is a Go struct that implements the Collector interface. Inside node_exporter's collector package that interface is a single method, Update(ch chan<- prometheus.Metric) error, which gathers the current values and sends them on ch; the client library's underlying prometheus.Collector interface expresses the same idea as two methods (a minimal skeleton is sketched right after this list):

Collect(ch chan<- prometheus.Metric): collects the custom metrics and emits them on the channel.
Describe(ch chan<- *prometheus.Desc): describes the custom metrics via their descriptors.

2. In the Collect (or Update) method, write the code that gathers the desired custom metrics. Any collection technique works, for example reading a file, running a command, or making a network request.

3. In the Describe method, describe the custom metrics. Create a descriptor for each metric with prometheus.NewDesc and send it on the ch channel so the registry knows what will be exported.

4. Build and run the custom collector. Because the collector is compiled into node_exporter, running node_exporter on the monitored host is enough for Prometheus to be able to pull the custom metrics from it.

5. Configure the Prometheus server to scrape it. In the Prometheus configuration file, add the node_exporter instance as a scrape target, specifying its address and port so the server can reach it.

6. Start (or reload) the Prometheus server. On startup Prometheus loads the configuration file and begins pulling data from the custom collector along with the rest of the node_exporter metrics.

7. View the custom metrics in the Prometheus query UI. Once Prometheus is scraping the collector, you can query the new series in the expression browser and use them for monitoring and alerting.
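
The following is a minimal, illustrative skeleton of such a collector as it would live in node_exporter's collector package (the names demoCollector, "demo", and the gauge value 42 are made up for this sketch; registerCollector, defaultEnabled, namespace, and the Collector interface come from node_exporter itself, as the real example in section II below also shows):

package collector

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

type demoCollector struct {
	desc   *prometheus.Desc
	logger log.Logger
}

func init() {
	// Register the collector under the name "demo", enabled by default.
	registerCollector("demo", defaultEnabled, newDemoCollector)
}

func newDemoCollector(logger log.Logger) (Collector, error) {
	return &demoCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "demo", "value"),
			"An example value exported by the demo collector.",
			nil, nil,
		),
		logger: logger,
	}, nil
}

// Update is called on every scrape; it sends the current values on ch.
func (c *demoCollector) Update(ch chan<- prometheus.Metric) error {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	return nil
}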

II. A hands-on example

The goal is the same as in an earlier post: integrate the monitoring data into node_exporter, except that this time the integration is written in Go.
Link to the previous post:
netflow_exporter, a Python-based exporter for traffic between servers

1. Download node_exporter-master

Download link: click here to download

2. Add a custom collector under the collector directory. The code below defines custom metrics for the network traffic between servers.

// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !nonetworkflow
// +build !nonetworkflow

package collector

import (
	"fmt"
	"math"
	"os/exec"
	"strconv"
	"strings"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	// Subsystem name used for the custom metrics.
	customMetricsSubsystem = "netflow_metrics"
)

// customMetricsCollector implements node_exporter's Collector interface.
type customMetricsCollector struct {
	logger log.Logger
	//...
}


type netflow struct {
	//flowSend    prometheus.Gauge
	//flowReceive prometheus.Gauge
	//label       *prometheus.CounterVec
	flowSend    *prometheus.CounterVec
	flowReceive *prometheus.CounterVec
}

func NewMetrics(reg prometheus.Registerer) *netflow {
	m := &netflow{
		flowSend: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "flowSend",
				Help: "Current flowSend of the Host.",
			},
			[]string{"host"},
		),
		flowReceive: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "flowReceive",
				Help: "Current flowReceive of the Host.",
			},
			[]string{"host"},
		),
	}
	reg.MustRegister(m.flowSend)
	reg.MustRegister(m.flowReceive)
	return m
}

func init() {
	// 1. Register the custom collector with node_exporter by calling registerCollector().
	registerCollector("netflow_metrics", defaultEnabled, NewCustomMetricsCollector)
}

// 2. Factory function for customMetricsCollector; it is passed to registerCollector()
// above so node_exporter can create the collector.
func NewCustomMetricsCollector(logger log.Logger) (Collector, error) {
	return &customMetricsCollector{
		logger: logger,
	}, nil
}

// 3. Implement Update(); node_exporter calls it on every scrape via Collector.Collect().
func (c *customMetricsCollector) Update(ch chan<- prometheus.Metric) error {
	// This registry is only local scratch space; the accumulated metrics are
	// forwarded onto ch at the end of this function.
	reg := prometheus.NewRegistry()

	// Create new metrics and register them using the custom registry.
	m := NewMetrics(reg)
	//var metricType prometheus.ValueType

	//metricType = prometheus.CounterValue

	// Run iftop once and keep only the paired per-host send/receive lines.
	result, err := exec.Command("sh", "-c", "iftop -t  -N -n -s 1 2>/dev/null |grep -A 1 -E '^   [0-9]'").Output()
	if err != nil {
		fmt.Println("Error:", err, string(result))
	} else {
		lines := strings.Split(string(result), "\n")
		fmt.Println(lines)
		fmt.Println(len(lines) - 1)
		// iftop prints two lines per remote host: the first is the send
		// direction, the second is the receive direction.
		var count = 1
		var groupnum = 0
		all_netflow := make(map[string]string)
		//send_rece_arr := make([]float64, 6)
		var send_flow = ""
		for i := 0; i < len(lines)-1; i++ {
			//fmt.Println("--------------------------------------++++")
			//fmt.Println(lines[i])
			slice0 := strings.Split(lines[i], " ")
			if (count % 2) != 0 {
				groupnum++
				//send direction
				send_arr := make([]string, 7)
				var scount = 0
				//fmt.Printf("group %d send traffic: %s\n", groupnum, slice0)
				for _, v := range slice0 {
					if v != "" {
						//fmt.Println(v)
						send_arr[scount] = v
						scount++
					}
				}
				fmt.Println(send_arr)
				fmt.Println(send_arr[1], send_arr[len(send_arr)-2])
				send_flow = send_arr[len(send_arr)-2]
			} else {

				//receive direction
				rece_arr := make([]string, 6)
				var rcount = 0
				for _, v := range slice0 {
					if v != "" {
						//fmt.Println(v)
						rece_arr[rcount] = v
						rcount++
					}
				}
				fmt.Println(rece_arr)
				fmt.Println(rece_arr[0], rece_arr[len(rece_arr)-2])
				//transformtokb(send_flow)
				rece_flow := rece_arr[len(rece_arr)-2]
				fmt.Println(transformtokb(send_flow), transformtokb(rece_flow))
				send_flow = strconv.FormatFloat(transformtokb(send_flow), 'f', 2, 64)
				rece_flow = strconv.FormatFloat(transformtokb(rece_flow), 'f', 2, 64)
				all_netflow[rece_arr[0]] = send_flow + "|" + rece_flow
				send_flow = ""
			}
			count++
		}
		// Traffic collection finished.

		// Split each "send|receive" string into per-host send/receive values.

		netflow_send := make(map[string]float64)
		netflow_rece := make(map[string]float64)
		for desthost, flowsize := range all_netflow {
			p_send := 0.00
			p_rece := 0.00
			p_send, _ = strconv.ParseFloat(strings.Split(flowsize, "|")[0], 64)
			p_rece, _ = strconv.ParseFloat(strings.Split(flowsize, "|")[1], 64)
			netflow_send[desthost] = p_send
			netflow_rece[desthost] = p_rece
			m.flowSend.With(prometheus.Labels{"host": desthost}).Add(p_send)
			m.flowReceive.With(prometheus.Labels{"host": desthost}).Add(p_rece)

		}

		// Forward the accumulated counters onto node_exporter's metric channel;
		// without this the collected values would never actually be exported.
		m.flowSend.Collect(ch)
		m.flowReceive.Collect(ch)
	}
	return nil
}

// transformtokb normalises iftop's b/Kb/Mb readings to Kb.
func transformtokb(flow string) float64 {
	newstr := ""
	for _, v := range flow {
		if v == 'K' {
			// Already in Kb: parse and return as-is.
			val, err := strconv.ParseFloat(newstr, 64)
			if err != nil {
				fmt.Println("transformtokb v is K", err)
			}
			return val
		} else if v == 'b' {
			// Plain bits: convert to Kb.
			val, err := strconv.ParseFloat(newstr, 64)
			if err != nil {
				fmt.Println("transformtokb v is b", err)
			}
			return math.Round(val/1024*1000) / 1000
		} else if v == 'M' {
			// Mb: convert to Kb.
			val, err := strconv.ParseFloat(newstr, 64)
			if err != nil {
				fmt.Println("transformtokb v is M", err)
			}
			return math.Round(val*1024*1000) / 1000
		}
		newstr += string(v)
	}
	return 0
}
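
Two notes on the code above. First, because init() calls registerCollector("netflow_metrics", defaultEnabled, ...), node_exporter automatically generates enable/disable flags for it (--collector.netflow_metrics / --no-collector.netflow_metrics), and the collector is on by default. Second, the CounterVecs are only forwarded to ch at the end of Update(); the built-in node_exporter collectors usually skip the intermediate registry and instead emit const metrics with a node_-prefixed descriptor on every scrape, which is also the shape of the node_netflow_* series shown in the sample output further down. A rough sketch of that style, reusing the loop over all_netflow (the descriptor names here are illustrative, not taken from the code above):

var (
	sendDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "netflow", "send"),
		"Send netflow to remote host (Kb).",
		[]string{"remote_host"}, nil,
	)
	receiveDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "netflow", "receive"),
		"Receive netflow from remote host (Kb).",
		[]string{"remote_host"}, nil,
	)
)

// Inside Update(): emit one sample per remote host on every scrape.
for desthost, flowsize := range all_netflow {
	parts := strings.Split(flowsize, "|")
	send, _ := strconv.ParseFloat(parts[0], 64)
	rece, _ := strconv.ParseFloat(parts[1], 64)
	ch <- prometheus.MustNewConstMetric(sendDesc, prometheus.GaugeValue, send, desthost)
	ch <- prometheus.MustNewConstMetric(receiveDesc, prometheus.GaugeValue, rece, desthost)
}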

3. Build

Follow the earlier post on building and running a Go project on Linux.
You can also install the Go environment like this:

[root@hadoop1011 ~]# mkdir -p /usr/local/go1.20.12
[root@hadoop1011 ~]# tar -zxvf go1.20.12.linux-amd64.tar.gz -C /usr/local/go1.20.12/

[root@hadoop1011 ~]# vim /etc/profile.d/go.sh 
export PATH=$PATH:/usr/local/go1.20.12/go/bin
[root@hadoop1011 ~]# source /etc/profile
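
With the Go toolchain on PATH, build the exporter from the source tree before starting it. The usual options are the repository's own make target or a plain go build (the exact invocation is the conventional one, adjust the paths to your environment):

[root@hadoop1011 ~]# cd node_exporter-master
[root@hadoop1011 node_exporter-master]# make build        # or: go build -o node_exporter .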
[root@hadoop1011 node_exporter-master]# ./node_exporter 

Open a new terminal window:

[root@hadoop1011 node_exporter-master]# curl -L 127.0.0.1:9100/metrics|grep netflow
# HELP node_netflow_receive Receive netflow from remote host.
# TYPE node_netflow_receive gauge
node_netflow_receive{remote_host="192.168.196.73"} 0.813
node_netflow_receive{remote_host="192.168.10.100"} 2529.28
node_netflow_receive{remote_host="192.168.10.101"} 2068.48
node_netflow_receive{remote_host="192.168.10.103"} 1853.44
node_netflow_receive{remote_host="192.168.10.104"} 1710.08
node_netflow_receive{remote_host="192.168.10.99"} 1792
# HELP node_netflow_send Send remote host netflow.
# TYPE node_netflow_send gauge
node_netflow_send{remote_host="192.168.196.73"} 1.72
node_netflow_send{remote_host="192.168.10.100"} 2109.44
node_netflow_send{remote_host="192.168.10.101"} 2805.76
node_netflow_send{remote_host="192.168.10.103"} 2088.96
node_netflow_send{remote_host="192.168.10.104"} 2529.28
node_netflow_send{remote_host="192.168.10.99"} 2078.72
node_scrape_collector_duration_seconds{collector="netflow"} 12.064428387
node_scrape_collector_success{collector="netflow"} 1
[root@hadoop1011 node_exporter-master]#
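
To get these series into Prometheus itself (steps 5-7 in section I), nothing collector-specific needs to be configured on the server side; the node_exporter instance is simply added as a scrape target. A minimal scrape job might look like the following, where the hostname hadoop1011 and the default port 9100 are assumed from the transcript above:

scrape_configs:
  - job_name: "node"
    static_configs:
      - targets: ["hadoop1011:9100"]

After reloading Prometheus, the node_netflow_send and node_netflow_receive series can be queried in the expression browser and used in alert rules.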

That wraps up this introduction to registering custom monitoring data with node_exporter; further optimisation will follow in later posts. Comments, shares, likes, and bookmarks are all welcome!
I also write on the WeChat official account 运维仙人 and look forward to your follow.
