Python爬取HAProxy数据——升级版

这是一个通过读取 HAProxy 统计页面的 ;csv 接口来爬取所需数据的脚本。

Python爬取HAProxy数据——升级版_第1张图片

# import json
# import logging
#from datetime import datetime
from requests import Session

class HAProxyServer():
    """
    Represents a single HAProxy instance to be polled via its stats page.

    params:
     - base_url(str)    - HAProxy address as "host:port[/path]" (no scheme)
     - user(str)        - user to authenticate with via basic auth (optional)
     - password(str)    - password to authenticate with via basic auth (optional)
     - verify_ssl(bool) - fail on SSL validation error. Default True.
     - timeout(int)     - request timeout in seconds. Default 5.
     - https(bool)      - prepend "https://" instead of "http://". Default False.
    """

    def __init__(self, base_url, user, password,
                 verify_ssl=True, timeout=5, https=False):
        self.failed = False
        self.verify = verify_ssl
        self.timeout = timeout
        self._session = Session()
        # Only attach basic-auth credentials when both are supplied.
        if user and password:
            self._session.auth = (user, password)

        # Host part (text before the first ':') doubles as a display name.
        self.name = base_url.split(':')[0]

        scheme = 'https://' if https else 'http://'
        # ';csv;norefresh' asks the HAProxy stats page for a one-shot CSV dump.
        self.url = scheme + base_url + '/;csv;norefresh'

        self.update()

    def update(self):
        """Fetch the stats CSV and (re)populate self.fields / self.values."""
        # Strip the leading "# " off the header line, then split into
        # non-empty rows.
        csv = [l for l in self._fetch().strip(' #').split('\n') if l]

        if self.failed:
            return

        # First row is the header: its columns become the field names (keys).
        self.fields = [f for f in csv.pop(0).split(',') if f]

        # Remaining rows hold one comma-separated value list per service.
        self.values = []
        for line in csv:
            self.values.append(line.split(','))

    def _fetch(self):
        """GET the stats URL; on any failure mark self.failed and return ''."""
        try:
            r = self._session.request('GET', self.url,
                                      timeout=self.timeout,
                                      verify=self.verify)
        except Exception as ex:
            self._fail(ex)
            return ""

        if not r.ok:
            self._fail(r.text)
            return ""

        return r.text

    def _fail(self, reason):
        """Record a fetch failure and log it."""
        # Local import: the module top only imports requests; the original
        # referenced an undefined name `log`, which raised NameError here.
        import logging
        self.failed = True
        logging.getLogger(__name__).error(
            'Error fetching stats from %s:\n%s', self.url, reason)


# Build a mapping for one CSV row
def dict_init(fields, values):
    """Pair each header field with its decoded value for a single row."""
    decoded = map(_decode, values)
    return dict(zip(fields, decoded))

def _decode(value):
    """
    decode byte strings and convert to int where needed
    """
    if value.isdigit():
        return int(value)
    if isinstance(value, bytes):
        return value.decode('utf-8')
    #将空值赋值为0
    if not value.strip():
        return 0
    else:
        return value

def findValue(tuble, pxname, svname, head):
    """
    Look up column *head* in the service dict *tuble*, but only when its
    pxname/svname identify the requested service.  Prints a notice and
    returns -1 on any mismatch or unknown column.
    """
    # Guard: wrong service row entirely.
    if tuble['pxname'] != pxname or tuble['svname'] != svname:
        print("pxname或svname错误!!")
        return -1
    # Guard: requested column does not exist in this row.
    if head not in tuble.keys():
        print('非法的head,重新输入!!')
        return -1
    return tuble[head]

# Connection info — fill in the real host and credentials.
url = '-------/stats;csv'
user = '----'
pwd = '----'
haproxy = HAProxyServer(url, user, pwd)

# Parsed stats: header fields and one value-list per service row.
heads = haproxy.fields
values = haproxy.values

while 1:
    pxname = input("请输入pxname:比如(www-balancer)")
    svname = input("请输入svname:比如(FRONTEND)")
    head = input("请输入head:比如(smax)")
    # Search EVERY row for the requested service.  The original loop broke
    # out on both branches, so only the first CSV row was ever examined and
    # any other service could never be found.
    for value in values:
        row = dict_init(heads, value)
        if row['pxname'] == pxname and row['svname'] == svname:
            print(findValue(row, pxname, svname, head))
            break
    else:
        print("pxname或svname错误!!")






后面的输出部分,可以改用 os 模块执行系统命令,把命令行参数作为查询条件来获取结果。

Python爬取HAProxy数据——升级版_第2张图片

 

你可能感兴趣的:(python,HAPROXY相关内容的获取)