date[2019-01-05]
Go concurrent programming: a case study
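The program below tails an nginx access log, parses each line into a structured Message, and writes the result to InfluxDB. Its concurrency pattern is a three-stage pipeline: a read module, a processing module, and a write module, each running in its own goroutine and connected by channels. The following is a stripped-down sketch of that pattern only; the stage names and string payloads are illustrative and not part of the real program.

package main

import "fmt"

func main() {
	rc := make(chan string) // read stage -> process stage
	wc := make(chan string) // process stage -> write stage
	done := make(chan struct{})

	go func() { // read stage: produce raw lines
		for _, line := range []string{"line 1", "line 2"} {
			rc <- line
		}
		close(rc)
	}()

	go func() { // process stage: turn raw lines into structured output
		for line := range rc {
			wc <- "parsed: " + line
		}
		close(wc)
	}()

	go func() { // write stage: persist (here: just print) the results
		for msg := range wc {
			fmt.Println(msg)
		}
		close(done)
	}()

	<-done // wait until the pipeline has drained
}

The real program below follows the same shape, with a file reader, a regexp-based parser, and an InfluxDB writer as the three stages.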
package main

import (
	"bufio"
	"flag"
	"fmt"
	"github.com/influxdata/influxdb/client/v2"
	"io"
	"log"
	"math/rand"
	"net/url"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"
)

type LogProcess struct {
	rc    chan []byte
	wc    chan *Message
	read  Reader
	write Writer
}

type Reader interface {
	Read(rc chan []byte)
}

type Writer interface {
	Write(wc chan *Message)
}

type ReadDataFromFile struct {
	path string
}

type WriteDataToInfluxDb struct {
	influxDBDsn string
}

type Message struct {
	TimeLocal                    time.Time
	BytesSent                    int
	Path, Method, Scheme, Status string
	UpstreamTime, RequestTime    float64
}
// Write module: read parsed messages from the write channel and store them in InfluxDB.
func (w *WriteDataToInfluxDb) Write(wc chan *Message) {
	// The DSN has the form addr@username@password@database@precision.
	infSlic := strings.Split(w.influxDBDsn, "@")
	// Initialize the InfluxDB client.
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     infSlic[0],
		Username: infSlic[1],
		Password: infSlic[2],
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Consume messages from the write channel.
	// Tags: Path, Method, Scheme, Status
	// Fields: BytesSent, UpstreamTime, RequestTime
	// Time: TimeLocal
	for v := range wc {
		// Create a new point batch.
		bp, err := client.NewBatchPoints(client.BatchPointsConfig{
			Database:  infSlic[3],
			Precision: infSlic[4],
		})
		if err != nil {
			log.Fatal(err)
		}

		// Create a point and add it to the batch.
		tags := map[string]string{"Path": v.Path, "Method": v.Method, "Scheme": v.Scheme, "Status": v.Status}
		fields := map[string]interface{}{
			"BytesSent":    v.BytesSent,
			"UpstreamTime": v.UpstreamTime,
			"RequestTime":  v.RequestTime,
		}
		pt, err := client.NewPoint("nginx_log", tags, fields, v.TimeLocal)
		if err != nil {
			log.Fatal(err)
		}
		bp.AddPoint(pt)

		// Write the batch; the client is closed once by the deferred Close above.
		if err := c.Write(bp); err != nil {
			log.Fatal(err)
		}
		log.Println("Write Success!")
	}
}
// Read module: tail the log file and push each new line onto the read channel.
func (r *ReadDataFromFile) Read(rc chan []byte) {
	// Open the file.
	f, err := os.Open(r.path)
	if err != nil {
		panic(fmt.Sprintf("open file error: %s", err.Error()))
	}
	// Start reading from the end of the file.
	f.Seek(0, io.SeekEnd)
	rd := bufio.NewReader(f)
	for {
		line, err := rd.ReadBytes('\n')
		if err == io.EOF {
			// No new data yet; wait a little and try again.
			time.Sleep(500 * time.Millisecond)
			continue
		} else if err != nil {
			panic(fmt.Sprintf("ReadBytes error: %s", err.Error()))
		}
		rc <- line
	}
}
// Processing module: parse each raw log line into a Message and push it onto the write channel.
func (lp *LogProcess) ProcessData() {
	// Matches: ip - - [time] "request" status bytes "referer" "user agent"
	r := regexp.MustCompile(`([\d\.]+)\s+([^ \[]+)\s+([^ \[]+)\s+\[([^\]]+)\]\s+\"([^"]+)\"\s+(\d{3})\s+(\d+)\s+\"([^"]+)\"\s+\"(.*?)\"`)
	rd := rand.New(rand.NewSource(time.Now().UnixNano()))
	loc, _ := time.LoadLocation("Asia/Shanghai")
	for v := range lp.rc {
		fmt.Println(string(v))
		ret := r.FindStringSubmatch(string(v))
		if len(ret) != 10 {
			log.Println("FindStringSubmatch fail:", string(v))
			continue
		}
		message := &Message{}

		// ret[4] looks like "05/Jan/2019:10:00:00 +0800".
		location, err := time.ParseInLocation("02/Jan/2006:15:04:05 -0700", ret[4], loc)
		if err != nil {
			log.Println("ParseInLocation fail:", err.Error(), ret[4])
		}
		message.TimeLocal = location

		// ret[7] is the number of bytes sent.
		byteSent, _ := strconv.Atoi(ret[7])
		message.BytesSent = byteSent

		// ret[5] looks like "GET /foo?query=t HTTP/1.0": method, request URI, protocol.
		reqSli := strings.Split(ret[5], " ")
		if len(reqSli) != 3 {
			log.Println("strings.Split fail:", ret[5])
			continue
		}
		message.Method = reqSli[0]
		message.Scheme = reqSli[2]
		u, err := url.Parse(reqSli[1])
		if err != nil {
			log.Println("url parse fail:", err)
			continue
		}
		message.Path = u.Path

		message.Status = ret[6]

		// This log format carries no upstream/request times, so fill them with random values.
		message.UpstreamTime = rd.Float64() * 4
		message.RequestTime = rd.Float64() * 4

		lp.wc <- message
	}
}
func main() {
	var path, influxDsn string
	flag.StringVar(&path, "path", "C:/soft/nginx-1.15.8/logs/access.log", "read file path")
	flag.StringVar(&influxDsn, "influxDsn", "http://127.0.0.1:8086@imooc@imoocpass@imooc@s", "influxdb data source: addr@username@password@database@precision")
	flag.Parse()

	r := &ReadDataFromFile{
		path: path,
	}
	w := &WriteDataToInfluxDb{
		influxDBDsn: influxDsn,
	}
	lp := &LogProcess{
		rc:    make(chan []byte),
		wc:    make(chan *Message),
		read:  r,
		write: w,
	}

	// One goroutine per module, connected by the rc and wc channels.
	go lp.read.Read(lp.rc)
	go lp.ProcessData()
	go lp.write.Write(lp.wc)

	// Keep main alive so the worker goroutines can run.
	time.Sleep(time.Duration(30000000) * time.Second)
}
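The processing module can also be exercised in isolation by wiring up a LogProcess by hand and feeding it a single fabricated log line instead of tailing a real file. The sketch below is a hypothetical _test.go file in the same package as the program above; the file name, sample log line, and expected values are made up for illustration.

// processdata_demo_test.go (hypothetical test file in the same package)
package main

import (
	"testing"
	"time"
)

func TestProcessDataParsesOneLine(t *testing.T) {
	lp := &LogProcess{
		rc: make(chan []byte),
		wc: make(chan *Message),
	}
	go lp.ProcessData()

	// A fabricated access-log line in the format the regexp expects.
	line := `127.0.0.1 - - [05/Jan/2019:10:00:00 +0800] "GET /foo?query=t HTTP/1.0" 200 2133 "-" "demo-agent"` + "\n"
	lp.rc <- []byte(line)

	select {
	case msg := <-lp.wc:
		if msg.Method != "GET" || msg.Path != "/foo" || msg.Status != "200" || msg.BytesSent != 2133 {
			t.Fatalf("unexpected message: %+v", msg)
		}
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for a parsed message")
	}
}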
Basic usage of InfluxDB
1. InfluxDB compared with a traditional database
InfluxDB | Concept in a traditional database |
---|---|
database | database |
measurement | table |
points | a row of data in a table |
What InfluxDB data is made of:
a point consists of a timestamp (time), recorded values (fields), and tags, as summarized in the table below (see the sketch after the table).
Point attribute | Concept in a traditional database |
---|---|
time | the timestamp of each record; it is the primary index (generated automatically) |
fields | the recorded values (attributes without an index), e.g. temperature, humidity |
tags | the indexed attributes, e.g. region, altitude |
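In the Go client used by the program above (github.com/influxdata/influxdb/client/v2), these parts map directly onto the arguments of client.NewPoint. A minimal sketch; the measurement name and the tag/field values are taken from the table above purely for illustration:

package main

import (
	"log"
	"time"

	"github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Tags are the indexed attributes, fields the unindexed recorded values
	// (names and values here are illustrative only).
	tags := map[string]string{"region": "shanghai", "altitude": "4000"}
	fields := map[string]interface{}{"temperature": 21.5, "humidity": 0.63}

	// A point = measurement + tags + fields + timestamp.
	pt, err := client.NewPoint("weather", tags, fields, time.Now())
	if err != nil {
		log.Fatal(err)
	}
	log.Println(pt.String()) // line-protocol representation of the point
}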
One more term has to be mentioned here: the series.
All data in the database is eventually displayed on charts, and a series represents the data within a measurement that can be drawn as separate lines on a chart; the series are derived from the combinations of tag values. They can be listed with SHOW SERIES FROM "measurement_name".
2. User management
//show existing users
> SHOW USERS
//create a user
> CREATE USER "username" WITH PASSWORD 'password'
//create a user with admin privileges
> CREATE USER "username" WITH PASSWORD 'password' WITH ALL PRIVILEGES
//drop a user
> DROP USER "username"
3. Creating and querying from the command line
//create a database
> CREATE DATABASE "testDB"
//list all databases
> SHOW DATABASES
//list all measurements
> SHOW MEASUREMENTS
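The same kind of query can also be issued from Go through the client package used above. A minimal sketch, assuming the address, credentials, and database from the program's default DSN; adjust them to your own setup:

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/client/v2"
)

func main() {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     "http://127.0.0.1:8086",
		Username: "imooc",
		Password: "imoocpass",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Query the measurement written by the log processor above.
	q := client.NewQuery(`SELECT * FROM "nginx_log" LIMIT 10`, "imooc", "s")
	resp, err := c.Query(q)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error() != nil {
		log.Fatal(resp.Error())
	}
	fmt.Println(resp.Results)
}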