1. Handling the MySQL slow query log and error log


MySQL can record its slow query log in two ways: to a file or to a table.
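
The destination is selected with the log_output server variable. As a minimal sketch (assuming a 5.5-era server and an account with the SUPER privilege), it can be switched at runtime:

SET GLOBAL slow_query_log = 1;                -- turn slow query logging on
SET GLOBAL log_output = 'FILE';               -- write to the file named by slow_query_log_file
-- SET GLOBAL log_output = 'TABLE';           -- or write to the mysql.slow_log table
-- SET GLOBAL log_output = 'FILE,TABLE';      -- or both at once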


The file-based slow query log looks like this:

/usr/local/mysql/bin/mysqld, Version: 5.5.21-log (Source distribution). started with:
Tcp port: 3306  Unix socket: /var/lib/mysql/mysql.sock
Time                 Id Command    Argument
# Time: 150422 12:08:32
# User@Host: root[root] @ localhost []
# Query_time: 0.000496  Lock_time: 0.000329 Rows_sent: 1  Rows_examined: 1
use zabbix;
SET timestamp=1429675712;
SELECT dv.mandatory,dv.optional FROM dbversion dv;
# User@Host: root[root] @ localhost []
# Query_time: 0.001546  Lock_time: 0.000320 Rows_sent: 0  Rows_examined: 0
SET timestamp=1429675712;
SELECT n.* FROM nodes n WHERE n.nodetype=1 ORDER BY n.nodeid;
# User@Host: root[root] @ localhost []
# Query_time: 0.000486  Lock_time: 0.000364 Rows_sent: 1  Rows_examined: 1
SET timestamp=1429675712;
select t.globalmacroid,t.macro,t.value from globalmacro t order by t.globalmacroid;
# User@Host: root[root] @ localhost []
# Query_time: 0.000887  Lock_time: 0.000349 Rows_sent: 16  Rows_examined: 130
SET timestamp=1429675712;
select t.interfaceid,t.hostid,t.main,t.type,t.useip,t.ip,t.dns,t.port from interface t where (t.hostid between 10151 and 10158 or t.hostid between 10826 and 10831 or t.hostid in (10001,10050,10060,10065,10068,10070,10094,10100,10101,10102,10104,10119,10140,10201,10203,10291,10728,10734,10820,10821,10823,10824,10836,10839,10840,10841,10842,10869)) order by t.interfaceid;



Set the following in the MySQL configuration file:

slow_query_log_file=/data/app_data/mysql/mysql-slow.log
slow_query_log=1
long_query_time=3
#log_queries_not_using_indexes=1


log_error=/data/app_data/mysql/mysql-error.log

slow_query_log_file specifies the file the slow query log is written to.

slow_query_log turns slow query logging on.

long_query_time is the execution-time threshold in seconds; any statement that runs longer than this is logged.

log_queries_not_using_indexes logs statements that do not use an index; it also captures many fast queries that simply have no index, which is why it is commented out here.

log_error specifies the MySQL error log file, which the mysql_error input below tails.
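
These settings can be checked from the mysql client. The sketch below also shows that when log_output includes TABLE, the mysql.slow_log table is backed by the CSV storage engine, whose data file (slow_log.CSV under the data directory) is presumably what the mysql-slow.csv* path in the Logstash configuration below points at:

SHOW VARIABLES LIKE 'slow_query%';       -- slow_query_log and slow_query_log_file
SHOW VARIABLES LIKE 'long_query_time';
SHOW VARIABLES LIKE 'log_output';        -- FILE, TABLE or a combination
SHOW CREATE TABLE mysql.slow_log\G       -- ENGINE=CSV, data file slow_log.CSV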




2. Writing the Logstash configuration file

input {

  file {
    # slow query log written to a file
    type => "mysql_slow_file"
    path => "/data/app_data/mysql/mysql-slow.log"
    sincedb_path => "/dev/null"
    # one slow-log entry spans several lines; join everything up to the next "# User@Host:" header into a single event
    codec => multiline {
      pattern => "^# User@Host:"
      negate => true
      what => "previous"
    }
  }

  file {
    # slow query log written to the mysql.slow_log table (CSV data file)
    type => "mysql_slow_table"
    path => "/data/app_data/mysql/mysql-slow.csv*"
    sincedb_path => "/dev/null"
  }

  file {
    # MySQL error log
    type => "mysql_error"
    path => "/data/app_data/mysql/mysql-error.log"
    sincedb_path => "/dev/null"
  }

}

filter {

if [type] == "mysql_slow_file" {
     grok {
             match => [ "message", "^# User@Host: %{USER:mysql.user}(?:\[[^\]]+\])?\s+@\s+%{HOST:mysql.host}?\s+\[%{IP:mysql.ip}?\]" ]
          }
     grok {
              match => [ "message", "^# Query_time: %{NUMBER:mysql.query_time:float}\s+Lock_time: %{NUMBER:mysql.lock_time:float} Rows_sent: %{NUMBER:mysql.rows_sent:int} \s*Rows_examined: %{NUMBER:mysql.rows_examined:int}"]
          }
     grok {
              match => [ "message", "^SET timestamp=%{NUMBER:timestamp};" ]
           }
     date {
              match => [ "timestamp", "UNIX" ]
          }
     mutate {
              remove_field => "timestamp"
            }
                           }

if [type] == "mysql_slow_table" {
      mutate { gsub => [ "message", '\\"', '""' ] }

      csv {
      columns => [ "start_time", "user_host", "query_time", "lock_time",
                 "rows_sent", "rows_examined", "db", "last_insert_id",
                 "insert_id", "server_id", "sql_text" ]
          }
                           
      mutate { convert => [ "rows_sent", "integer" ] }
      mutate { convert => [ "rows_examined", "integer" ] }
      mutate { convert => [ "last_insert_id", "integer" ] }
      mutate { convert => [ "insert_id", "integer" ] }
      mutate { convert => [ "server_id", "integer" ] }
           
      date {
         match => [ "start_time", "YYYY-MM-DD HH:mm:ss" ]
         remove_field => [ "start_time" ]
           }
       
      ruby { code => "event['query_time'] = event['query_time'] ? event['query_time'].split(':').inject(0){|a, m| a = a * 60 + m.to_i} : 0" }
      
      ruby { code => "event['lock_time'] = event['lock_time'] ? event['lock_time'].split(':').inject(0){|a, m| a = a * 60 + m.to_i} : 0" }
      mutate { remove_field => [ "message" ] }
       
                                }

if [type] == "mysql_error" {
    mutate {
              add_tag => ["zabbix-sender"]
              add_field => [
                "zabbix_host","%{host}",
                "zabbix_item","mysql.error"
#                "send_field","%{message}"
                           ]
                                     }


                           }
}

output {
  # print every event to stdout for debugging
  stdout {
    codec => "rubydebug"
  }

  # send events tagged "zabbix-sender" (the error-log lines) to the Zabbix server via zabbix_sender
  zabbix {
    tags => "zabbix-sender"
    host => "xxxxxxx"
    port => "10051"
    zabbix_sender => "/usr/local/zabbix/bin/zabbix_sender"
  }

  # push all events into Redis, from where an indexer can feed Elasticsearch/Kibana
  redis {
    host => "xxxxxxx"
    data_type => "list"
    key => "logstash"
  }
}
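
With the configuration saved, it can be syntax-checked and then run in the foreground. The paths below are only illustrative, and the --configtest flag applies to the Logstash 1.x/2.x releases this configuration is written for:

/opt/logstash/bin/logstash -f /etc/logstash/conf.d/mysql.conf --configtest
/opt/logstash/bin/logstash -f /etc/logstash/conf.d/mysql.conf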



Finally, build Kibana dashboards as needed and add Zabbix monitoring and alerting.
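
For the error-log alert, the monitored host needs a Zabbix trapper item whose key matches the zabbix_item field set above (mysql.error), and its host name must match what Logstash puts in zabbix_host (by default the machine's hostname); a trigger on that item then raises the alert. As a quick sanity check, the item can be fed by hand with zabbix_sender, using the same placeholder server address as in the output section:

/usr/local/zabbix/bin/zabbix_sender -z xxxxxxx -p 10051 -s "$(hostname)" -k mysql.error -o "test error line"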

References:

http://www.phase2technology.com/blog/adding-mysql-slow-query-logs-to-logstash/

https://gist.github.com/mjpowersjr/740a9583e9ec8b49e0a3

http://gurutek.biz/mysql-slow-query-logging-to-table/