logstash的一些实践(3)接入各种类型日志

logstash可以接入很多类型的日志,整理常见的如下:
以下示例配置均保存到 in.conf 文件中。

DB(以oracle为例)

input {
  jdbc {
    type => "db"
    jdbc_driver_library => "/home/logstash/ojdbc8.jar"
    jdbc_driver_class => "Java::oracle.jdbc.driver.OracleDriver"
    jdbc_connection_string => "jdbc:oracle:thin:@//11.111.11.11/testdb"
    jdbc_user => "****"
    jdbc_password => "****"
    schedule => "* * * * *" //每分钟执行一次
    statement_filepath => "/home/logstash/statement.sql"
    record_last_run => true
    tracking_column => "op_time" //标识跟踪的字段
    codec => plain { charset => "UTF-8"}
    jdbc_default_timezone => "Asia/Shanghai"
    tracking_column_type => "timestamp" //跟踪的字段类型为timestamp
    last_run_metadata_path => "/home/logstash/oracle_one_last_id" //保存最后一次执行后的值,即stament.sql里的变量sql_last_value
    clean_run => "false"
    jdbc_paging_enabled => true
    jdbc_page_size => 100000 //分页大小,每次返回多少行数据
  }
}

statement.sql的内容如下

-- Incremental query: :sql_last_value is the tracking value saved by the jdbc input
select * from test where op_time >= :sql_last_value

syslog

input {
  syslog {
    # Receive syslog messages on the given address/port.
    host => "192.168.247.135"
    port => "514"
    type => "system-syslog"
  }
}

snmptrap

input {
    snmptrap {
        # NOTE: the port must be greater than 1024 (unprivileged range)
        port => "1064"
        community => ["public"]
        host => "192.168.101.204"
    }
}

files

input {
    file {
      path => "/var/log/messages"
      type => "system"
      start_position => "beginning"
    }
    file {
       path => "/var/log/elasticsearch/application.log"
       type => "es"
       start_position => "beginning"
      codec => multiline {
          pattern => "^\["
          negate => true
          what => "previous"
        }
    }
    file {
       path => "/var/log/nginx/access.log"
       codec => json
       start_position => "beginning"
       type => "nginx-log"
    }
}

kafka

input {
  kafka {
    # Consume JSON events from Kafka.
    # bootstrap_servers is a comma-separated "host:port" STRING, not an array.
    bootstrap_servers => "localhost:9092"
    topics => ["logs"]
    group_id => "logk"
    auto_offset_reset => "earliest"   # new consumer groups start from the oldest offset
    consumer_threads => 16            # numeric setting, not a string
    decorate_events => true           # attach Kafka metadata (topic/partition/offset) to events
    type => "kafka_logs"
    codec => json
  }
}

你可能感兴趣的:(logstash的一些实践(3)接入各种类型日志)