First, install Logstash, run a quick smoke test, and configure it.
[root@localhost software]# java -version
openjdk version "1.8.0_65"
OpenJDK Runtime Environment (build 1.8.0_65-b17)
OpenJDK 64-Bit Server VM (build 25.65-b01, mixed mode)
[root@localhost software]#
[root@localhost software]# pwd
/software
[root@localhost software]#
[root@localhost software]# wget https://download.elastic.co/logstash/logstash/packages/centos/logstash-all-plugins-2.4.0.noarch.rpm
--2016-09-20 16:17:18-- https://download.elastic.co/logstash/logstash/packages/centos/logstash-all-plugins-2.4.0.noarch.rpm
Resolving download.elastic.co (download.elastic.co)... 23.23.240.27, 23.21.83.64, 107.22.244.203, ...
Connecting to download.elastic.co (download.elastic.co)|23.23.240.27|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 140945933 (134M) [application/octet-stream]
Saving to: “logstash-all-plugins-2.4.0.noarch.rpm”
100%[===============================================================>] 140,945,933 262KB/s in 8m 53s
2016-09-20 16:26:21 (258 KB/s) - “logstash-all-plugins-2.4.0.noarch.rpm” saved [140945933/140945933]
[root@localhost software]# ls
logstash-all-plugins-2.4.0.noarch.rpm
[root@localhost software]# chmod +x logstash-all-plugins-2.4.0.noarch.rpm
[root@localhost software]# rpm -ivh logstash-all-plugins-2.4.0.noarch.rpm
Preparing... ################################# [100%]
Updating / installing...
1:logstash-all-plugins-1:2.4.0-1 ################################# [100%]
[root@localhost software]#
[root@localhost software]# cd ~
[root@localhost ~]# service logstash start
logstash started.
[root@localhost ~]#
[root@localhost ~]# whereis logstash
logstash: /etc/logstash /opt/logstash/bin/logstash /opt/logstash/bin/logstash.bat
[root@localhost ~]# ls /etc/logstash/
conf.d
[root@localhost ~]# ls /etc/logstash/conf.d/
[root@localhost opt]# ll /opt/logstash/bin
total 44
-rwxrwxr-x. 1 logstash logstash 1854 Aug 31 04:36 logstash
-rw-rw-r--. 1 logstash logstash 689 Aug 31 04:36 logstash.bat
-rwxrwxr-x. 1 logstash logstash 5330 Aug 31 04:36 logstash.lib.sh
-rwxrwxr-x. 1 logstash logstash 439 Aug 31 04:36 logstash-plugin
-rw-rw-r--. 1 logstash logstash 251 Aug 31 04:36 logstash-plugin.bat
-rwxrwxr-x. 1 logstash logstash 199 Aug 31 04:36 plugin
-rw-rw-r--. 1 logstash logstash 203 Aug 31 04:36 plugin.bat
-rwxrwxr-x. 1 logstash logstash 322 Aug 31 04:36 rspec
-rw-rw-r--. 1 logstash logstash 245 Aug 31 04:36 rspec.bat
-rw-rw-r--. 1 logstash logstash 2947 Aug 31 04:36 setup.bat
[root@localhost opt]# ll /opt/logstash
total 164
drwxr-xr-x. 2 logstash logstash 4096 Sep 20 16:36 bin
-rw-rw-r--. 1 logstash logstash 102879 Aug 31 04:36 CHANGELOG.md
-rw-rw-r--. 1 logstash logstash 2249 Aug 31 04:36 CONTRIBUTORS
-rw-rw-r--. 1 logstash logstash 7799 Aug 31 04:36 Gemfile
-rw-rw-r--. 1 logstash logstash 34057 Aug 31 04:36 Gemfile.jruby-1.9.lock
drwxr-xr-x. 4 logstash logstash 42 Sep 20 16:36 lib
-rw-rw-r--. 1 logstash logstash 589 Aug 31 04:36 LICENSE
-rw-rw-r--. 1 logstash logstash 149 Aug 31 04:36 NOTICE.TXT
drwxr-xr-x. 4 logstash logstash 31 Sep 20 16:36 vendor
[root@localhost opt]#
[root@localhost opt]# cd /etc/logstash/conf.d/
[root@localhost conf.d]#
[root@localhost conf.d]# ls
first.conf
[root@localhost conf.d]# cat first.conf
input {
  stdin { }
}
output {
  stdout {
    codec => rubydebug
  }
}
[root@localhost conf.d]#
[root@localhost conf.d]# chmod +x ./*
[root@localhost conf.d]# /opt/logstash/bin/logstash -f first.conf
Hello
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "Hello",
"@version" => "1",
"@timestamp" => "2016-09-20T08:48:37.097Z",
"host" => "localhost.localdomain"
}
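The same smoke test can also be run without a config file by passing the pipeline inline with the -e flag (a quick sketch, equivalent to first.conf above):
/opt/logstash/bin/logstash -e 'input { stdin { } } output { stdout { codec => rubydebug } }'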
[root@localhost software]# cd /opt
[root@localhost opt]# cd logstash/
[root@localhost logstash]# ls
bin CHANGELOG.md CONTRIBUTORS Gemfile Gemfile.jruby-1.9.lock lib LICENSE NOTICE.TXT vendor
[root@localhost logstash]# bin/plugin install logstash-output-webhdfs
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-output-webhdfs
Installing logstash-output-webhdfs
Installation successful
[root@localhost logstash]# bin/plugin install logstash-filter-elasticsearch
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-filter-elasticsearch
Installing logstash-filter-elasticsearch
Installation successful
[root@localhost logstash]#
[root@localhost logstash]# bin/plugin install logstash-input-beats
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-input-beats
Installing logstash-input-beats
Installation successful
[root@localhost logstash]# bin/plugin install logstash-input-kafka
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-input-kafka
Installing logstash-input-kafka
Installation successful
[root@localhost logstash]# bin/plugin install logstash-output-lumberjack
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-output-lumberjack
Installing logstash-output-lumberjack
Installation successful
[root@localhost logstash]# bin/plugin install logstash-output-kafka
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-output-kafka
Installing logstash-output-kafka
Installation successful
[root@localhost logstash]# bin/plugin install logstash-input-file
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-input-file
Installing logstash-input-file
Installation successful
[root@localhost logstash]# bin/plugin install logstash-input-syslog
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-input-syslog
Installing logstash-input-syslog
Installation successful
[root@localhost logstash]# bin/plugin install logstash-output-file
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-output-file
Installing logstash-output-file
Installation successful
[root@localhost logstash]# bin/plugin install logstash-input-tcp
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
Validating logstash-input-tcp
Installing logstash-input-tcp
Installation successful
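As the warnings note, bin/plugin is deprecated; the same installs work unchanged through the replacement wrapper shown in the bin/ listing above, e.g.:
/opt/logstash/bin/logstash-plugin install logstash-input-tcp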
[root@localhost logstash]# cd /software/
[root@localhost software]#
[root@localhost software]# wget https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/rpm/elasticsearch/2.4.0/elasticsearch-2.4.0.rpm
--2016-09-20 18:10:09-- https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/rpm/elasticsearch/2.4.0/elasticsearch-2.4.0.rpm
Resolving download.elastic.co (download.elastic.co)... 23.23.240.27, 107.22.226.93, 107.22.240.120, ...
Connecting to download.elastic.co (download.elastic.co)|23.23.240.27|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 27256532 (26M) [application/x-rpm]
Saving to: “elasticsearch-2.4.0.rpm”
100%[===============================================================>] 27,256,532 160KB/s in 2m 51s
2016-09-20 18:13:09 (156 KB/s) - “elasticsearch-2.4.0.rpm” saved [27256532/27256532]
[root@localhost software]# chmod +x elasticsearch-2.4.0.rpm
[root@localhost software]# rpm -ivh elasticsearch-2.4.0.rpm
warning: elasticsearch-2.4.0.rpm: Header V4 RSA/SHA1 Signature, key ID d88e42b4: NOKEY
Preparing... ################################# [100%]
Creating elasticsearch group... OK
Creating elasticsearch user... OK
Updating / installing...
1:elasticsearch-2.4.0-1 ################################# [100%]
### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using systemd
sudo systemctl daemon-reload
sudo systemctl enable elasticsearch.service
### You can start elasticsearch service by executing
sudo systemctl start elasticsearch.service
[root@localhost software]# sudo systemctl enable elasticsearch.service
Created symlink from /etc/systemd/system/multi-user.target.wants/elasticsearch.service to /usr/lib/systemd/system/elasticsearch.service.
[root@localhost software]# service elasticsearch start
Starting elasticsearch (via systemctl): [ OK ]
[root@localhost software]#
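Before wiring Logstash to Elasticsearch, it is worth confirming the service answers on its default HTTP port (9200, assuming the port was not changed in /etc/elasticsearch/elasticsearch.yml):
curl http://localhost:9200/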
[root@localhost software]# cd /etc/logstash/conf.d
[root@localhost conf.d]# cat first.conf
input {
  file {
    path => ["/var/log/*log","/var/log/message"]
    type => "system"
    start_position => "beginning"
  }
}
output {
  stdout {
    codec => rubydebug
  }
}
[root@localhost conf.d]# /opt/logstash/bin/logstash -f first.conf
.......... log events stream to the console ......
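Note that start_position => "beginning" only applies the first time Logstash sees a file; the read offset is then persisted in a sincedb file under the running user's home directory. For repeatable tests, the offset store can be discarded via the file input's sincedb_path option (a testing-only sketch):
input {
  file {
    path => ["/var/log/*log","/var/log/message"]
    start_position => "beginning"
    sincedb_path => "/dev/null"   # testing only: offsets are not persisted
  }
}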
———————————————————— divider ——————————————————————————————————
Collecting local logs
[root@localhost bin]# cd /etc/logstash/conf.d/
[root@localhost conf.d]# ls
collect_email_svr_log.conf collect_lenovobook_log.conf collect_local_log.conf
[root@localhost conf.d]# cat collect_local_log.conf
input {
  file {
    path => ["/var/log/*log","/var/log/message"]
    type => "system"
    start_position => "beginning"
  }
}
output {
  file {
    path => "/coll_log/localhost/%{+yyyy}/%{+MM}/%{+dd}/%{host}.log"
    message_format => "%{message}"
    #gzip => true
  }
}
[root@localhost conf.d]#
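The %{+yyyy}/%{+MM}/%{+dd} parts of the path are Logstash sprintf date references expanded from each event's @timestamp, and %{host} comes from the event itself, so an event from this machine stamped 2016-09-20 would land in a path like (illustrative):
/coll_log/localhost/2016/09/20/localhost.localdomain.log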
———————————————————— divider ——————————————————————————————————
[root@localhost conf.d]# cd /opt/logstash/
[root@localhost logstash]# ls
bin CHANGELOG.md CONTRIBUTORS Gemfile Gemfile.jruby-1.9.lock lib LICENSE NOTICE.TXT vendor
[root@localhost logstash]# bin/plugin list
The use of bin/plugin is deprecated and will be removed in a feature release. Please use bin/logstash-plugin.
logstash-codec-avro
logstash-codec-cef
logstash-codec-cloudfront
logstash-codec-collectd
logstash-codec-compress_spooler
logstash-codec-csv
logstash-codec-dots
logstash-codec-edn
logstash-codec-edn_lines
logstash-codec-es_bulk
logstash-codec-fluent
logstash-codec-graphite
logstash-codec-gzip_lines
logstash-codec-json
logstash-codec-json_lines
logstash-codec-line
logstash-codec-msgpack
logstash-codec-multiline
logstash-codec-netflow
logstash-codec-oldlogstashjson
logstash-codec-plain
logstash-codec-protobuf
logstash-codec-rubydebug
logstash-codec-s3plain
logstash-codec-sflow
logstash-filter-aggregate
logstash-filter-alter
logstash-filter-anonymize
logstash-filter-checksum
logstash-filter-cidr
logstash-filter-cipher
logstash-filter-clone
logstash-filter-collate
logstash-filter-csv
logstash-filter-date
logstash-filter-de_dot
logstash-filter-dns
logstash-filter-drop
logstash-filter-elapsed
logstash-filter-elasticsearch
logstash-filter-environment
logstash-filter-extractnumbers
logstash-filter-fingerprint
logstash-filter-geoip
logstash-filter-grok
logstash-filter-i18n
logstash-filter-json
logstash-filter-json_encode
logstash-filter-kv
logstash-filter-metaevent
logstash-filter-metricize
logstash-filter-metrics
logstash-filter-multiline
logstash-filter-mutate
logstash-filter-oui
logstash-filter-prune
logstash-filter-punct
logstash-filter-range
logstash-filter-ruby
logstash-filter-sleep
logstash-filter-split
logstash-filter-syslog_pri
logstash-filter-throttle
logstash-filter-tld
logstash-filter-translate
logstash-filter-unique
logstash-filter-urldecode
logstash-filter-useragent
logstash-filter-uuid
logstash-filter-xml
logstash-filter-zeromq
logstash-input-beats
logstash-input-cloudwatch
logstash-input-couchdb_changes
logstash-input-elasticsearch
logstash-input-eventlog
logstash-input-exec
logstash-input-file
logstash-input-fluentd
logstash-input-ganglia
logstash-input-gelf
logstash-input-gemfire
logstash-input-generator
logstash-input-github
logstash-input-graphite
logstash-input-heartbeat
logstash-input-http
logstash-input-http_poller
logstash-input-imap
logstash-input-irc
logstash-input-jdbc
logstash-input-jmx
logstash-input-kafka
logstash-input-kinesis
logstash-input-log4j
logstash-input-lumberjack
logstash-input-meetup
logstash-input-pipe
logstash-input-puppet_facter
logstash-input-rabbitmq
logstash-input-redis
logstash-input-relp
logstash-input-rss
logstash-input-s3
logstash-input-salesforce
logstash-input-snmptrap
logstash-input-sqlite
logstash-input-sqs
logstash-input-stdin
logstash-input-stomp
logstash-input-syslog
logstash-input-tcp
logstash-input-twitter
logstash-input-udp
logstash-input-unix
logstash-input-varnishlog
logstash-input-websocket
logstash-input-wmi
logstash-input-xmpp
logstash-input-zenoss
logstash-input-zeromq
logstash-output-boundary
logstash-output-circonus
logstash-output-cloudwatch
logstash-output-csv
logstash-output-datadog
logstash-output-datadog_metrics
logstash-output-elasticsearch
logstash-output-elasticsearch-ec2
logstash-output-elasticsearch_java
logstash-output-email
logstash-output-exec
logstash-output-file
logstash-output-ganglia
logstash-output-gelf
logstash-output-graphite
logstash-output-graphtastic
logstash-output-hipchat
logstash-output-http
logstash-output-influxdb
logstash-output-irc
logstash-output-juggernaut
logstash-output-kafka
logstash-output-librato
logstash-output-loggly
logstash-output-lumberjack
logstash-output-metriccatcher
logstash-output-monasca_log_api
logstash-output-mongodb
logstash-output-nagios
logstash-output-nagios_nsca
logstash-output-null
logstash-output-opentsdb
logstash-output-pagerduty
logstash-output-pipe
logstash-output-rabbitmq
logstash-output-rados
logstash-output-redis
logstash-output-redmine
logstash-output-riemann
logstash-output-s3
logstash-output-sns
logstash-output-solr_http
logstash-output-sqs
logstash-output-statsd
logstash-output-stdout
logstash-output-stomp
logstash-output-syslog
logstash-output-tcp
logstash-output-udp
logstash-output-webhdfs
logstash-output-websocket
logstash-output-xmpp
logstash-output-zabbix
logstash-output-zeromq
logstash-output-zookeeper
logstash-patterns-core
[root@localhost logstash]#
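To also see the installed version of each plugin, the list subcommand accepts a --verbose flag (shown here with the non-deprecated wrapper):
bin/logstash-plugin list --verbose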
———————————————————————————————— divider ——————————————————————————————
Collecting logs from a remote Linux host
[root@localhost ~]# cd /etc/logstash/
[root@localhost logstash]# ls
conf.d
[root@localhost logstash]# cd conf.d/
[root@localhost conf.d]# ls
collect_email_svr_log.conf collect_lenovobook_log.conf collect_local_log.conf
[root@localhost conf.d]# vi collect_lenovobook_log.conf
[root@localhost conf.d]# cat collect_lenovobook_log.conf
input {
  beats {
    port => 5044
  }
}
output {
  file {
    path => "/coll_log/collect_lenovobook_log/%{+yyyy}/%{+MM}/%{+dd}/%{host}.log"
    message_format => "%{message}"
    #gzip => true
  }
}
[root@localhost conf.d]#
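Once Logstash is started with this config, the beats input listens on TCP 5044; a quick check from the Logstash host (assuming net-tools is installed) is:
netstat -tlnp | grep 5044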
—————————————————————————————————— divider ——————————————————————————
Reference: http://blog.csdn.net/fenglailea/article/details/52469671
[root@localhost bin]# ./filebeat -e -c /etc/filebeat/filebeat.yml -d "Publish"
[root@localhost filebeat]# cat filebeat.yml
################### Filebeat Configuration Example #########################
############################# Filebeat ######################################
filebeat:
# List of prospectors to fetch data.
  prospectors:
# Each - is a prospector. Below are the prospector specific configurations
    -
# Paths that should be crawled and fetched. Glob based paths.
# To fetch all ".log" files from a specific level of subdirectories
# /var/log/*/*.log can be used.
# For each file found under this path, a harvester is started.
# Make sure a file is not defined twice as this can lead to unexpected behaviour.
      paths:
        - /var/log/*.log
#- c:\programdata\elasticsearch\logs\*
# Configure the file encoding for reading files with international characters
# following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
# Some sample encodings:
# plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
# hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
#encoding: plain
# Type of the files. Based on this the way the file is read is decided.
# The different types cannot be mixed in one prospector
#
# Possible options are:
# * log: Reads every line of the log file (default)
# * stdin: Reads the standard in
      input_type: log
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list. The include_lines is called before
# exclude_lines. By default, no lines are dropped.
# exclude_lines: ["^DBG"]
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list. The include_lines is called before
# exclude_lines. By default, all the lines are exported.
# include_lines: ["^ERR", "^WARN"]
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
# exclude_files: [".gz$"]
# Optional additional fields. These field can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
# Set to true to store the additional fields as top level fields instead
# of under the "fields" sub-dictionary. In case of name conflicts with the
# fields added by Filebeat itself, the custom fields overwrite the default
# fields.
#fields_under_root: false
# Ignore files which were modified more than the defined timespan in the past.
# In case all files on your system must be read you can set this value very large.
# Time strings like 2h (2 hours), 5m (5 minutes) can be used.
#ignore_older: 0
# Close older closes the file handler for files which were not modified
# for longer than close_older
# Time strings like 2h (2 hours), 5m (5 minutes) can be used.
#close_older: 1h
# Type to be published in the 'type' field. For Elasticsearch output,
# the type defines the document type these entries should be stored
# in. Default: log
#document_type: log
# Scan frequency in seconds.
# How often these files should be checked for changes. In case it is set
# to 0s, it is done as often as possible. Default: 10s
#scan_frequency: 10s
# Defines the buffer size every harvester uses when fetching the file
#harvester_buffer_size: 16384
# Maximum number of bytes a single log event can have
# All bytes after max_bytes are discarded and not sent. The default is 10MB.
# This is especially useful for multiline log messages which can get large.
#max_bytes: 10485760
# Multiline can be used for log messages spanning multiple lines. This is common
# for Java Stack Traces or C-Line Continuation
#multiline:
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#pattern: ^\[
# Defines if the pattern set under pattern should be negated or not. Default is false.
#negate: false
# Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
# that was (not) matched before or after, or as long as a pattern is not matched based on negate.
# Note: After is the equivalent to previous and before is the equivalent to next in Logstash
#match: after
# The maximum number of lines that are combined to one event.
# In case there are more than max_lines, the additional lines are discarded.
# Default is 500
#max_lines: 500
# After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
# Default is 5s.
#timeout: 5s
# Setting tail_files to true means filebeat starts reading new files at the end
# instead of the beginning. If this is used in combination with log rotation
# this can mean that the first entries of a new file are skipped.
#tail_files: false
# Backoff values define how aggressively filebeat crawls new files for updates
# The default values can be used in most cases. Backoff defines how long it is waited
# to check a file again after EOF is reached. Default is 1s which means the file
# is checked every second if new lines were added. This leads to a near real time crawling.
# Every time a new line appears, backoff is reset to the initial value.
#backoff: 1s
# Max backoff defines what the maximum backoff time is. After having backed off multiple times
# from checking the files, the waiting time will never exceed max_backoff independent of the
# backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
# file after having backed off multiple times, it takes a maximum of 10s to read the new line
#max_backoff: 10s
# The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
# the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
# The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
#backoff_factor: 2
# This option closes a file, as soon as the file name changes.
# This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
# issues when the file is removed, as the file will not be fully removed until also Filebeat closes
# the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
# same name can be created. Turning this feature on the other hand can lead to loss of data
# on rotate files. It can happen that after file rotation the beginning of the new
# file is skipped, as the reading starts at the end. We recommend to leave this option on false
# but lower the ignore_older value to release files faster.
#force_close_files: false
# Additional prospector
#-
# Configuration to use stdin input
#input_type: stdin
# General filebeat configuration options
#
# Event count spool threshold - forces network flush if exceeded
#spool_size: 2048
# Enable async publisher pipeline in filebeat (Experimental!)
#publish_async: false
# Defines how often the spooler is flushed. After idle_timeout the spooler is
# flushed even though spool_size is not reached.
#idle_timeout: 5s
# Name of the registry file. Per default it is put in the current working
# directory. In case the working directory is changed when running
# filebeat again, indexing starts from the beginning again.
  registry_file: /var/lib/filebeat/registry
# Full Path to directory with additional prospector configuration files. Each file must end with .yml
# These config files must have the full filebeat config part inside, but only
# the prospector part is processed. All global options like spool_size are ignored.
# The config_dir MUST point to a different directory than the one the main filebeat config file is in.
#config_dir:
###############################################################################
############################# Libbeat Config ##################################
# Base config file used by all other beats for using libbeat features
############################# Output ##########################################
# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:
### Elasticsearch as output
# elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
# hosts: ["172.19.122.99:9200"]
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "admin"
#password: "s3cr3t"
# Number of workers per Elasticsearch host.
#worker: 1
# Optional index name. The default is "filebeat" and generates
# [filebeat-]YYYY.MM.DD keys.
#index: "filebeat"
# A template is used to set the mapping in Elasticsearch
# By default template loading is disabled and no template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones
#template:
# Template name. By default the template name is filebeat.
#name: "filebeat"
# Path to template file
#path: "filebeat.template.json"
# Overwrite existing template
#overwrite: false
# Optional HTTP Path
#path: "/elasticsearch"
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure http request timeout before failing a request to Elasticsearch.
#timeout: 90
# The number of seconds to wait for new events between two bulk API index requests.
# If `bulk_max_size` is reached before this interval expires, additional bulk index
# requests are made.
#flush_interval: 1
# Boolean that sets if the topology is kept in Elasticsearch. The default is
# false. This option makes sense only for Packetbeat.
#save_topology: false
# The time to live in seconds for the topology information that is stored in
# Elasticsearch. The default is 15 seconds.
#topology_expire: 15
# tls configuration. By default is off.
#tls:
# List of root certificates for HTTPS server verifications
#certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for TLS client authentication
#certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#certificate_key: "/etc/pki/client/cert.key"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
#insecure: true
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
# Configure minimum TLS version allowed for connection to logstash
#min_version: 1.0
# Configure maximum TLS version allowed for connection to logstash
#max_version: 1.2
  logstash:
    enabled: true
    hosts: ["172.19.122.99:5044"]
### Logstash as output
#logstash:
# The Logstash hosts
# hosts: ["172.19.122.99:5044"]
# Number of workers per Logstash host.
#worker: 1
# The maximum number of events to bulk into a single batch window. The
# default is 2048.
#bulk_max_size: 2048
# Set gzip compression level.
#compression_level: 3
# Optional load balance the events between the Logstash hosts
#loadbalance: true
# Optional index name. The default index name depends on the each beat.
# For Packetbeat, the default is set to packetbeat, for Topbeat
# top topbeat and for Filebeat to filebeat.
#index: filebeat
# Optional TLS. By default is off.
#tls:
# List of root certificates for HTTPS server verifications
#certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for TLS client authentication
#certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#certificate_key: "/etc/pki/client/cert.key"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
#insecure: true
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
### File as output
#file:
# Path to the directory where to save the generated files. The option is mandatory.
#path: "/tmp/filebeat"
# Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
#filename: filebeat
# Maximum size in kilobytes of each file. When this size is reached, the files are
# rotated. The default value is 10 MB.
#rotate_every_kb: 10000
# Maximum number of files under path. When this number of files is reached, the
# oldest file is deleted and the rest are shifted from last to first. The default
# is 7 files.
#number_of_files: 7
### Console output
# console:
# Pretty print json event
#pretty: false
############################# Shipper #########################################
shipper:
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this options is not defined, the hostname is used.
#name:
# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]
# Uncomment the following if you want to ignore transactions created
# by the server on which the shipper is installed. This option is useful
# to remove duplicates if shippers are installed on multiple servers.
#ignore_outgoing: true
# How often (in seconds) shippers are publishing their IPs to the topology map.
# The default is 10 seconds.
#refresh_topology_freq: 10
# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
# All the IPs will be deleted afterwards. Note, that the value must be higher than
# refresh_topology_freq. The default is 15 seconds.
#topology_expire: 15
# Internal queue size for single events in processing pipeline
#queue_size: 1000
# Configure local GeoIP database support.
# If no paths are not configured geoip is disabled.
#geoip:
#paths:
# - "/usr/share/GeoIP/GeoLiteCity.dat"
# - "/usr/local/var/GeoIP/GeoLiteCity.dat"
############################# Logging #########################################
# There are three options for the log output: syslog, file, stderr.
# Under Windows systems, the log files are per default sent to the file output,
# under all other systems per default to syslog.
logging:
# Send all logging output to syslog. On Windows default is false, otherwise
# default is true.
#to_syslog: true
# Write all logging output to files. Beats automatically rotate files if rotateeverybytes
# limit is reached.
#to_files: false
# To enable logging to files, to_files option has to be set to true
  files:
# The directory where the log files will be written to.
#path: /var/log/mybeat
# The name of the files where the logs are written to.
#name: mybeat
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated
    rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are beat, publish, service
# Multiple selectors can be chained.
#selectors: [ ]
# Sets log level. The default log level is error.
# Available log levels are: critical, error, warning, info, debug
#level: error
[root@localhost filebeat]#
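Stripped of the example comments, the active settings in this filebeat.yml reduce to the following (same values as above, re-indented for readability):
filebeat:
  prospectors:
    -
      paths:
        - /var/log/*.log
      input_type: log
  registry_file: /var/lib/filebeat/registry
output:
  logstash:
    enabled: true
    hosts: ["172.19.122.99:5044"]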
—————————————————————————————— divider ————————————————————————————————————————————————
Collecting Windows host logs
Download the nxlog package (installation steps omitted) and start the nxlog service from the Windows Services console:
http://nxlog.org/system/files/products/files/1/nxlog-ce-2.9.1716.msi
The configuration file on the Windows side is modified as follows; the Linux log server address is 172.19.122.99.
## This is a sample configuration file. See the nxlog reference manual about the
## configuration options. It should be installed locally and is also available
## online at http://nxlog.org/docs/
## Please set the ROOT to the folder your nxlog was installed into,
## otherwise it will not start.
#define ROOT C:\Program Files\nxlog
define ROOT C:\Program Files (x86)\nxlog
Moduledir %ROOT%\modules
CacheDir %ROOT%\data
Pidfile %ROOT%\data\nxlog.pid
SpoolDir %ROOT%\data
LogFile %ROOT%\data\nxlog.log
<Extension syslog>
    Module xm_syslog
</Extension>

<Input in>
    Module im_msvistalog
    # For windows 2003 and earlier use the following:
    #   Module im_mseventlog
</Input>

<Output out>
    Module om_tcp
    Host 172.19.122.99
    Port 514
    Exec to_syslog_snare();
</Output>

<Route 1>
    Path in => out
</Route>
Logstash server-side configuration:
[root@localhost conf.d]# cat win.conf
input {
  tcp {
    port => 514
  }
}
output {
  file {
    path => "/coll_log/collect_Mywindows10_log/%{+yyyy}/%{+MM}/%{+dd}/%{host}.log"
    message_format => "%{message}"
    #gzip => true
  }
}
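With this pipeline running (binding port 514 requires Logstash to run as root), the tcp input can be smoke-tested from any Linux box before involving nxlog, e.g. with netcat:
echo "tcp input test" | nc 172.19.122.99 514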