最近有朋友老说用RPM安装ELK教程不适用,不通用;很多生产环境不联网,操作系统版本也不同;叫我弄个源码安装的教程。所以就整理一个ELK5.0版本源码安装过程,水平有限,凑合着看!!最后附上安装包
一、配置JAVA环境变量
# mkdir -p /usr/local/java/
# cd /usr/local/java/
# tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz
# cat >>/etc/profile <<'EOF'
export JAVA_HOME=/usr/local/java/jdk1.8.0_111
export PATH=$JAVA_HOME/bin:$PATH
EOF
# source /etc/profile
二、安装elasticsearch
# mkdir -p /data/PRG/
# cd /data/PRG/
# tar zxvf /data/elk5.0/elasticsearch-5.0.0.tar.gz
# mv elasticsearch-5.0.0 elasticsearch
# useradd elasticsearch -s /sbin/nologin
# chown -R elasticsearch.elasticsearch /data/PRG/elasticsearch/
添加启动脚本
vi /etc/init.d/elasticsearch
#!/bin/sh
#
# elasticsearch
#
# chkconfig: 2345 80 20
# description: Starts and stops a single elasticsearch instance on this system
#
### BEGIN INIT INFO
# Provides: Elasticsearch
# Required-Start: $network $named
# Required-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: This service manages the elasticsearch daemon
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
### END INIT INFO
#
# init.d / servicectl compatibility (openSUSE)
#
if [ -f /etc/rc.status ]; then
    . /etc/rc.status
    rc_reset
fi
#
# Source function library.
#
if [ -f /etc/rc.d/init.d/functions ]; then
    . /etc/rc.d/init.d/functions
fi

# Sets the default values for elasticsearch variables used in this script
export JAVA_HOME=/usr/local/java/jdk1.8.0_111
ES_USER="elasticsearch"
ES_GROUP="elasticsearch"
name=elasticsearch
ES_HOME="/data/PRG/elasticsearch"
MAX_OPEN_FILES=65536
MAX_MAP_COUNT=262144
LOG_DIR="$ES_HOME/log/"
DATA_DIR="$ES_HOME/lib/"
CONF_DIR="$ES_HOME/config"

# Make sure the log dir exists and the install is owned by the service
# account before the daemon (running as $ES_USER) tries to write there.
mkdir -p "$LOG_DIR"
chown -R "$ES_USER":"$ES_GROUP" "$ES_HOME"
PID_DIR="$ES_HOME/log"

# Source the default env file
ES_ENV_FILE="/etc/sysconfig/elasticsearch"
if [ -f "$ES_ENV_FILE" ]; then
    . "$ES_ENV_FILE"
fi

# CONF_FILE setting was removed
if [ -n "$CONF_FILE" ]; then
    echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
    exit 1
fi

exec="$ES_HOME/bin/elasticsearch"
prog="elasticsearch"
pidfile="$PID_DIR/${prog}.pid"

export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
export ES_GC_LOG_FILE
export ES_STARTUP_SLEEP_TIME
export JAVA_HOME
export ES_INCLUDE

ulimit -n "$MAX_OPEN_FILES"
lockfile=$ES_HOME/log/$prog

# backwards compatibility for old config sysconfig files, pre 0.90.1
# Quoted: with the original unquoted test, an unset $USER made the check
# collapse to `[ -n ]`, which is always true.
if [ -n "$USER" ] && [ -z "$ES_USER" ] ; then
    ES_USER=$USER
fi
# Locate a usable java binary: prefer $JAVA_HOME/bin/java, fall back to
# java on $PATH. Sets $JAVA; exits 1 when no executable java is found.
checkJava() {
    if [ -x "$JAVA_HOME/bin/java" ]; then
        JAVA="$JAVA_HOME/bin/java"
    else
        # command -v instead of the deprecated `which`; $( ) over backticks.
        JAVA=$(command -v java)
    fi
    if [ ! -x "$JAVA" ]; then
        echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
        exit 1
    fi
}
# Start the elasticsearch daemon via the RHEL `daemon` helper, after
# applying ulimits/sysctls and preparing the pid file. Returns 7 on a
# MAX_LOCKED_MEMORY misconfiguration, daemon's status otherwise.
start() {
    checkJava
    [ -x "$exec" ] || exit 5
    # Quoted tests throughout; `-a` inside [ ] is deprecated/ambiguous,
    # replaced with two tests joined by &&.
    if [ -n "$MAX_LOCKED_MEMORY" ] && [ -z "$ES_HEAP_SIZE" ]; then
        echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
        return 7
    fi
    if [ -n "$MAX_OPEN_FILES" ]; then
        ulimit -n "$MAX_OPEN_FILES"
    fi
    if [ -n "$MAX_LOCKED_MEMORY" ]; then
        ulimit -l "$MAX_LOCKED_MEMORY"
    fi
    if [ -n "$MAX_MAP_COUNT" ] && [ -f /proc/sys/vm/max_map_count ]; then
        sysctl -q -w vm.max_map_count="$MAX_MAP_COUNT"
    fi
    export ES_GC_LOG_FILE
    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
    fi
    if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
        touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
    fi
    # Don't launch from the wrong directory if the install path is missing.
    cd "$ES_HOME" || return 1
    echo -n $"Starting $prog: "
    # if not running, start it up here, usually something like "daemon $exec"
    daemon --user "$ES_USER" --pidfile "$pidfile" "$exec" -p "$pidfile" -d
    #daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR
    retval=$?
    echo
    [ $retval -eq 0 ] && touch "$lockfile"
    return $retval
}
# Stop the daemon via killproc from the functions library.
stop() {
echo -n $"Stopping $prog: "
# stop it here, often "killproc $prog"
# -d 86400: allow the JVM up to a day to exit cleanly before killproc
# escalates the signal.
killproc -p $pidfile -d 86400 $prog
retval=$?
echo
# Remove the subsys lockfile only on successful stop; a failed stop also
# makes this function return non-zero via the failed test.
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
# Full restart: stop then start. Note start runs even if stop failed.
restart() {
stop
start
}
# Elasticsearch cannot re-read its config while running, so reload and
# force_reload both fall back to a full restart.
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $pidfile $prog
}
# Quiet status check for use in the dispatch conditionals below.
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?
# chmod +x /etc/init.d/elasticsearch
# /etc/init.d/elasticsearch start
# /etc/init.d/elasticsearch status
elasticsearch (pid 20895) is running...
# netstat -ntlp |grep 9[2-3]00
tcp 0 0 :::9300 :::* LISTEN 20895/java
tcp 0 0 :::9200 :::* LISTEN 20895/java
三、配置elasticsearch
# vim /data/PRG/elasticsearch/config/jvm.options
-Xms512m
-Xmx512m
# cat /data/PRG/elasticsearch/config/elasticsearch.yml|grep -v '#'
network.host: 0.0.0.0 ###开启监听地址,
action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*
####以下模块视情况是否开启
xpack.security.enabled: true ####开启用户认证
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.watcher.enabled: true
xpack.security.authc.realms: ####用户认证模式,ldap、file、pki、ActiveDirectory等
file1:
type: file
order: 0
# cd /data/PRG/
# tar zxvf /data/elk5.0/logstash-5.0.0.tar.gz
# mv logstash-5.0.0 logstash
# useradd logstash -s /sbin/nologin
# chown -R logstash.logstash /data/PRG/logstash
添加启动脚本
vim /etc/init.d/logstash
#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides: logstash
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description:
# Description: Starts Logstash as a daemon.
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin:/data/PRG/logstash/bin
export PATH

# Root is required: start() chroots, raises ulimits and drops privileges.
# $( ) over backticks; quoted so an empty result is a clean test error.
if [ "$(id -u)" -ne 0 ]; then
    echo "You need root privileges to run this script"
    exit 1
fi

name=logstash
LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/data/PRG/logstash
LS_HEAP_SIZE="1g"
LS_LOG_DIR=/data/PRG/logstash/logs
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
pidfile="${LS_LOG_DIR}/$name.pid"
LS_CONF_DIR=/data/PRG/logstash/conf.d
LS_OPEN_FILES=16384
LS_NICE=19
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0} #default value is zero to this variable but could be updated by user request
LS_OPTS=""

# Allow local overrides of any of the settings above.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=$LS_HOME/bin/logstash
args=" -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"
quiet() {
    # Run the given command, discarding all output; the function's exit
    # status is the command's own exit status.
    "$@" > /dev/null 2>&1
}
# Launch logstash in the background as $LS_USER (via chroot --userspec),
# niced, with the fd limit raised, and record its pid in $pidfile.
start() {
    # Point the JVM temp dir at LS_HOME so a noexec /tmp doesn't break JRuby.
    LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
    HOME=${LS_HOME}
    export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE
    # chroot doesn't grab the suplimental groups when setting the user:group -
    # so we have to do it for it. Boy, I hope we're root here.
    SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')
    # Quoted + [ -n ]: the original unquoted `[ ! -z $SGROUPS ]` broke when
    # the group list was empty or contained unexpected whitespace.
    if [ -n "$SGROUPS" ]
    then
        EXTRA_GROUPS="--groups $SGROUPS"
    fi
    # set ulimit as (root, presumably) first, before we drop privileges
    ulimit -n ${LS_OPEN_FILES}
    # Run the program!
    nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
cd $LS_HOME
ulimit -n ${LS_OPEN_FILES}
$program $args > ${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &
    # Generate the pidfile from here. If we instead made the forked process
    # generate it there will be a race condition between the pidfile writing
    # and a process possibly asking for status.
    echo $! > $pidfile
    echo "$name started."
    return 0
}
# Gracefully stop logstash: SIGTERM to the recorded pid, wait up to 9s,
# then either SIGKILL (when KILL_ON_STOP_TIMEOUT=1) or report failure.
stop() {
    # Try a few times to kill TERM the program
    if status ; then
        pid=$(cat "$pidfile")
        echo "Killing $name (pid $pid) with SIGTERM"
        # Signal only the recorded pid. The original piped ps through
        # `grep $pid`, which can match unrelated processes whose ps line
        # merely contains the pid string, and sent SIGKILL outright —
        # defeating both this graceful wait loop and the
        # KILL_ON_STOP_TIMEOUT escalation below.
        kill -TERM "$pid"
        # Wait for it to exit.
        for i in 1 2 3 4 5 6 7 8 9 ; do
            echo "Waiting $name (pid $pid) to die..."
            status || break
            sleep 1
        done
        if status ; then
            if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; then
                echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
                kill -KILL "$pid"
                echo "$name killed with SIGKILL."
            else
                echo "$name stop failed; still running."
                return 1 # stop timed out and not forced
            fi
        else
            echo "$name stopped."
        fi
    fi
}
status() {
    # LSB-style result: 0 running, 2 dead but pidfile exists, 3 not running.
    [ -f "$pidfile" ] || return 3

    pid=$(cat "$pidfile")
    # kill -0 only proves some process owns this pid.
    # It may not be our pid, but that's what you get with just pidfiles.
    # TODO(sissel): Check if this process seems to be the same as the one we
    # expect. It'd be nice to use flock here, but flock uses fork, not exec,
    # so it makes it quite awkward to use in this case.
    if kill -0 $pid > /dev/null 2> /dev/null ; then
        return 0
    fi
    return 2 # program is dead but pid file exists
}
configtest() {
    # An empty/missing conf.d means there is nothing to validate.
    if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; then
        echo "There aren't any configuration files in ${LS_CONF_DIR}"
        return 1
    fi

    HOME=${LS_HOME}
    export PATH HOME

    # Ask logstash itself to check the configuration (-t).
    if $program -t -f ${LS_CONF_DIR} ${LS_OPTS} ; then
        return 0
    fi
    # Program not configured
    return 6
}
case "$1" in
start)
status
code=$?
if [ $code -eq 0 ]; then
echo "$name is already running"
else
start
code=$?
fi
exit $code
;;
stop) stop ;;
force-stop) force_stop ;;
status)
status
code=$?
if [ $code -eq 0 ] ; then
echo "$name is running"
else
echo "$name is not running"
fi
exit $code
;;
reload) reload ;;
restart)
stop && start
;;
check)
configtest
exit $?
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|check}" >&2
exit 3
;;
esac
exit $?
# chmod +x /etc/init.d/logstash
# /etc/init.d/logstash start
# /etc/init.d/logstash status
logstash is running
# netstat -ntlp|grep 9600
tcp 0 0 :::9600 :::* LISTEN 10141/java
# cat /data/PRG/logstash/config/logstash.yml |grep -v '#'
http.host: "0.0.0.0" ###开启监听地址
nginx日志收集
# cat /data/PRG/logstash/conf.d/filter.conf
input {
beats {
port => 10200
}
}
filter {
grok {
match=> {
message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\], %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - ,%{IPORHOST:server_ip} , %{BASE10NUM:request_duration}"
}
match=> {
message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\], %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" ,%{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} ,%{BASE10NUM:request_duration}"
}
}
}
output {
elasticsearch {
hosts =>["192.168.62.200:9200"]
index =>"operation-%{+YYYY.MM.dd}"
document_type=> "nginx2"
user => 'admin' #### elasticsearch的用户名,用X-PACK插件创建
password =>'kbsonlong' #### elasticsearch的密码,用X-PACK插件创建
}
stdout { codec =>rubydebug }
}
# cd /data/PRG/
# tar zxvf /data/elk5.0/kibana-5.0.0-linux-x86_64.tar.gz
# mv kibana-5.0.0-linux-x86_64 kibana
# useradd kibana -s /sbin/nologin
# chown -R kibana.kibana /data/PRG/kibana
添加启动脚本
# vim /etc/init.d/kibana
#!/bin/sh
# Init script for kibana
# Maintained by
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides: kibana
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description:
# Description: Kibana
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH
# Install location and derived paths; pid and logs live under the install.
KIBANA_HOME=/data/PRG/kibana
name=kibana
program=$KIBANA_HOME/bin/kibana
args=''
pidfile="$KIBANA_HOME/logs/$name.pid"
LOG_HOME="$KIBANA_HOME/logs"
# Allow local overrides of any of the settings above.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
# Default niceness when not overridden by the files above.
# NOTE(review): $nice is never used later in this script as shown here.
[ -z "$nice" ] && nice=0
trace() {
    # Record the message in syslog, tagged with this script's path.
    logger -t "/etc/init.d/kibana" "$@"
}

emit() {
    # Log the message, then echo the same text to the console.
    trace "$@"
    echo "$@"
}
# Launch kibana in the background, append stdout/stderr to the log dir,
# and record the child pid in $pidfile. Always returns 0.
start() {
    # Ensure the log directory is setup correctly.
    # -p: the original bare mkdir failed when /data/PRG/kibana itself
    # did not yet exist.
    [ -d "$LOG_HOME" ] || mkdir -p "$LOG_HOME"
    chmod 755 "$LOG_HOME"
    # Setup any environmental stuff beforehand
    # Run the program!
    #chroot --userspec "$user":"$group" "$chroot" sh -c "
    $program $args >> $LOG_HOME/kibana.stdout 2>> $LOG_HOME/kibana.stderr &
    # Generate the pidfile from here. If we instead made the forked process
    # generate it there will be a race condition between the pidfile writing
    # and a process possibly asking for status.
    echo $! > $pidfile
    emit "$name started"
    return 0
}
# Gracefully stop kibana: SIGTERM to the recorded pid, wait up to 5s,
# then either SIGKILL (KILL_ON_STOP_TIMEOUT=1) or report failure.
stop() {
    # Try a few times to kill TERM the program
    if status ; then
        pid=$(cat "$pidfile")
        echo "Killing $name (pid $pid) with SIGTERM"
        # Signal only the recorded pid. The original `ps -ef | grep $pid`
        # could match unrelated processes containing the pid string, and
        # kill -9 skipped graceful shutdown entirely.
        kill -TERM "$pid"
        # Wait for it to exit.
        for i in 1 2 3 4 5 ; do
            trace "Waiting $name (pid $pid) to die..."
            status || break
            sleep 1
        done
        if status ; then
            # KILL_ON_STOP_TIMEOUT is never set in this script; default to 0
            # so the unset case is not a test(1) "integer expression" error.
            if [ "${KILL_ON_STOP_TIMEOUT:-0}" -eq 1 ] ; then
                trace "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
                kill -KILL $pid
                emit "$name killed with SIGKILL."
            else
                emit "$name stop failed; still running."
            fi
        else
            emit "$name stopped."
        fi
    fi
}
status() {
    # LSB-style result: 0 running, 2 dead but pidfile exists, 3 not running.
    [ -f "$pidfile" ] || return 3

    pid=$(cat "$pidfile")
    # ps -p only proves some process owns this pid.
    # It may not be our pid, but that's what you get with just pidfiles.
    # TODO(sissel): Check if this process seems to be the same as the one we
    # expect. It'd be nice to use flock here, but flock uses fork, not exec,
    # so it makes it quite awkward to use in this case.
    if ps -p $pid > /dev/null 2> /dev/null ; then
        return 0
    fi
    return 2 # program is dead but pid file exists
}
case "$1" in
force-start|start|stop|status|restart)
trace "Attempting '$1' on kibana"
;;
esac
case "$1" in
force-start)
PRESTART=no
exec "$0" start
;;
start)
status
code=$?
if [ $code -eq 0 ]; then
emit "$name is already running"
exit $code
else
start
exit $?
fi
;;
stop) stop ;;
status)
status
code=$?
if [ $code -eq 0 ] ; then
emit "$name is running"
else
emit "$name is not running"
fi
exit $code
;;
restart)
stop && start
;;
*)
echo "Usage: $SCRIPTNAME {start|force-start|stop|force-start|force-stop|status|restart}" >&2
exit 3
;;
esac
exit $?
# chmod +x /etc/init.d/kibana
# /etc/init.d/kibana start
# /etc/init.d/kibana status
# netstat -ntlp |grep 5601
tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 13052/node
# cat /data/PRG/kibana/config/kibana.yml |grep -v '#'
server.host: "0.0.0.0"
####以下模块视情况是否开启
xpack.security.enabled: true
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.reporting.enabled: true
# /data/PRG/kibana/bin/kibana-plugin install file:///root/x-pack-5.0.0.zip
# /data/PRG/elasticsearch/bin/elasticsearch-plugin install file:///root/x-pack-5.0.0.zip
离线安装x-pack要修改用户脚本,默认创建用户配置文件在/etc/elasticsearch/x-pack目录
# vim /data/PRG/elasticsearch/bin/x-pack/users
否则在创建用户的时候提示/etc/elasticsearch/x-pack/users…tmp不存在
# mkdir /etc/elasticsearch/x-pack/
# chown -R elasticsearch.elasticsearch /etc/elasticsearch/x-pack/
# cd /data/PRG/elasticsearch
# bin/x-pack/users useradd admin -p kbsonlong -r superuser
# /data/PRG/elasticsearch/bin/x-pack/users list
admin :superuser
test : - ###创建用户时没有添加-r参数,所以没有用户角色
# curl http://localhost:9200/_xpack/ --user admin:kbsonlong
{"build":{"hash":"7763f8e","date":"2016-10-26T04:51:59.202Z"},"license":{"uid":"06a82587-66ac-4d4a-90c4-857d9ca7f3bc","type":"trial","mode":"trial","status":"active","expiry_date_in_millis":1483753731066},"features":{"graph":{"description":"GraphData Exploration for the ElasticStack","available":true,"enabled":true},"monitoring":{"description":"Monitoringfor the ElasticStack","available":true,"enabled":true},"security":{"description":"Securityfor the ElasticStack","available":true,"enabled":true},"watcher":{"description":"Alerting,Notification and Automation for the ElasticStack","available":true,"enabled":true}},"tagline":"Youknow, for X"}
# /data/PRG/elasticsearch/bin/x-pack/users userdel test
# /data/PRG/elasticsearch/bin/x-pack/users list
admin :superuser
# cd /data/PRG
# tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz
# mv filebeat-5.0.0-linux-x86_64 filebeat
配置启动脚本
# vim /etc/init.d/filebeat
#!/bin/bash
#
# filebeat filebeat shipper
#
# chkconfig: 2345 98 02
#
### BEGIN INIT INFO
# Provides: filebeat
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Sends log files to Logstash or directly to Elasticsearch.
# Description: filebeat is a shipper part of the Elastic Beats
# family. Please see: https://www.elastic.co/products/beats
### END INIT INFO

PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH

# Optional overrides (PIDFILE, PB_AGENT) from the sysconfig file.
[ -f /etc/sysconfig/filebeat ] && . /etc/sysconfig/filebeat
pidfile=${PIDFILE-/data/PRG/filebeat/filebeat.pid}
agent=${PB_AGENT-/data/PRG/filebeat/filebeat}
args="-c /data/PRG/filebeat/filebeat.yml"
test_args="-e -configtest"
wrapper="filebeat-god"
wrapperopts="-r / -n -p $pidfile"
RETVAL=0

# Source function library.
# Guarded with -f (matching the elasticsearch script above) so a missing
# functions file degrades gracefully instead of erroring.
if [ -f /etc/rc.d/init.d/functions ]; then
    . /etc/rc.d/init.d/functions
fi

# Determine if we can use the -p option to daemon, killproc, and status.
# RHEL < 5 can't.
# NOTE(review): this probes the sourced `status` helper's usage text for a
# '-p' mention — confirm it behaves as intended on the target distribution.
if status | grep -q -- '-p' 2>/dev/null; then
    daemonopts="--pidfile $pidfile"
    pidopts="-p $pidfile"
fi
# Run the agent's built-in configuration check (-e -configtest).
# NOTE(review): this function shadows the shell builtin `test`; the rest of
# the script only uses `[ ... ]`, so the shadowing is harmless here, but a
# rename (together with its callers in start/restart) would be safer.
test() {
$agent $args $test_args
}
# Validate the configuration, then start filebeat under the filebeat-god
# supervisor via the RHEL `daemon` helper.
start() {
    echo -n $"Starting filebeat: "
    # Refuse to start if the configuration does not validate.
    if ! test; then
        echo
        exit 1
    fi
    daemon $daemonopts $wrapper $wrapperopts -- $agent $args
    RETVAL=$?
    echo
    return $RETVAL
}
# Stop the filebeat-god wrapper (which also stops filebeat).
stop() {
echo -n $"Stopping filebeat: "
killproc $pidopts $wrapper
RETVAL=$?
echo
# Remove the pidfile only on success; the failed test also makes the
# function's exit status non-zero when the stop failed.
[ $RETVAL = 0 ] && rm -f ${pidfile}
}
restart() {
    # Validate configuration first so a broken config never takes the
    # running instance down.
    if ! test; then
        return 1
    fi
    stop
    start
}
# Generic status via the functions library, matched on the wrapper process.
rh_status() {
status $pidopts $wrapper
RETVAL=$?
return $RETVAL
}
# Quiet variant for use in conditionals.
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
status)
rh_status
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL
配置filebeat
# cat filebeat/filebeat.yml |grep -v '#'
filebeat.prospectors:
- input_type: log
paths:
  - /tmp/nginx.log
output.logstash:
enabled: true
hosts: ["localhost:10200"]
启动filebeat
# /etc/init.d/filebeat start
Starting filebeat: 2016/12/0807:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path:[/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path:[/data/PRG/filebeat/logs]
2016/12/08 07:18:37.177681 beat.go:174:INFO Setup Beat: filebeat; Version: 5.0.0
2016/12/08 07:18:37.177760 logstash.go:90:INFO Max Retries set to: 3
2016/12/08 07:18:37.177828 outputs.go:106:INFO Activated logstash as output plugin.
2016/12/08 07:18:37.177912 publish.go:291:INFO Publisher name: operation
2016/12/08 07:18:37.178158 async.go:63:INFO Flush Interval set to: 1s
2016/12/08 07:18:37.178170 async.go:64:INFO Max Bulk Size set to: 2048
Config OK
[ OK ]
# /etc/init.d/filebeat status
filebeat-god (pid 7365) is running...
# ps -ef |grep filebeat
root 7405 1 0 15:18 pts/1 00:00:00 filebeat-god -r / -n -p/data/PRG/filebeat/filebeat.pid -- /data/PRG/filebeat/filebeat -c/data/PRG/filebeat/filebeat.yml
root 7406 7405 0 15:18 pts/1 00:00:00 /data/PRG/filebeat/filebeat -c/data/PRG/filebeat/filebeat.yml
附上安装源码包,包括x-pack、beat等
百度云盘http://pan.baidu.com/s/1skT4zCx