cd /usr/local/evecom
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.7.0-linux-x86_64.tar.gz
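Optionally verify the archive before unpacking; Elastic publishes a .sha512 checksum file alongside each release artifact:
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.7.0-linux-x86_64.tar.gz.sha512
sha512sum -c elasticsearch-7.7.0-linux-x86_64.tar.gz.sha512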
tar -zxvf elasticsearch-7.7.0-linux-x86_64.tar.gz
All of Elasticsearch's configuration files live under ${ES_HOME}/config; the settings below go into config/elasticsearch.yml.
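A quick look at that directory (assuming the default 7.7.0 tarball layout); elasticsearch.yml and jvm.options are the two files edited in this guide:
ls /usr/local/evecom/elasticsearch-7.7.0/config
# elasticsearch.yml  jvm.options  log4j2.properties  ...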
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: my-applicationes
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: node-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /usr/local/evecom/elasticsearch-7.7.0/data
#
# Path to log files:
#
path.logs: /usr/local/evecom/elasticsearch-7.7.0/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 172.27.65.xx
#
# Set a custom port for HTTP:
#
http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
cluster.initial_master_nodes: ["node-1"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
http.cors.enabled: true
http.cors.allow-origin: "*"
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
Set the JVM heap in config/jvm.options; both values should be identical and no more than about half of the machine's RAM:
-Xms6g
-Xmx6g
echo "net.ipv4.tcp_fin_timeout = 30" >>/etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_time = 1200" >>/etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_probes = 3" >>/etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_intvl = 15" >>/etc/sysctl.conf
echo "net.ipv4.tcp_syncookies = 1" >>/etc/sysctl.conf
echo "net.ipv4.tcp_tw_reuse = 1" >>/etc/sysctl.conf
echo "net.ipv4.tcp_tw_recycle = 1" >>/etc/sysctl.conf
echo "net.ipv4.tcp_max_tw_buckets = 5000" >>/etc/sysctl.conf
echo "net.ipv4.tcp_max_syn_backlog = 8192" >>/etc/sysctl.conf
echo "net.ipv4.route.gc_timeout = 100" >>/etc/sysctl.conf
echo "net.ipv4.tcp_syn_retries = 1" >>/etc/sysctl.conf
echo "net.ipv4.tcp_synack_retries = 1" >>/etc/sysctl.conf
echo "net.ipv4.tcp_timestamps = 0" >>/etc/sysctl.conf
echo "vm.max_map_count = 262144" >>/etc/sysctl.conf
sysctl -p
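Elasticsearch's bootstrap checks require vm.max_map_count to be at least 262144 when binding to a non-loopback address, so it is worth confirming the value took effect:
sysctl vm.max_map_count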
echo "* soft nofile 65535" >>/etc/security/limits.conf
echo "* hard nofile 65535" >>/etc/security/limits.conf
echo "* soft noproc 65535" >>/etc/security/limits.conf
echo "* hard noproc 65535" >>/etc/security/limits.conf
Elasticsearch refuses to start as root, so create a dedicated es user and give it ownership of the installation:
useradd es
passwd es
chown es:es -R /usr/local/evecom
su es
cd /usr/local/evecom/elasticsearch-7.7.0
./bin/elasticsearch -d
ps -ef | grep elastic
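If the node came up cleanly it should answer on the host and port configured in elasticsearch.yml (address elided here, as above):
curl "http://172.27.65.xx:9200"
curl "http://172.27.65.xx:9200/_cat/health?v"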
If the machine's default Java is too old, point the startup script at a specific JDK (these lines are commonly added near the top of bin/elasticsearch; the path below matches the install location used in this guide):
# Use the JDK shipped with Elasticsearch instead of the system Java
export JAVA_HOME=/usr/local/evecom/elasticsearch-7.7.0/jdk
export PATH=$JAVA_HOME/bin:$PATH
# Fall back to whatever java is on the PATH if $JAVA_HOME is not usable
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
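A quick sanity check of which Java the script will pick up:
"$JAVA_HOME/bin/java" -version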
If useradd or passwd failed with a permission error even as root, the account files may have been made immutable; clear the attribute and retry:
chattr -i /etc/group
chattr -i /etc/gshadow
chattr -i /etc/shadow
chattr -i /etc/passwd
Logstash uses two configuration files here: the pipeline definition and an Elasticsearch index template.
- config/logstash-tomcat.conf
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.
input {
  beats {
    port => 5044
  }
}
filter {
  dissect {
    mapping => { "message" => "%{loglevel} [%{logtime}] %{thread} %{class}: %{msg}" }
    add_field => { "logId" => "%{[host][name]}_%{logtime}.%{offset}" }
    remove_field => ["message","beat","log","host"]
  }
  date {
    #match => [ "timestamp", "yyyy-MMM-dd HH:mm:ss.SSS ZZ" ]
    match => [ "logtime", "yyyy-MM-dd HH:mm:ss.SSS" ]
  }
}
output {
  # Change appname, hosts and index (lowercase only, uppercase is not supported).
  # appname must match the appname set in filebeat's fields; index must match index_patterns in tomcattpl.json.
  if [fields][appname] == "ssss" {
    elasticsearch {
      hosts => ["http://172.27.xx.xxx:9201"]
      index => "ssss-%{+YYYY.MM.dd}"
      #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
      #user => "elastic"
      #password => "changeme"
      template => "/usr/local/evecom/logstash-7.7.0/config/tomcattpl.json"
      template_name => "tomcattpl"
      template_overwrite => true
      manage_template => true
    }
  }
  # Add one block per application; the same rules as above apply.
  if [fields][appname] == "sssss" {
    elasticsearch {
      hosts => ["http://172.27.xx.xxx:9201"]
      index => "ssss-%{+YYYY.MM.dd}"
      #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
      #user => "elastic"
      #password => "changeme"
    }
  }
}
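For reference, the dissect mapping above expects Tomcat/log4j lines shaped like the following (a made-up example):
INFO [2020-05-20 10:15:30.123] http-nio-8080-exec-1 com.example.demo.UserController: user login ok
which dissect splits into loglevel=INFO, logtime=2020-05-20 10:15:30.123, thread=http-nio-8080-exec-1, class=com.example.demo.UserController and msg=user login ok; the logtime format matches the date filter, and lines starting with a log level also match the multiline pattern used in filebeat.yml below. The pipeline syntax can be checked without starting a full instance (run from the Logstash bin directory, same as the startup commands below):
./logstash -f ../config/logstash-tomcat.conf --config.test_and_exit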
- config/tomcattpl.json
Change index_patterns so it matches the index set in logstash-tomcat.conf. (JSON does not allow comments, so keep such notes out of the file itself.)
{
  "order": 0,
  "version": 60002,
  "index_patterns": ["ssss*"],
  "settings": {
    "index": {
      "number_of_shards": "5",
      "refresh_interval": "5s"
    }
  },
  "mappings": {
    "dynamic_templates": [
      {
        "message_field": {
          "path_match": "msg",
          "mapping": {
            "norms": false,
            "type": "text",
            "fields": {
              "keyword": {
                "ignore_above": 10000,
                "type": "keyword"
              }
            }
          },
          "match_mapping_type": "string"
        }
      },
      {
        "message_field": {
          "path_match": "message",
          "mapping": {
            "norms": false,
            "type": "text"
          },
          "match_mapping_type": "string"
        }
      },
      {
        "string_fields": {
          "mapping": {
            "norms": false,
            "type": "text",
            "fields": {
              "keyword": {
                "ignore_above": 256,
                "type": "keyword"
              }
            }
          },
          "match_mapping_type": "string",
          "match": "*"
        }
      }
    ],
    "properties": {
      "@timestamp": {
        "type": "date"
      },
      "geoip": {
        "dynamic": true,
        "properties": {
          "ip": {
            "type": "ip"
          },
          "latitude": {
            "type": "half_float"
          },
          "location": {
            "type": "geo_point"
          },
          "longitude": {
            "type": "half_float"
          }
        }
      },
      "@version": {
        "type": "keyword"
      }
    }
  },
  "aliases": {}
}
Note: the layout of this template JSON differs between Elasticsearch versions and is not fully compatible across them. You can fetch the built-in logstash template from
http://172.27.xx.xxx:9201/_template/logstash
and adapt this file to match your version.
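Once Logstash has been started (see below), you can confirm the template was uploaded under the template_name configured above:
curl "http://172.27.xx.xxx:9201/_template/tomcattpl?pretty"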
Start Logstash in the background:
setsid ./logstash -f ../config/logstash-tomcat.conf --log.level error
or
nohup ./logstash -f ../config/logstash-tomcat.conf --log.level error -w 8 -b 1000 > /dev/null 2>&1 &
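Before pointing Filebeat at it, you can confirm the beats input is listening:
ss -lnt | grep 5044    # the beats port from logstash-tomcat.conf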
In the filebeat.yml below, change:
appname (lowercase only; it must match the [fields][appname] tested in logstash-tomcat.conf)
paths (point them at the log files to ship)
the Logstash hosts under output.logstash
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
#=========================== Filebeat inputs =============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /usr/local/evecom/apache-tomcat-8.5.49/logs/ssss.log
    #- c:\programdata\elasticsearch\logs\*
  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']
  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']
  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']
  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1
  ### Multiline options
  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java stack traces or C-line continuation.
  # The regexp pattern that has to be matched. The commented example matches all lines starting with [;
  # the active pattern below matches lines that start with a log level.
  #multiline.pattern: ^\[
  multiline.pattern: '^INFO|^DEBUG|^WARN|^ERROR'
  # Defines if the pattern set under pattern should be negated or not. Default is false.
  multiline.negate: true
  multiline.max_lines: 1200
  multiline.timeout: 2s
  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
  # Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash.
  multiline.match: after
  #close_renamed: true
#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================
#setup.template.settings:
#  index.number_of_shards: 3
#  index.codec: best_compression
#  _source.enabled: false
#================================ General =====================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
#name: 'ssss'
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
fields:
  appname: ssss
  #env: staging
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
#setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
#============================= Elastic Cloud ==================================
# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `:`.
#cloud.auth:
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["localhost:9200"]
# Enabled ilm (beta) to use index lifecycle management instead daily indices.
#ilm.enabled: false
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["172.27.xx.xxx:5044"]
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
#================================ Processors =====================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
logging.level: error
#logging.level: error
logging.to_files: true
logging.files:
  path: /usr/local/evecom/filebeat-6.7.1-linux-x86_64/logs
  name: filebeat.log
  keepfiles: 3
  permissions: 0644
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
logging.selectors: ["*"]
#============================== Xpack Monitoring ===============================
# filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line.
#xpack.monitoring.elasticsearch:
Start Filebeat in the background:
setsid ./filebeat -c ./filebeat.yml -e
or
nohup /usr/local/evecom/filebeat-6.7.1-linux-x86_64/filebeat -e -c /usr/local/evecom/filebeat-6.7.1-linux-x86_64/filebeat.yml > /dev/null 2>&1 &
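Once Filebeat is running, events should flow Filebeat -> Logstash -> Elasticsearch; a quick end-to-end check is to look for the daily index configured in logstash-tomcat.conf on the Elasticsearch side:
curl "http://172.27.xx.xxx:9201/_cat/indices/ssss-*?v"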