Please credit the source when reposting: https://blog.csdn.net/Amor_Leo/article/details/83144739. Thanks.
Run on all three virtual machines:
mkdir -p /usr/local/elasticsearch/conf
mkdir -p /usr/local/elasticsearch/data
mkdir -p /usr/local/elasticsearch/plugins/ik
chmod 777 /usr/local/elasticsearch/plugins/ik
chmod 777 /usr/local/elasticsearch/data
chmod 777 /usr/local/elasticsearch/conf
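chmod 777 works but is very permissive; assuming the official elasticsearch image, which runs as the elasticsearch user with uid 1000, a tighter alternative is to hand the directory tree to that uid instead:
chown -R 1000:1000 /usr/local/elasticsearch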
Run on all three virtual machines:
docker pull elasticsearch:6.5.4
Run on all three virtual machines:
vi /etc/sysctl.conf
vm.max_map_count=655360
sysctl -p
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
vim /etc/security/limits.d/20-nproc.conf
* soft nproc 4096
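A quick way to confirm the kernel and limit changes took effect (the nofile/nproc limits apply to new login sessions):
sysctl vm.max_map_count
ulimit -Sn
ulimit -Hn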
On 192.168.0.111 (node-111):
vim /usr/local/elasticsearch/conf/es.yml
#Cluster name
cluster.name: ESCluster
#Node name
node.name: node-111
#Bind address, IPv4 or IPv6; the default 0.0.0.0 binds every interface on this machine
network.bind_host: 0.0.0.0
#Address other nodes use to reach this node; auto-detected if unset, must be a real IP
network.publish_host: 192.168.0.111
#HTTP port for client access (default 9200)
http.port: 9200
#TCP port for node-to-node transport (default 9300)
transport.tcp.port: 9300
#Allow cross-origin REST requests (needed later by elasticsearch-head)
http.cors.enabled: true
#Which origins may send REST requests
http.cors.allow-origin: "*"
#Node roles
node.master: true
node.data: true
#List of master-eligible nodes to contact for discovery
#discovery.zen.ping.unicast.hosts: ["0.0.0.0:9300","192.168.0.112:9300","192.168.0.113:9300"]
discovery.zen.ping.unicast.hosts: ["192.168.0.111:9300","192.168.0.112:9300","192.168.0.113:9300"]
#Minimum number of master-eligible nodes that must be up and reachable (default 1);
#set it to (total master-eligible nodes / 2) + 1, i.e. 3/2 + 1 = 2 for this three-node cluster
discovery.zen.minimum_master_nodes: 2
On 192.168.0.112 (node-112):
vim /usr/local/elasticsearch/conf/es.yml
#Cluster name
cluster.name: ESCluster
#Node name
node.name: node-112
#Bind address, IPv4 or IPv6; the default 0.0.0.0 binds every interface on this machine
network.bind_host: 0.0.0.0
#Address other nodes use to reach this node; auto-detected if unset, must be a real IP
network.publish_host: 192.168.0.112
#HTTP port for client access (default 9200)
http.port: 9200
#TCP port for node-to-node transport (default 9300)
transport.tcp.port: 9300
#Allow cross-origin REST requests (needed later by elasticsearch-head)
http.cors.enabled: true
#Which origins may send REST requests
http.cors.allow-origin: "*"
#Node roles
node.master: true
node.data: true
#List of master-eligible nodes to contact for discovery
#discovery.zen.ping.unicast.hosts: ["0.0.0.0:9300","192.168.0.111:9300","192.168.0.113:9300"]
discovery.zen.ping.unicast.hosts: ["192.168.0.111:9300","192.168.0.112:9300","192.168.0.113:9300"]
#Minimum number of master-eligible nodes that must be up and reachable (default 1);
#set it to (total master-eligible nodes / 2) + 1, i.e. 3/2 + 1 = 2 for this three-node cluster
discovery.zen.minimum_master_nodes: 2
On 192.168.0.113 (node-113):
vim /usr/local/elasticsearch/conf/es.yml
#Cluster name
cluster.name: ESCluster
#Node name
node.name: node-113
#Bind address, IPv4 or IPv6; the default 0.0.0.0 binds every interface on this machine
network.bind_host: 0.0.0.0
#Address other nodes use to reach this node; auto-detected if unset, must be a real IP
network.publish_host: 192.168.0.113
#HTTP port for client access (default 9200)
http.port: 9200
#TCP port for node-to-node transport (default 9300)
transport.tcp.port: 9300
#Allow cross-origin REST requests (needed later by elasticsearch-head)
http.cors.enabled: true
#Which origins may send REST requests
http.cors.allow-origin: "*"
#Node roles
node.master: true
node.data: true
#List of master-eligible nodes to contact for discovery
#discovery.zen.ping.unicast.hosts: ["0.0.0.0:9300","192.168.0.111:9300","192.168.0.112:9300"]
discovery.zen.ping.unicast.hosts: ["192.168.0.111:9300","192.168.0.112:9300","192.168.0.113:9300"]
#Minimum number of master-eligible nodes that must be up and reachable (default 1);
#set it to (total master-eligible nodes / 2) + 1, i.e. 3/2 + 1 = 2 for this three-node cluster
discovery.zen.minimum_master_nodes: 2
Run on all three virtual machines (this assumes elasticsearch-analysis-ik-6.5.4.zip has already been downloaded into /usr/local/elasticsearch/plugins/, e.g. from the medcl/elasticsearch-analysis-ik GitHub releases page):
cd /usr/local/elasticsearch/plugins/
yum -y install unzip
unzip -d /usr/local/elasticsearch/plugins/ik/ elasticsearch-analysis-ik-6.5.4.zip
rm -rf /usr/local/elasticsearch/plugins/elasticsearch-analysis-ik-6.5.4.zip
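Once the cluster is running (see the docker run step below), the IK plugin can be sanity-checked through the standard _analyze API; a minimal example against node-111, assuming the plugin loaded correctly:
curl -X POST "http://192.168.0.111:9200/_analyze?pretty" -H 'Content-Type: application/json' -d '{"analyzer": "ik_max_word", "text": "中华人民共和国"}'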
Run on all three virtual machines:
firewall-cmd --zone=public --add-port=9200/tcp --permanent
firewall-cmd --zone=public --add-port=9300/tcp --permanent
firewall-cmd --reload
Run on all three virtual machines:
docker run -d --name es -p 9200:9200 -p 9300:9300 -v /usr/local/elasticsearch/conf/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /usr/local/elasticsearch/data:/usr/share/elasticsearch/data -v /usr/local/elasticsearch/plugins:/usr/share/elasticsearch/plugins --privileged=true elasticsearch:6.5.4
docker logs es
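If the three containers started cleanly, the cluster can be checked from any node with the standard cat and health APIs; a healthy cluster should list three nodes and report status green:
curl "http://192.168.0.111:9200/_cat/nodes?v"
curl "http://192.168.0.111:9200/_cluster/health?pretty"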
Open the firewall port for elasticsearch-head:
firewall-cmd --zone=public --add-port=9100/tcp --permanent
firewall-cmd --reload
docker pull mobz/elasticsearch-head:5
docker run --name eshead -p 9100:9100 -d docker.io/mobz/elasticsearch-head:5
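elasticsearch-head is then reachable on port 9100 of whichever VM runs the eshead container; in its connect box, point it at any node, e.g. http://192.168.0.111:9200 (this is what the http.cors.* settings in es.yml above are for). Assuming eshead runs on 192.168.0.111, a quick reachability check:
curl -I http://192.168.0.111:9100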
docker pull kibana:6.5.4
firewall-cmd --zone=public --add-port=5601/tcp --permanent
firewall-cmd --reload
docker run -d --name kibana -e "ELASTICSEARCH_URL=http://192.168.0.111:9200" -p 5601:5601 kibana:6.5.4
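Kibana can take a minute or two to come up; assuming it runs on 192.168.0.111, its startup can be followed and its status checked before opening http://192.168.0.111:5601 in a browser:
docker logs -f kibana
curl -s http://192.168.0.111:5601/api/status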
docker pull docker.elastic.co/logstash/logstash:6.5.4
mkdir -p /usr/local/logstash/conf
chmod 777 /usr/local/logstash/conf
mkdir -p /usr/local/logstash/plugin
chmod 777 /usr/local/logstash/plugin
mkdir -p /usr/local/logstash/pipeline
chmod 777 /usr/local/logstash/pipeline
cd /usr/local/logstash/plugin
wget http://central.maven.org/maven2/mysql/mysql-connector-java/5.1.47/mysql-connector-java-5.1.47.jar
# If central.maven.org is unreachable, the same jar is available from https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.47/mysql-connector-java-5.1.47.jar
vim /usr/local/logstash/pipeline/logstash.conf
input {
  jdbc {
    jdbc_connection_string => "jdbc:mysql://192.168.0.109:3306/estest?characterEncoding=UTF-8&useSSL=false"
    jdbc_user => "root"
    jdbc_password => "123456"
    jdbc_driver_library => "/plugin/mysql-connector-java-5.1.47.jar"
    jdbc_driver_class => "com.mysql.jdbc.Driver"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "50000"
    # default time zone
    jdbc_default_timezone => "Asia/Shanghai"
    statement => "SELECT * FROM test"
    # cron-style schedule, fields left to right: minute, hour, day of month, month, day of week;
    # all asterisks means the query runs every minute
    schedule => "* * * * *"
  }
  file {
    path => "/tmp/access_log"
    start_position => "beginning"
  }
}
output {
  stdout {
    codec => json_lines
  }
  elasticsearch {
    hosts => ["192.168.0.111:9200"]   # ES cluster host(s)
    user => "root"                    # Elasticsearch username
    password => "root"                # Elasticsearch password
    index => "estest"                 # ES index name (choose your own)
    document_type => "test"           # ES type name (choose your own)
    document_id => "%{id}"            # use the table's auto-increment id as the document id
  }
}
Alternatively, a minimal pipeline that only ships a log file into the cluster:
input {
  file {
    path => "/tmp/access_log"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.0.111:9200"]
    user => "root"
    password => "root"
  }
}
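The JDBC pipeline above only assumes that the estest database has a table named test with an auto-increment id column (used by document_id => "%{id}"); the real schema is not shown here, but a minimal hypothetical table for testing the sync could be created like this on the MySQL host:
mysql -h 192.168.0.109 -uroot -p123456 estest -e "CREATE TABLE IF NOT EXISTS test (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(64), created_at DATETIME); INSERT INTO test (name, created_at) VALUES ('hello es', NOW());"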
vim /usr/local/logstash/conf/logstash.yml
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
xpack.monitoring.elasticsearch.url: http://192.168.0.111:9200
xpack.monitoring.elasticsearch.username: root
xpack.monitoring.elasticsearch.password: root
firewall-cmd --zone=public --add-port=5000/tcp --permanent
firewall-cmd --zone=public --add-port=5044/tcp --permanent
firewall-cmd --zone=public --add-port=9600/tcp --permanent
firewall-cmd --reload
Run Logstash with the pipeline, logstash.yml, and the MySQL driver (needed by the JDBC input) mounted:
docker run -v /usr/local/logstash/pipeline:/usr/share/logstash/pipeline -v /usr/local/logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml -v /usr/local/logstash/plugin:/plugin -p 5000:5000 -p 5044:5044 -p 9600:9600 --name logstash --privileged=true -d docker.elastic.co/logstash/logstash:6.5.4 -f /usr/share/logstash/pipeline/logstash.conf
Or, if the JDBC input is not used, the driver mount and the -f flag can be dropped (path.config in logstash.yml already points at the pipeline directory):
docker run -v /usr/local/logstash/pipeline:/usr/share/logstash/pipeline -v /usr/local/logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml -p 5000:5000 -p 5044:5044 -p 9600:9600 --name logstash --privileged=true -d docker.elastic.co/logstash/logstash:6.5.4
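Once the container is up, the pipeline can be verified through the Logstash monitoring API on port 9600 of the VM running the logstash container, and by checking that documents reach the estest index; the IPs below assume Logstash runs on 192.168.0.111:
docker logs -f logstash
curl "http://192.168.0.111:9600/_node/pipelines?pretty"
curl "http://192.168.0.111:9200/_cat/indices?v"
curl "http://192.168.0.111:9200/estest/_search?pretty&size=1"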