一、ES 安装部署
部署方法: docker tar包 rpm ansible
1、安装jdk
下载 elastic https://www.elastic.co/cn/downloads/elasticsearch
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.6.0-x86_64.rpm
安装和启动 启动 端口为 9200
yum localinstall elasticsearch-6.6.0-x86_64.rpm
systemctl start elasticsearch.service
2、修改配置文件, 并重新启动
[root@elk-server]# cat /etc/elasticsearch/elasticsearch.yml |grep -v "^#"
node.name: node-1
bootstrap.memory_lock: true
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.208.120
http.port: 9200
discovery.zen.ping.unicast.hosts: ['192.168.208.120']
http.cors.allow-origin: "/.*/"
http.cors.enabled: true
[root@elk-server elasticsearch]# cat /etc/elasticsearch/jvm.options |grep -v '^#'
-Xms512m
-Xmx512m
[root@elk-server /]# cd /var/lib/
[root@elk-server lib]# chown -R elasticsearch:elasticsearch elasticsearch
测试: 这样就可以用了
curl 127.0.0.1:9200
[elk_master@elk-server ~]$ curl 192.168.208.120:9200
{
"name" : "node-1",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "-agMZq-ST6GimvJkmJ_p3w",
"version" : {
"number" : "6.6.0",
"build_flavor" : "default",
"build_type" : "rpm",
"build_hash" : "a9861f4",
"build_date" : "2019-01-24T11:27:09.439740Z",
"build_snapshot" : false,
"lucene_version" : "7.6.0",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}
3、二进制包安装配置文件:
[root@elk-server /]# cat /usr/local/elasticsearch/config/elasticsearch.yml |grep -v "^#"
cluster.name: es-cluster
node.name: node-1
bootstrap.memory_lock: true
path.data: /usr/local/elk/data
path.logs: /usr/local/elk/logs
network.host: 192.168.208.120
http.port: 9200
discovery.seed_hosts: ["192.168.208.120"]
cluster.initial_master_nodes: ["192.168.208.120"]
http.cors.allow-origin: "/.*/"
http.cors.enabled: true
新建一普通用户 elkuser
chown -R elkuser:elkuser /usr/local/elk/{data,logs}
启动
/usr/local/elasticsearch/bin/elasticsearch &
报错解决启动内存锁定:
修改配置文件后重启报错解决方法:
报错日志如下:
tail /var/log/elasticsearch/elasticsearch.log
[1] bootstrap checks failed
[1]: memory locking requested for elasticsearch process but memory is not locked
解决方法如下:
[root@elk-server /]# vim /etc/systemd/system/elasticsearch.service.d/override.conf
输入如下:
[Service]
LimitMEMLOCK=infinity
[root@elk-server /]# systemctl daemon-reload
[root@elk-server /]# systemctl restart elasticsearch
重启后就可以了
4、插件安装使用
三种交互方式 curl、插件、 kibana
[root@elk-server ~]# ls /var/lib/elasticsearch/nodes/0/
node.lock _state
[root@elk-server ~]# curl 192.168.208.120:9200/_cat
=^.^=
/_cat/allocation
/_cat/shards
/_cat/shards/{index}
/_cat/master
/_cat/nodes
/_cat/tasks
/_cat/indices
/_cat/indices/{index}
/_cat/segments
/_cat/segments/{index}
/_cat/count
/_cat/count/{index}
/_cat/recovery
/_cat/recovery/{index}
/_cat/health
/_cat/pending_tasks
/_cat/aliases
/_cat/aliases/{alias}
/_cat/thread_pool
/_cat/thread_pool/{thread_pools}
/_cat/plugins
/_cat/fielddata
/_cat/fielddata/{fields}
/_cat/nodeattrs
/_cat/repositories
/_cat/snapshots/{repository}
/_cat/templates
[root@elk-server ~]# curl 192.168.208.120:9200/_cat/nodes # 查看节点
192.168.208.120 21 85 2 0.00 0.04 0.07 mdi * node-1
插件使用方法:
1、打开 Chrome 谷歌浏览器,点击 浏览器的 工具 --- 扩展程序 (安装 elasticsearch-head 扩展)
输入 elasticsearch 的地址和端口号进行连接 http://192.168.208.120:9200/
二、ES集群安装部署 3台只能坏一台
1、主机配置:
添加 node 节点 192.168.208.121 192.168.208.122
[root@elk-server ~]# cat /etc/elasticsearch/elasticsearch.yml |egrep -v '^#|^$'
cluster.name: elasticsearch_cluster #集群名称
node.name: node-1 #节点名称
path.data: /var/lib/elasticsearch #数据目录
path.logs: /var/log/elasticsearch #日志目录
bootstrap.memory_lock: true #内存锁定, 内存锁定开启了, jvm.options 设置的最大最小内存值才有效
network.host: 192.168.208.120
http.port: 9200
discovery.zen.ping.unicast.hosts: ['192.168.208.120','192.168.208.121']
discovery.zen.minimum_master_nodes: 2 # nodes/2 + 1
[root@elk-server ~]# systemctl restart elasticsearch
2、添加节点
新增节点配置
1 、 安装 node 节点
[root@elk- node-2~]# iptables -F
[root@elk- node-2~]# setenforce 0
setenforce: SELinux is disabled
[root@elk-server ~]# yum localinstall elasticsearch-6.6.0-x86_64.rpm
2、修改配置文件
[root@elk- node-2~]# cat /etc/elasticsearch/elasticsearch.yml |egrep -v '^#|^$'
cluster.name: elasticsearch_cluster #集群名称, 必须与主节点(node-1)的 cluster.name 完全一致, 否则无法加入集群
node.name: node-2 #节点名称
path.data: /var/lib/elasticsearch #数据目录
path.logs: /var/log/elasticsearch #日志目录
bootstrap.memory_lock: true #内存锁定, 内存锁定开启了, jvm.options 设置的最大最小内存值才有效
network.host: 192.168.208.121
discovery.zen.ping.unicast.hosts: ['192.168.208.120','192.168.208.121']
discovery.zen.minimum_master_nodes: 2 # nodes/2 + 1
http.cors.allow-origin: "/.*/"
http.cors.enabled: true
3、修改内存锁定
[root@elk-node-2/]# vim /etc/systemd/system/elasticsearch.service.d/override.conf
输入如下:
[Service]
LimitMEMLOCK=infinity
3、检查数据目录 授权
chown -R elasticsearch:elasticsearch /var/lib/elasticsearch /var/log/elasticsearch
4、重启服务
[root@elk-node-2/]# systemctl daemon-reload
[root@elk-node-2/]# systemctl restart elasticsearch
5、检查端口和日志 9200,9300
故障案例:
1、脑裂:
原因: 两个节点且 minimum_master_nodes 设置为 2 时, 一台出现故障导致集群无法选主而不可用
解决: 把还存活节点配置文件中的选举相关配置改为 1 或者注释掉, 然后重启服务
discovery.zen.minimum_master_nodes: 2 # nodes/2 + 1
2、2个节点,master设置为2的时候,一台出现故障导致集群不可用 解决方案: 把还存活的节点的配置文件集群选举相关的选项注释掉或者改成1 discovery.zen.minimum_master_nodes: 1 重启服务
结论: 两个节点数据不一致 会导致查询结果不一致
3、添加第三节点
1、修改主节点配置文件
discovery.zen.ping.unicast.hosts: ["192.168.208.120","192.168.208.121","192.168.208.122"]
2、增加的第三台节点配置文件
[root@elk-server ~]# cat /etc/elasticsearch/elasticsearch.yml |egrep -v '^#|^$'
cluster.name: elasticsearch_cluster #集群名称, 必须与现有集群的 cluster.name 完全一致, 否则无法加入集群
node.name: node-3 #节点名称
path.data: /var/lib/elasticsearch #数据目录
path.logs: /var/log/elasticsearch #日志目录
bootstrap.memory_lock: true #内存锁定, 内存锁定开启了, jvm.options 设置的最大最小内存值才有效
network.host: 192.168.208.122
discovery.zen.ping.unicast.hosts: ['192.168.208.120','192.168.208.122']
discovery.zen.minimum_master_nodes: 2 # nodes/2 + 1
http.cors.allow-origin: "/.*/"
http.cors.enabled: true
问题: 添加第三个节点后, 所有的分片, 有的都变成空了
理解: 数据分配:默认数据分片: 5分片,1副本
监控状态: 1.监控集群健康状态, 不是 green 则告警; 或 2.监控集群节点数量, 不是预期节点数(如 2)则告警
[root@elk-server ~]# curl -s -XGET 'http://192.168.208.120:9200/_cat/nodes?human&pretty'
192.168.208.121 10 96 1 0.07 0.47 0.67 mdi - node-2
192.168.208.120 27 95 5 0.03 0.22 0.39 mdi * node-1
curl -s -XGET 'http://192.168.208.120:9200/_cat/nodes?human&pretty' |wc -l
三、备份索引
必须要有Node环境和npm软件
nodejs
npm
1.nodejs环境安装
https://nodejs.org/dist/v10.16.3/node-v10.16.3-linux-x64.tar.xz
mkdir -p /opt/node && tar xf node-v10.16.3-linux-x64.tar.xz -C /opt/node --strip-components=1  # 先创建目录并去掉顶层目录, 否则 /opt/node/bin 不存在
vim /etc/profile
export PATH=/opt/node/bin:$PATH
source /etc/profile
[root@db-01 ~]# node -v
v10.16.3
[root@db-01 ~]# npm -v
6.9.0
2.指定使用国内淘宝npm源
npm install -g cnpm --registry=https://registry.npmmirror.com  # 淘宝源已迁移至 npmmirror.com, 旧域名 registry.npm.taobao.org 已停用
cnpm install elasticdump -g
elasticdump \
--input=http://192.168.208.120:9200/index01 \
--output=/data/index01.json \
--type=data
elasticdump \
--input=http://192.168.208.120:9200/oldzhang \
--output=$ | gzip > /data/oldzhang.json.gz
elasticdump \
--input=/data/oldzhang.json \
--output=http://192.168.208.120:9200/oldzhang
6.注意
恢复的时候需要先解压缩成json格式
恢复的时候,如果已经存在相同的数据,会被覆盖掉
如果新增加的数据,则不影响,继续保留
四、分词器
安装分词器,集群所有节点都要安装
[root@node1 elasticsearch]# cd /usr/share/elasticsearch/bin/
./elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v6.6.0/elasticsearch-analysis-ik-6.6.0.zip
重启所有安装了的节点
创建索引
curl -XPUT http://localhost:9200/index05
源码包7.3.0 安装启动 elasticsearch 、 filebeat配置:
1、配置使用自己的 jdk
[root@localhost]# cd /usr/local/elasticsearch-7.3.0/bin
[root@localhost bin]# vim elasticsearch
export JAVA_HOME=/usr/local/elasticsearch-7.3.0/jdk
export PATH=$JAVA_HOME/bin:$PATH
source "`dirname "$0"`"/elasticsearch-env
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="/usr/local/elasticsearch-7.3.0/jdk/bin/java"
else
JAVA=`which java`
fi
2、修改 vm.max_map_count
[root@localhost bin]# cat /etc/sysctl.conf
fs.file-max=65535
vm.max_map_count=655360
3、配置文件
[root@localhost config]# pwd
/usr/local/elasticsearch-7.3.0/config
[root@localhost config]# egrep -v "^$|^#" elasticsearch.yml
node.name: node-1
path.data: /data/es-data
path.logs: /var/log/elasticsearch
network.host: 10.153.105.156
http.port: 9200
discovery.seed_hosts: ["10.153.105.156"]
http.cors.allow-origin: "/.*/"
http.cors.enabled: true
启动 elasticsearch
filebeat源码安装后的配置
1 设置模块路径 , 启动相关需要监控的模块
[root@localhost filebeat]# ll
总用量 87628
drwxr-x--- 3 root root 37 4月 27 23:58 data
-rw-r--r-- 1 root root 242580 7月 25 2019 fields.yml
-rwxr-xr-x 1 root root 89144818 7月 25 2019 filebeat
-rw-r--r-- 1 root root 78871 7月 25 2019 filebeat.reference.yml
-rw------- 1 root root 474 4月 28 00:07 filebeat.yml
-rw------- 1 root root 7883 7月 25 2019 filebeat.yml.default
drwxr-xr-x 3 root root 14 7月 25 2019 kibana
-rw-r--r-- 1 root root 13675 7月 25 2019 LICENSE.txt
drwx------ 2 root root 140 4月 28 00:10 logs
drwxr-xr-x 33 root root 4096 7月 25 2019 module
drwxr-xr-x 2 root root 4096 4月 27 23:57 modules.d
-rw-r--r-- 1 root root 216284 7月 25 2019 NOTICE.txt
-rw-r--r-- 1 root root 802 7月 25 2019 README.md
[root@localhost filebeat]# vim filebeat.yml
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
reload.period: 10s
filebeat.inputs:
- type: log
enabled: false
paths:
- /var/log/messages
setup.kibana:
host: "10.153.105.156:5601"
output.elasticsearch:
hosts: ["10.153.105.156:9200"]
index: "ip156messages-%{[beat.version]}-%{+yyyy.MM}"  # ES 索引名必须全小写, 大写会被拒绝
setup.template.name: "system"
setup.template.pattern: "system-*"
setup.template.enabled: false
setup.template.overwrite: true
[root@localhost filebeat]# ./filebeat modules enable system
启动 filebeat