服务器 | 配置 | 服务 | 系统 | es版本 |
---|---|---|---|---|
10.100.12.144 | 16G 8core 500G磁盘 | node.master/node.data | CentOS 7.4 | es7.5 |
10.100.12.145 | 16G 8core 500G磁盘 | node.master/node.data | CentOS 7.4 | es7.5 |
10.100.12.146 | 16G 8core 500G磁盘 | node.master/node.data | CentOS 7.4 | es7.5 |
10.100.12.147 | 16G 8core 500G磁盘 | node.data | CentOS 7.4 | es7.5 |
10.100.12.148 | 16G 8core 500G磁盘 | node.data | CentOS 7.4 | es7.5 |
Linux中,每个进程默认可打开的最大文件句柄数是1024,对于服务器进程来说显然太小。可通过修改 /etc/security/limits.conf 来增大最大打开句柄数,并同步调整 /etc/security/limits.d/20-nproc.conf 中的进程数限制。
$ vi /etc/security/limits.conf
* soft nofile 1024000
* hard nofile 1024000
* soft nproc 1024000
* hard nproc 1024000
$ vi /etc/security/limits.d/20-nproc.conf
* soft nproc 65535
$ cat /etc/sysctl.conf
#CTCDN系统优化参数
#关闭ipv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
#决定检查过期多久邻居条目
net.ipv4.neigh.default.gc_stale_time=120
#使用arp_announce / arp_ignore解决ARP映射问题
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.all.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
# 避免放大攻击
net.ipv4.icmp_echo_ignore_broadcasts = 1
# 开启恶意icmp错误消息保护
net.ipv4.icmp_ignore_bogus_error_responses = 1
#关闭路由转发
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
#开启反向路径过滤
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
#处理无源路由的包
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
#关闭sysrq功能
kernel.sysrq = 0
#core文件名中添加pid作为扩展名
kernel.core_uses_pid = 1
# 开启SYN洪水攻击保护
net.ipv4.tcp_syncookies = 1
#修改消息队列长度
kernel.msgmnb = 65536
kernel.msgmax = 65536
#设置最大内存共享段大小bytes
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
#timewait的数量,默认180000
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
#每个网络接口接收数据包的速率比内核处理这些包的速率快时,允许送到队列的数据包的最大数目
net.core.netdev_max_backlog = 262144
#限制仅仅是为了防止简单的DoS 攻击
net.ipv4.tcp_max_orphans = 3276800
#未收到客户端确认信息的连接请求的最大值
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 0
#内核放弃建立连接之前发送SYNACK 包的数量
net.ipv4.tcp_synack_retries = 1
#内核放弃建立连接之前发送SYN 包的数量
net.ipv4.tcp_syn_retries = 1
#启用timewait 快速回收(注意:tcp_tw_recycle 在NAT环境下会导致连接异常,且自Linux 4.12内核起已被移除,谨慎开启)
net.ipv4.tcp_tw_recycle = 1
#开启重用。允许将TIME-WAIT sockets 重新用于新的TCP 连接
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_fin_timeout = 1
#当keepalive 起用的时候,TCP 发送keepalive 消息的频度。缺省是2 小时
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
#允许系统打开的端口范围
net.ipv4.ip_local_port_range = 1024 65000
#修改防火墙表大小,默认65536
net.netfilter.nf_conntrack_max=655350
net.netfilter.nf_conntrack_tcp_timeout_established=1200
# 确保无人能修改路由表
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
vm.max_map_count = 1000000
fs.nr_open = 10000000
fs.file-max = 11000000
$ cd /usr/local/src
$ wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.5.0-x86_64.rpm
$ wget https://artifacts.elastic.co/downloads/kibana/kibana-7.5.0-x86_64.rpm
## JDK建议13版本
## 下载地址: https://www.oracle.com/technetwork/java/javase/downloads/jdk13-downloads-5672538.html
$ for i in 10.100.12.145 10.100.12.146 10.100.12.147 10.100.12.148 ;do scp /usr/local/src/jdk-13.0.1_linux-x64_bin.rpm $i:/usr/local/src/ ;done
$ for i in 10.100.12.145 10.100.12.146 10.100.12.147 10.100.12.148 ;do scp /usr/local/src/elasticsearch-7.5.0-x86_64.rpm $i:/usr/local/src/ ;done
各个节点安装
$ yum localinstall -y /usr/local/src/jdk-13.0.1_linux-x64_bin.rpm
$ yum localinstall -y /usr/local/src/elasticsearch-7.5.0-x86_64.rpm
验证jdk
$ java -version
java version "13.0.1" 2019-10-15
Java(TM) SE Runtime Environment (build 13.0.1+9)
Java HotSpot(TM) 64-Bit Server VM (build 13.0.1+9, mixed mode, sharing)
配置es (使用SSL)
## master节点生成证书
$ cd /usr/share/elasticsearch/
$ ./bin/elasticsearch-certutil ca ##生成ca证书 保存elastic-stack-ca.p12路径并输入密码(123qwe123)
$ ./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 ##生成客户端证书
保存elastic-certificates.p12路径并输入密码(123qwe123)
将elastic-certificates.p12(即elasticsearch.yml中keystore/truststore引用的证书)拷贝到各个节点的/etc/elasticsearch/下(必须在此目录下的任意一层目录)
$ for i in 10.100.12.144 10.100.12.145 10.100.12.146 10.100.12.147 10.100.12.148 ;do scp /usr/share/elasticsearch/elastic-* $i:/etc/elasticsearch/;done
elasticsearch.yml中增加配置
$ egrep -v '^#' /etc/elasticsearch/elasticsearch.yml
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /etc/elasticsearch/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /etc/elasticsearch/elastic-certificates.p12
所有elasticsearch节点将密码添加至elasticsearch-keystore(密码123qwe123,在/usr/share/elasticsearch目录下执行)
$ bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
$ bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
最终3个master节点配置如下(3个节点不同在于node.name)
cluster.name: cluster-7.5.0
node.name: node-1
node.master: true
node.data: true
path.data: /home/work/elasticsearch/data
path.logs: /home/work/elasticsearch/logs
http.port: 9200
network.host: 0.0.0.0
network.publish_host: 10.100.12.144
discovery.seed_hosts: ["10.100.12.144:9300", "10.100.12.145:9300", "10.100.12.146:9300"]
cluster.initial_master_nodes: ["10.100.12.144","10.100.12.145","10.100.12.146"]
discovery.zen.minimum_master_nodes: 2 # 注意:ES7起discovery.zen.*配置已废弃,选举仲裁由集群自动管理,以下几项仅作兼容保留
discovery.zen.ping_timeout: 60s # 心跳超时时间
discovery.zen.fd.ping_interval: 120s # 节点检测时间
discovery.zen.fd.ping_timeout: 120s #ping 超时时间
discovery.zen.fd.ping_retries: 3 # 心跳重试次数
gateway.recover_after_nodes: 4
gateway.recover_after_time: 10m
gateway.expected_nodes: 5
bootstrap.system_call_filter: false
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /etc/elasticsearch/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /etc/elasticsearch/elastic-certificates.p12
2个数据节点(因为是数据节点不参与选举master,去掉node.master: true配置项,注意更改node.name)
$ egrep -v '^#' /etc/elasticsearch/elasticsearch.yml
cluster.name: cluster-7.5.0
node.name: node-4 ##注意更改
node.master: false
node.data: true
path.data: /home/work/elasticsearch/data
path.logs: /home/work/elasticsearch/logs
http.port: 9200
network.host: 0.0.0.0
network.publish_host: 10.100.12.147
discovery.seed_hosts: ["10.100.12.144:9300", "10.100.12.145:9300", "10.100.12.146:9300"]
cluster.initial_master_nodes: ["10.100.12.144","10.100.12.145","10.100.12.146"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 60s # 心跳超时时间
discovery.zen.fd.ping_interval: 120s # 节点检测时间
discovery.zen.fd.ping_timeout: 120s #ping 超时时间
discovery.zen.fd.ping_retries: 3 # 心跳重试次数
gateway.recover_after_nodes: 4
gateway.recover_after_time: 10m
gateway.expected_nodes: 5
bootstrap.system_call_filter: false
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true ## 开启x-pack
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /etc/elasticsearch/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /etc/elasticsearch/elastic-certificates.p12
启动es不可使用root权限,需使用普通用户。安装es时系统已默认新增了elasticsearch组和同名用户,但该用户是nologin的,因此还需手动创建可登录的普通用户。
##创建数据目录
# mkdir /home/work/elasticsearch/{data,logs} -p
## 创建普通账户
# useradd elastic -G elasticsearch
# echo elastic |passwd elastic --stdin
## 授权
# chown -R elastic:elasticsearch /usr/share/elasticsearch/
# chown -R elastic:elasticsearch /etc/elasticsearch/
# chown -R elastic:elasticsearch /home/work/elasticsearch/
所有节点启动es
$ systemctl enable elasticsearch.service
$ systemctl start elasticsearch.service
稍等一会 选举出master
配置用户密码
[elastic@khbq-es01 elasticsearch]$ ./bin/elasticsearch-setup-passwords interactive
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
Enter password for [elastic]:
Reenter password for [elastic]:
Enter password for [apm_system]:
Reenter password for [apm_system]:
Enter password for [kibana]:
Reenter password for [kibana]:
Enter password for [logstash_system]:
Reenter password for [logstash_system]:
Enter password for [beats_system]:
Reenter password for [beats_system]:
Enter password for [remote_monitoring_user]:
Reenter password for [remote_monitoring_user]:
Changed password for user [apm_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]
$ curl --user elastic:xx 'localhost:9200/_cat/master?v'
id host ip node
pMpl1On4TAmRlxUjm6IKcA 10.100.12.145 10.100.12.145 node-2
$ curl --user elastic:xx 'localhost:9200/_cat/nodes?v'
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.100.12.147 49 98 14 1.39 1.53 1.31 dilm - node-4
10.100.12.144 66 98 20 3.26 2.19 1.69 dilm - node-1
10.100.12.146 38 98 11 2.83 2.06 1.71 dilm - node-3
10.100.12.148 29 98 18 1.27 1.23 1.23 dilm - node-5
10.100.12.145 68 98 23 2.11 2.22 2.06 dilm * node-2
通过以上步骤es集群安装完成。
es7版本已经自带x-pack,无需自行安装,通过配置xpack.security.enabled: true 来开启x-pack ,kibana来展示x-pack安全组件,下一章节安装kibana。
集群挂掉后会找不到master节点,更新es配置如下(去掉了证书配置):
cluster.name: cluster-7.5.0
node.name: node-1
node.master: true
node.data: true
path.data: /home/work/elasticsearch/data_2020021113
path.logs: /home/work/elasticsearch/logs_20200213
http.port: 9200
network.host: 0.0.0.0
discovery.zen.ping.unicast.hosts: ["10.100.12.144", "10.100.12.145", "10.100.12.146"] # 注意:ES7中该配置已废弃,建议改用discovery.seed_hosts
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
模版
PUT _template/logstash-kafka
{
"order" : 6,
"version" : 60001,
"index_patterns" : [
"logstash-kafka*"
],
"settings" : {
"index" : {
"refresh_interval" : "60s",
"number_of_shards" : "5",
"analysis.analyzer.default.type": "ik_max_word",
"translog" : {
"sync_interval" : "60s",
"durability" : "async"
},
"number_of_replicas" : "1"
}
},
"mappings" : {
"dynamic_templates" : [
{
"message_field" : {
"path_match" : "message",
"mapping" : {
"norms" : false,
"type" : "text"
},
"match_mapping_type" : "string"
}
},
{
"string_fields" : {
"mapping" : {
"norms" : false,
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"match_mapping_type" : "string",
"match" : "*"
}
}
],
"properties" : {
"@timestamp" : {
"type" : "date"
},
"geoip" : {
"dynamic" : true,
"properties" : {
"ip" : {
"type" : "ip"
},
"latitude" : {
"type" : "half_float"
},
"location" : {
"type" : "geo_point"
},
"longitude" : {
"type" : "half_float"
}
}
},
"@version" : {
"type" : "keyword"
}
}
},
"aliases" : { }
}
其他操作
PUT _all/_settings
{
"index":{
"max_result_window":10000000
}
}
PUT _cluster/settings
{
"persistent": {
"search.max_buckets": 10000000
}
}
PUT _snapshot/snapshotPro
{
"type": "fs",
"settings": {
"location": "/home/work/snapshot",
"compress": true
}
}
GET _snapshot
GET _all/_settings
GET /_snapshot
GET /_snapshot/
GET _cluster/settings
GET _template/lgb
GET /_cluster/settings
GET /_cat/aliases
GET _template/logstash-log4j
GET _template/logstash
GET _nodes/10.100.12.144/hot_threads
GET _template/logstash-kafka
GET /_cluster/settings
GET _template/logstash-kafka
PUT _cluster/settings?pretty
PUT _cluster/settings?pretty
{
"persistent": {
"cluster": {
"routing": {
"allocation": {
"allow_rebalance": "indices_primaries_active",
"cluster_concurrent_rebalance": "8",
"node_concurrent_recoveries": "8",
"enable": "all"
}
}
},
"indices": {
"store": {
"throttle": {
"max_bytes_per_sec": "50mb"
}
}
}
},
"transient": {}
}
GET /_cat/templates
GET /_cat/thread_pool?v
GET /_cat/nodes?v
GET /_cat/allocation?v
GET /_cat/
GET /_cat/master?v
GET /_stats/fielddata?fields=*
GET /_nodes/stats/indices/fielddata?fields=*
GET /_nodes/stats/indices/fielddata?level=indices&fields=*
GET /_cluster/health?pretty=true
GET _template/tykh
GET /_cluster/state
GET /_cluster/settings
GET /_nodes/10.100.20.111
GET /_all/_settings
GET _cat/health?v
PUT _cluster/settings
{
"persistent":{
"search.max_buckets": 100000
}
}
POST /_analyze
{
"text": "我爱北京天安门",
"analyzer": "ik_max_word"
}
GET _template/template_default
GET _cluster/settings
PUT _cluster/settings
{
"transient" : {
"cluster.routing.allocation.enable" : "all"
}
}
GET _analyze?pretty
{
"analyzer":"ik_smart",
"text":"中国人民警察的服务宗旨"
}
GET logstash-kafka-htjf-filebeat-htjf-econtract-jf-2019-12/_analyze
{
"field": "text",
"text": "中国人民警察的服务宗旨"
}
GET blog
POST blog/_doc/
{
"title": "中国人民警察的服务宗旨",
"content": "Version 1.0 released today!",
"priority": 10,
"tags": ["announce", "elasticsearch", "release"]
}
PUT _cluster/settings
{
"persistent": {
"search.max_buckets": "100000"
}
}
POST _analyze
{
"analyzer": "ik_smart",
"text": "5号电池"
}
POST _analyze
{
"analyzer": "ik_max_word",
"text": "5号电池"
}