| IP | 分类 |
|---|---|
192.168.20.51 | elasticsearch1 & kibana |
192.168.20.52 | elasticsearch2 |
192.168.20.53 | elasticsearch3 |
在最后添加一行
sudo vim /etc/sysctl.conf
vm.max_map_count=655360
执行并生效
sudo sysctl -p
sudo sysctl -a | grep max_map_count
编辑elasticsearch.yml文件
# elasticsearch.yml for node1 (192.168.20.51)
cluster.name: es-cluster
node.name: node1
# NOTE(review): node.master / node.data are deprecated in 7.x in favour of
# node.roles, but are still accepted by 7.15.x.
node.master: true
node.data: true
# Lock the JVM heap in RAM (requires the memlock ulimit set in docker-compose).
bootstrap.memory_lock: true
network.host: 192.168.20.51
http.port: 9200
# The other two cluster members used for discovery.
discovery.seed_hosts: ["192.168.20.52", "192.168.20.53"]
# Only consulted on the very first cluster bootstrap.
cluster.initial_master_nodes: ["node1", "node2", "node3"]
编辑kibana.yml文件
# kibana.yml — Kibana runs on node1 and connects to all three ES nodes.
server.host: "192.168.20.51"
elasticsearch.hosts: ["http://192.168.20.51:9200","http://192.168.20.52:9200","http://192.168.20.53:9200"]
# Use the Simplified Chinese UI.
i18n.locale: "zh-CN"
编辑docker-compose.yml启动
# docker-compose.yml for node1 (192.168.20.51): elasticsearch + kibana.
# Indentation restored — the flattened form is not valid Compose YAML.
version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.2
    container_name: es01
    hostname: elastic
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      # Required for bootstrap.memory_lock: true in elasticsearch.yml.
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/es_data:/usr/share/elasticsearch/data
      - /data/es_logs:/usr/share/elasticsearch/logs
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    # NOTE(review): with network_mode: "host" Docker ignores the ports:
    # mapping; it is kept only as documentation of the listening ports.
    ports:
      - "9200:9200"
      - "9300:9300"
    network_mode: "host"
  kibana:
    image: docker.elastic.co/kibana/kibana:7.15.2
    container_name: kibana
    hostname: kibana
    restart: always
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - "5601:5601"
    network_mode: "host"
    depends_on:
      - es01
注意:映射目录要创建,权限修改
mkdir /data/es_data /data/es_logs
启动
docker-compose up -d
编辑elasticsearch.yml
cluster.name: es-cluster
node.name: node2
node.master: true
node.data: true
bootstrap.memory_lock: true
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["192.168.20.51","192.168.20.53"]
cluster.initial_master_nodes: ["node1","node2","node3"]
编辑docker-compose.yml
# docker-compose.yml for node2 (192.168.20.52). Indentation restored.
version: '2.2'
services:
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.2
    container_name: es02
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      # Required for bootstrap.memory_lock: true.
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/es_data:/usr/share/elasticsearch/data
      - /data/es_logs:/usr/share/elasticsearch/logs
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    # ports are ignored under host networking; kept as documentation.
    ports:
      - "9200:9200"
      - "9300:9300"
    network_mode: "host"
注意:映射目录要创建,权限修改
mkdir /data/es_data /data/es_logs
启动
docker-compose up -d
编辑elasticsearch.yml
# elasticsearch.yml for node3 (192.168.20.53)
cluster.name: es-cluster
node.name: node3
node.master: true
node.data: true
bootstrap.memory_lock: true
# Bind all interfaces.
network.host: 0.0.0.0
http.port: 9200
# The other two cluster members.
discovery.seed_hosts: ["192.168.20.51","192.168.20.52"]
cluster.initial_master_nodes: ["node1","node2","node3"]
编辑docker-compose.yml
# docker-compose.yml for node3 (192.168.20.53). Indentation restored.
version: '2.2'
services:
  # FIX: the service key was "es02" (copy-paste from node2) while
  # container_name is es03; renamed so service and container agree.
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.2
    container_name: es03
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      # Required for bootstrap.memory_lock: true.
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/es_data:/usr/share/elasticsearch/data
      - /data/es_logs:/usr/share/elasticsearch/logs
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    # ports are ignored under host networking; kept as documentation.
    ports:
      - "9200:9200"
      - "9300:9300"
    network_mode: "host"
注意:映射目录要创建,权限修改
mkdir /data/es_data /data/es_logs
启动
docker-compose up -d
浏览器查看
192.168.20.51:9200
192.168.20.52:9200
192.168.20.53:9200
192.168.20.51:5601
编辑filebeat.docker.yml
# filebeat.docker.yml — ships /var/log/syslog (mounted at /log/ by
# filebeat.sh) plus autodiscovered Docker container logs.
# Indentation restored — the flattened form is not valid Filebeat YAML.
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
filebeat.inputs:
  - type: log
    paths:
      - /log/syslog
    # Drop any line containing 'sda'.
    exclude_lines: ['sda']
filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true
setup.template.settings:
  index.number_of_shards: 3
processors:
  - add_cloud_metadata: ~
  # Expand JSON log lines into top-level fields.
  - decode_json_fields:
      fields: ['message']
      target: ''
      overwrite_keys: true
output.elasticsearch:
  # Defaults below are overridden via -E on the docker run command line.
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'
filebeat脚本filebeat.sh
#!/bin/bash
# Run Filebeat in Docker; the ES hosts passed via -E override
# output.elasticsearch.hosts from filebeat.docker.yml.
docker run -d \
  -v /var/log/:/log/ \
  -v /data/filebeat_registry:/usr/share/filebeat/data/registry/ \
  -h filebeat \
  --name=filebeat \
  --user=root \
  --volume="$(pwd)/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro" \
  --volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \
  --volume="/var/run/docker.sock:/var/run/docker.sock:ro" \
  docker.elastic.co/beats/filebeat:7.15.2 filebeat -e -strict.perms=false \
  -E 'output.elasticsearch.hosts=["192.168.20.51:9200","192.168.20.52:9200","192.168.20.53:9200"]'
# FIX: the -E value is now quoted — unquoted ["..."] is a shell glob pattern
# and can be mangled by pathname expansion (or fail under failglob).
启动filebeat.sh
./filebeat.sh
每个es节点的elasticsearch.yml文件添加开启安全功能
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
重新启动elasticsearch
任意的es节点,设置内置账户密码
./bin/elasticsearch-setup-passwords interactive
我这里全部设置密码为123456,将interactive替换为auto,随机生成密码
更改kibana的连接密码,编辑kibana.yml文件
# kibana.yml — now authenticating against the secured cluster.
server.host: "192.168.20.51"
elasticsearch.hosts: ["http://192.168.20.51:9200","http://192.168.20.52:9200","http://192.168.20.53:9200"]
# Built-in service account for Kibana (not the elastic superuser).
elasticsearch.username: "kibana_system"
elasticsearch.password: "123456"
i18n.locale: "zh-CN"
这里方便重启kibana,从docker-compose中剥离出kibana
编写 **kibana.sh** 启动脚本
#!/bin/bash
# Start Kibana with the local kibana.yml mounted into the container.
# FIX: options are held in a bash array instead of an unquoted string, so the
# script still works when $(pwd) contains spaces or glob characters.
kibana_opts=(-v "$(pwd)/kibana.yml:/usr/share/kibana/config/kibana.yml")
docker run -d "${kibana_opts[@]}" --network=host --name kibana -h kibana \
  docker.elastic.co/kibana/kibana:7.15.2
启动kibana
chmod +x kibana.sh
./kibana.sh
浏览器访问的时候需要账户密码,用户elastic为最高权限
在任意一个es节点上,生成CA证书,还可以为该证书设置密码,这里我不设置,直接回车
./bin/elasticsearch-certutil ca
为集群中的节点生成证书和私钥,证书无密码,直接回车就好
./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
将证书复制到每个es节点的**/data/certs**下
mkdir /data/certs
这里相比之前就增加一行挂载证书目录
# docker-compose.yml for node1 with the TLS certificate directory added.
# Indentation restored.
version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.2
    container_name: es01
    hostname: elastic
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      # New line: certificates generated with elasticsearch-certutil.
      - /data/certs:/usr/share/elasticsearch/config/certs
      # FIX: earlier steps created and populated /data/es_data and
      # /data/es_logs; the original text switched to /data/elk_data here by
      # mistake, which would restart the node with an empty data directory.
      - /data/es_data:/usr/share/elasticsearch/data
      - /data/es_logs:/usr/share/elasticsearch/logs
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    # ports are ignored under host networking; kept as documentation.
    ports:
      - "9200:9200"
      - "9300:9300"
    network_mode: "host"
每个es节点都同样操作
每个es节点的elasticsearch.yml都增加以下内容
# Transport-layer TLS between cluster nodes.
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.client_authentication: required
# Paths are relative to the ES config dir; certs/ is the mounted /data/certs.
xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
重启所有的es节点
docker restart es01
在任意的es节点
./bin/elasticsearch-certutil http
Generate a CSR? [y/N]n
Use an existing CA? [y/N]y
Password for elastic-stack-ca.p12:
For how long should your certificate be valid? [5y] 5y
Generate a certificate per node? [y/N]y
node #1 name: es-cluster
Is this correct [Y/n]y
When you are done, press <ENTER> once more to move on to the next step.
192.168.20.51
192.168.20.52
192.168.20.53
Is this correct [Y/n]y
Do you wish to change any of these options? [y/N]n
Generate additional certificates? [Y/n]n
What filename should be used for the output zip file? [/usr/share/elasticsearch/elasticsearch-ssl-http.zip]
cp elasticsearch/http.p12 config/certs/
cp kibana/elasticsearch-ca.pem config/certs/
把这两个证书复制到每个es节点
每个es节点的elasticsearch.yml都增加以下内容
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.keystore.path: certs/http.p12
将私钥的密码添加到 Elasticsearch 中的安全设置中。
./bin/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password
重启所有的es节点
在kibana.yml文件增加内容
# Switch the Elasticsearch connection to HTTPS.
elasticsearch.hosts: ["https://192.168.20.51:9200","https://192.168.20.52:9200","https://192.168.20.53:9200"]
# Session encryption key (the placeholder value hints it must be >= 32 chars).
xpack.security.encryptionKey: "something_at_least_32_characters"
xpack.encryptedSavedObjects.encryptionKey: "encryptedSavedObjects12345678909876543210"
# CA that signed the ES HTTP certificates (mounted into the container).
elasticsearch.ssl.certificateAuthorities: /usr/share/kibana/config/certs/elasticsearch-ca.pem
编写 **kibana.sh**,增加证书挂载目录
#!/bin/bash
# Start Kibana with kibana.yml and the certificate directory mounted in.
# FIX: options held in a bash array instead of an unquoted string, so the
# script still works when $(pwd) contains spaces or glob characters.
kibana_opts=(
  -v "$(pwd)/kibana.yml:/usr/share/kibana/config/kibana.yml"
  -v /data/certs:/usr/share/kibana/config/certs
)
docker run -d "${kibana_opts[@]}" --network=host --name kibana -h kibana \
  docker.elastic.co/kibana/kibana:7.15.2
启动kibana
./kibana.sh
更改 **filebeat.docker.yml**
# filebeat.docker.yml — HTTPS output with basic auth against the secured
# cluster. Indentation restored — the flattened form is not valid YAML.
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
filebeat.inputs:
  - type: log
    paths:
      - /log/syslog
    # exclude_lines: ['sda']
filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true
setup.template.settings:
  index.number_of_shards: 3
processors:
  - add_cloud_metadata: ~
  # Expand JSON log lines into top-level fields.
  - decode_json_fields:
      fields: ['message']
      target: ''
      overwrite_keys: true
output.elasticsearch:
  protocol: 'https'
  # Overridden via -E on the docker run command line.
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: 'elastic'
  password: '123456'
  # CA path is relative to the Filebeat home (/usr/share/filebeat), where
  # filebeat.sh mounts /data/certs as certs/.
  ssl:
    certificate_authorities: ["certs/elasticsearch-ca.pem"]
    verification_mode: "certificate"
更新filebeat.sh
#!/bin/bash
# Run Filebeat against the TLS-secured cluster; /data/certs is mounted at
# /usr/share/filebeat/certs/ so the relative CA path in filebeat.docker.yml
# resolves inside the container.
docker run -d \
  -v /var/log/:/log/ \
  -v /data/filebeat_registry:/usr/share/filebeat/data/registry/ \
  -v /data/certs:/usr/share/filebeat/certs/ \
  -h filebeat \
  --name=filebeat \
  --user=root \
  --volume="$(pwd)/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro" \
  --volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \
  --volume="/var/run/docker.sock:/var/run/docker.sock:ro" \
  docker.elastic.co/beats/filebeat:7.15.2 filebeat -e -strict.perms=false \
  -E 'output.elasticsearch.hosts=["192.168.20.51:9200","192.168.20.52:9200","192.168.20.53:9200"]'
# FIX: the -E value is now quoted — unquoted ["..."] is a shell glob pattern
# and can be mangled by pathname expansion (or fail under failglob).
启动 filebeat.sh,浏览器kibana查看日志