首先安装docker,需要centos7的机子,centos6的百度自己折腾
# Install Docker on CentOS 7.
# NOTE: the package in the CentOS 7 'extras' repo is named 'docker';
# 'docker-io' was the old CentOS 6 / EPEL package name.
yum install -y docker
# Enable at boot as well: the containers below use --restart=always,
# which only works across reboots if the docker daemon itself starts.
systemctl enable docker
systemctl start docker
进入正题,这里搭建的是ELK7.2版本
- 搭建elasticsearch
- 修改内核参数
vim /etc/sysctl.conf
vm.max_map_count = 655360
vm.swappiness = 1
- 进入data 创建目录elk
cd /data && mkdir elk
- 进入elk,创建目录data和logs
cd elk
mkdir data
mkdir logs
- 回到data,将elk目录授予1000:1000权限(es容器内默认以uid/gid 1000运行)
cd /data
chown -R 1000:1000 elk
- 接下来是证书,就不折腾了,直接上链接
链接: https://pan.baidu.com/s/1giKC... 提取码: gxye
文件上传到/data/elk/certs
- 这里搭建的是4个节点的es,在4台机子分别建目录,其中备选主节点设置个数为(节点数 / 2 + 1),我设置的是第一个节点作为数据节点,然后分别执行以下命令
# Start one Elasticsearch 7.2 node.
# Replace the placeholders before running:
#   --memory              : container memory cap — size to your host's RAM
#   ES_JAVA_OPTS          : JVM heap; set to about HALF of --memory (8g of 16G here)
#   ELASTIC_PASSWORD      : bootstrap password for the built-in 'elastic' user
#   node.name             : unique per node (node-1 .. node-4)
#   node.master/node.data : master-eligible nodes true/…, data-only nodes false/true
#   discovery.seed_hosts  : the IPs of all 4 machines
#   network.host / network.publish_host : this machine's IP
# NOTE(review): ES 7.x removed discovery.zen.minimum_master_nodes; a brand-new
# cluster will NOT form without cluster.initial_master_nodes listing the
# master-eligible node names on first startup (remove/ignore it after the
# cluster has bootstrapped once).
docker run --name ES \
  -d --net=host \
  --restart=always \
  --privileged=true \
  --ulimit nofile=655350 \
  --ulimit memlock=-1 \
  --memory=16G \
  --memory-swap=-1 \
  --volume /data:/data \
  --volume /data/elk/logs:/usr/share/elasticsearch/logs \
  --volume /data/elk/certs:/usr/share/elasticsearch/config/certs \
  --volume /etc/localtime:/etc/localtime \
  -e TERM=dumb \
  -e ELASTIC_PASSWORD='changeme' \
  -e ES_JAVA_OPTS="-Xms8g -Xmx8g" \
  -e cluster.name="my-es" \
  -e node.name="node-1" \
  -e node.master=true \
  -e node.data=true \
  -e node.ingest=false \
  -e node.attr.rack="0402-K03" \
  -e cluster.initial_master_nodes="node-1" \
  -e discovery.seed_hosts="*.*.*.*,*.*.*.*,*.*.*.*,*.*.*.*" \
  -e xpack.security.enabled=true \
  -e xpack.security.transport.ssl.enabled=true \
  -e xpack.security.transport.ssl.verification_mode=certificate \
  -e xpack.security.transport.ssl.keystore.path="certs/elastic-certificates.p12" \
  -e xpack.security.transport.ssl.truststore.path="certs/elastic-certificates.p12" \
  -e xpack.monitoring.collection.enabled=true \
  -e xpack.monitoring.exporters.my_local.type=local \
  -e xpack.monitoring.exporters.my_local.use_ingest=false \
  -e gateway.recover_after_nodes=1 \
  -e network.host=*.*.*.* \
  -e network.publish_host=*.*.*.* \
  -e transport.tcp.port=9300 \
  -e http.port=9200 \
  -e path.data=/data/elk/data \
  -e path.logs=logs \
  -e bootstrap.memory_lock=true \
  -e bootstrap.system_call_filter=false \
  -e indices.fielddata.cache.size="25%" \
  elasticsearch:7.2.0
执行前请先把命令中的 * 占位符替换为实际 IP,并删除行内 #...# 注释(如有),否则命令无法直接执行
- 完成以后就可以执行以下命令看一下日志了
docker logs ES -f
- 检测一下结果
curl --user elastic:changeme -XGET http://ip:9200/_cat/indices
看到这张图(图放的是双节点的图)就代表你搭建成功了,number_of_nodes代表的节点数 最后一个参数代表加载了多少
2.搭建kibana
- 搭建kibana比较简单,先拉一下镜像
# Pin the tag: a bare 'docker pull kibana' fetches :latest, but the run
# command below uses kibana:7.2.0 (Kibana must match the ES major.minor).
docker pull kibana:7.2.0
- 执行命令开启服务
# Start Kibana 7.2 against the local ES node (host networking, so
# localhost:9200 reaches the node started above). The SERVER_* and
# ELASTICSEARCH_* environment variables map to kibana.yml settings.
# NOTE: LOG_FILE now points under /data/elk/logs — the directory created
# earlier in this guide; the original /data/elasticsearch/logs was never
# created and would leave Kibana unable to write its log.
docker run --name kibana \
  --restart=always \
  -d --net=host \
  -v /data:/data \
  -v /etc/localtime:/etc/localtime \
  --privileged \
  -e TERM=dumb \
  -e SERVER_HOST=0.0.0.0 \
  -e SERVER_PORT=5601 \
  -e SERVER_NAME=Kibana-100 \
  -e ELASTICSEARCH_HOSTS=http://localhost:9200 \
  -e ELASTICSEARCH_USERNAME=elastic \
  -e ELASTICSEARCH_PASSWORD=changeme \
  -e XPACK_MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED=true \
  -e LOG_FILE=/data/elk/logs/kibana.log \
  kibana:7.2.0
- kibana搭建比较简单,接下来就做个nginx设置一下就好了,给个简单的配置
# Reverse-proxy Kibana (127.0.0.1:5601) on port 80.
server {
    listen 80;
    #listen [::]:80 default_server;
    #server_name ;
    #root /usr/share/nginx/html;

    # Load configuration files for the default server block.
    include /etc/nginx/default.d/*.conf;

    location / {
        # Preserve client address / host information for upstream logs.
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_set_header X-Nginx-Proxy true;
        # Keepalive to the upstream needs HTTP/1.1; the default is HTTP/1.0,
        # under which clearing the Connection header has no effect.
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_pass http://127.0.0.1:5601;
    }

    error_page 404 /404.html;
    location = /40x.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
}
- logstash安装
- 创建目录授权
cd /data
mkdir config
cd config
mkdir pipeline
cd /data
chown 1000:1000 config
- 在config中创建以下文件
vim log4j2.properties
# Turn on debug logging for the Logstash elasticsearch output plugin —
# useful while validating the pipeline; lower to 'info' once stable.
logger.elasticsearchoutput.name = logstash.outputs.elasticsearch
logger.elasticsearchoutput.level = debug
vim logstash.yml
内容直接放空,wq出来就好
vim pipelines.yml
# One pipeline that loads every *.conf under the pipeline directory
# (path as seen from INSIDE the Logstash container).
# NOTE: the keys must be indented under the '-' sequence item; with all
# three lines at column 0 (as originally pasted) the YAML fails to parse.
- pipeline.id: my-logstash
  path.config: "/usr/share/logstash/config/pipeline/*.conf"
  pipeline.workers: 3
紧接着进入pipeline目录,创建所需的管道配置文件
cd pipeline
vim redis2es.conf
# Consume JSON events from a Redis list and feed them into the pipeline.
input {
redis {
# BLPOP from a Redis list — the usual log-buffer pattern.
data_type => "list"
codec => "json"
# Replace key/host/password with your real Redis settings.
key => "xxx"
host => "xxxxxxx"
port => 6379
password => "xxxxxx"
threads => 1
# Fetch up to 100 events per Redis round trip (uses batched LPOP).
batch_count => 100
}
}
# Ship events to Elasticsearch and also archive them as gzipped files.
output {
  elasticsearch {
    hosts => ["ip1:9200"]
    # One index per service/type/day, e.g. mysrv-access-2019.07.01
    index => "%{srv}-%{type}-%{+yyyy.MM.dd}"
    # NOTE(review): document_type is deprecated with ES 7.x (mapping types
    # were removed); consider dropping this line.
    document_type => "%{type}"
    user => "elastic"
    # Fixed typo: was "changme" — must match the ELASTIC_PASSWORD
    # ('changeme') configured when the ES containers were started.
    password => "changeme"
  }
  file {
    path => "/usr/share/logstash/%{srv}-%{type}-%{+yyyyMMdd}.log"
    gzip => true
    flush_interval => 10
    workers => 1
  }
}
以上xxx请自行脑补
- elk数据迁移
我这里使用的是logstash,速度挺快的,配置文件如下
#logstash 输入插件
# Read every index from the source cluster.
input {
  elasticsearch {
    hosts => [ "*.*.*.*:9200" ] # source cluster
    index => "*"
    user => "elastic"
    password => "changeme"
    # REQUIRED for this migration: docinfo copies each hit's _index/_type/_id
    # into [@metadata]. Without it, the output's
    # index => "%{[@metadata][_index]}" is never populated and every document
    # lands in a literal "%{[@metadata][_index]}" index.
    docinfo => true
  }
}
#logstash 输出插件
# Write each event to the destination cluster.
output {
elasticsearch {
hosts => [ "*.*.*.*:9200" ] # destination cluster
# Keep the source index name. NOTE(review): this only works if the
# elasticsearch input sets docinfo => true; otherwise [@metadata][_index]
# is empty — verify the input configuration.
index => "%{[@metadata][_index]}" # same as the source index
user => "elastic"
password => "changeme"
}
}