目录
一.docker安装ES(单机版)
1.设置max_map_count,否则es会启动不起来(因虚拟内存太少导致)
2.下载镜像并运行
3.浏览器访问ip:9200 如果出现以下界面就是安装成功
4.可装可不装部分:
二.docker安装ES(集群版)
1.真集群版本
2.伪集群版本
三.ES常用API
一.基础api
四.Java操作es
1.引入依赖
2.实例化http请求对象
3.es中的curd
转载自:docker安装elasticsearch(最详细版)_bright的博客-CSDN博客_docker安装elasticsearch
cat /proc/sys/vm/max_map_count
sysctl -w vm.max_map_count=262144
#拉取镜像
docker pull elasticsearch:7.7.0
#启动镜像
docker run --name elasticsearch -d -e ES_JAVA_OPTS="-Xms512m -Xmx512m" -e "discovery.type=single-node" -p 9200:9200 -p 9300:9300 elasticsearch:7.7.0
--name表示镜像启动后的容器名称
-d: 后台运行容器,并返回容器ID;
-e: 指定容器内的环境变量
-p: 指定端口映射,格式为:主机(宿主)端口:容器端口
1.安装elasticsearch-head(es图形界面)
#拉取镜像
docker pull mobz/elasticsearch-head:5
#创建容器
docker run -d --name elasticsearch-head -p 9100:9100 mobz/elasticsearch-head:5
2.浏览器打开: http://IP:9100
尝试连接elasticsearch会发现无法连接上,由于是前后端分离开发,所以会存在跨域问题,需要在服务端做CORS的配置,解决方法:
docker exec -it elasticsearch /bin/bash (进不去使用容器id进入)
vi config/elasticsearch.yml
#在最下面添加2行
http.cors.enabled: true
http.cors.allow-origin: "*"
退出并重启es服务
3.安装ik分词器
这里采用离线安装
下载地址:
https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.7.0/elasticsearch-analysis-ik-7.7.0.zip
将IK分词器上传到/tmp目录中
#将压缩包移动到容器中
docker cp /tmp/elasticsearch-analysis-ik-7.7.0.zip elasticsearch:/usr/share/elasticsearch/plugins
#进入容器
docker exec -it elasticsearch /bin/bash
#创建目录
mkdir /usr/share/elasticsearch/plugins/ik
#将文件压缩包移动到ik中
mv /usr/share/elasticsearch/plugins/elasticsearch-analysis-ik-7.7.0.zip /usr/share/elasticsearch/plugins/ik
#进入目录
cd /usr/share/elasticsearch/plugins/ik
#解压
unzip elasticsearch-analysis-ik-7.7.0.zip
#删除压缩包
rm -rf elasticsearch-analysis-ik-7.7.0.zip
退出并重启容器
转载自:docker配置搭建elasticsearch集群 - 自然洒脱 - 博客园
1.三台服务器
2.修改每台主机的内核参数vm.max_map_count
3.编辑docker-compose.yaml文件
version: '3'
services:
  elasticsearch:                        # service name
    image: elasticsearch:7.7.0          # image to run
    container_name: elasticsearch       # container name
    restart: always                     # restart automatically on failure
    environment:
      - node.name=node-130                       # node name — must be unique within the cluster
      - network.publish_host=192.168.81.130      # address other cluster members use to reach this node; usually the host IP
      - network.host=0.0.0.0                     # bind address (0.0.0.0 = all interfaces, the default)
      - discovery.seed_hosts=192.168.81.130,192.168.81.131,192.168.81.132        # ES 7.x: seed addresses of master-eligible nodes, used if the master goes down
      - cluster.initial_master_nodes=192.168.81.130,192.168.81.131,192.168.81.132  # ES 7.x: required to elect a master when bootstrapping a new cluster
      - cluster.name=es-cluster                  # cluster name — must be identical on all three nodes
      # - http.cors.enabled=true                 # CORS does NOT take effect via env vars; set it in the mapped elasticsearch.yml and restart instead
      # - http.cors.allow-origin="*"             # (same — see the volume-mapped config file below)
      - bootstrap.memory_lock=true               # lock JVM memory; recommended by the official docs
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"         # JVM heap; lower it if the host is short on RAM
    ulimits:          # memory-lock limits
      memlock:
        soft: -1      # unlimited
        hard: -1      # unlimited
    volumes:
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml  # map the config to the host so CORS can be enabled (needed by the head plugin)
      - ./data:/usr/share/elasticsearch/data                                   # data directory on the host
    ports:
      - "9200:9200"   # HTTP port — reachable directly from a browser (quote port mappings to avoid YAML type surprises)
      - "9300:9300"   # TCP transport port used for node-to-node communication
4.编辑elasticsearch.yml配置文件
network.host: 0.0.0.0
http.cors.enabled: true # 是否支持跨域
http.cors.allow-origin: "*" # 表示支持所有域名
5.初次运行的时候,注释掉compose文件中的volume,需要先运行起来,然后将容器中的data目录cp到宿主机上,否则报错
#注释掉挂载后先执行
docker-compose up -d
#然后执行
docker cp elasticsearch:/usr/share/elasticsearch/data .
#删除容器,重新执行docker-compose
6.访问web界面http://192.168.81.132:9200/_cluster/health?pretty
7.中文分词器
# Build a custom ES image with the IK Chinese analyzer pre-installed.
FROM elasticsearch:7.7.0
# Fetch the IK plugin archive into plugins/ik (ADD downloads remote URLs but never unpacks zips, hence the unzip below).
ADD https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.7.0/elasticsearch-analysis-ik-7.7.0.zip /usr/share/elasticsearch/plugins/ik/
# Set the container timezone to Asia/Shanghai, then unpack the plugin and delete the archive.
RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo 'Asia/Shanghai' > /etc/timezone \
&& cd /usr/share/elasticsearch/plugins/ik/ \
&& unzip elasticsearch-analysis-ik-7.7.0.zip \
&& rm -f elasticsearch-analysis-ik-7.7.0.zip
执行docker build .
8.curl命令对es的操作示例:
获取:curl -XGET 'http://localhost:9200/odin_device_device_collection'
删:curl -XDELETE 'http://localhost:9200/odin_device_device_collection'
插:curl -XPUT 'http://localhost:9200/odin_device_device_collection' -H 'Content-Type: application/json' -d '数据内容'
查:curl -XGET "http://localhost:9200/odin_device_device_collection/_doc/_search" -H 'Content-Type: application/json' -d'{"query": {"match_all": {}},"from": 0,"size": 300}'
清空:curl -XPOST "http://localhost:9200/odin_device_device_collection/_doc/_delete_by_query" -H 'Content-Type: application/json' -d'{"query": {"match_all": {}}}'
version: '3'
services:
  elasticsearch_n0:
    image: elasticsearch:7.7.0
    container_name: elasticsearch_n0
    privileged: true
    environment:
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"    # JVM heap per node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - $PWD/node0-elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - $PWD/data/node0:/usr/share/elasticsearch/data
      - $PWD/logs/node0:/usr/share/elasticsearch/logs
    ports:
      - "9200:9200"   # node0's config sets http.port: 9200
      - "9300:9300"   # node0's config sets transport.tcp.port: 9300
  elasticsearch_n1:
    image: elasticsearch:7.7.0
    container_name: elasticsearch_n1
    privileged: true
    environment:
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - $PWD/node1-elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - $PWD/data/node1:/usr/share/elasticsearch/data
      - $PWD/logs/node1:/usr/share/elasticsearch/logs
    ports:
      # FIX: node1's config sets http.port: 9201, so the container listens on 9201 —
      # the original mapping "9201:9200" pointed at a port nothing listens on.
      - "9201:9201"
      - "9301:9301"   # node1's transport.tcp.port is 9301
  elasticsearch_n2:
    image: elasticsearch:7.7.0
    container_name: elasticsearch_n2
    privileged: true
    environment:
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - $PWD/node2-elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - $PWD/data/node2:/usr/share/elasticsearch/data
      - $PWD/logs/node2:/usr/share/elasticsearch/logs
    ports:
      # FIX: node2 listens on 9202 inside the container (was "9202:9200").
      - "9202:9202"
      - "9302:9302"   # node2's transport.tcp.port is 9302
  kibana:
    image: kibana:7.7.0
    container_name: kibana
    environment:
      - SERVER_NAME=kibana
      # Container-to-container traffic targets each node's configured http.port directly.
      - ELASTICSEARCH_HOSTS=["http://elasticsearch_n0:9200","http://elasticsearch_n1:9201","http://elasticsearch_n2:9202"]
      - XPACK_MONITORING_ENABLED=true
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch_n0
# Cluster name — must be identical on all three nodes.
cluster.name: es-cluster
# Node name — unique per node.
node.name: node0
# Whether this node is master-eligible.
node.master: true
# Whether this node stores data.
node.data: true
# Maximum number of nodes allowed to share this data path.
node.max_local_storage_nodes: 3
# Bind address (0.0.0.0 = all interfaces).
network.host: 0.0.0.0
# HTTP port.
http.port: 9200
# TCP transport port for node-to-node communication.
transport.tcp.port: 9300
# ES 7.x: transport addresses of the master-eligible seed nodes.
discovery.seed_hosts: ["192.168.10.211:9300","192.168.10.211:9301","192.168.10.211:9302"]
# ES 7.x: node names used to elect the master when bootstrapping a new cluster.
cluster.initial_master_nodes: ["node0", "node1", "node2"]
# CORS settings so the elasticsearch-head plugin can connect.
http.cors.allow-origin: "*"
http.cors.enabled: true
http.cors.allow-credentials: true
其他 node1-elasticsearch.yml, node2-elasticsearch.yml 内容一样,如
# Cluster name — must be identical on all three nodes.
cluster.name: es-cluster
# Node name — unique per node.
node.name: node1
# Whether this node is master-eligible.
node.master: true
# Whether this node stores data.
node.data: true
# Maximum number of nodes allowed to share this data path.
node.max_local_storage_nodes: 3
# Bind address (0.0.0.0 = all interfaces).
network.host: 0.0.0.0
# HTTP port (each pseudo-cluster node uses a distinct port).
http.port: 9201
# TCP transport port for node-to-node communication.
transport.tcp.port: 9301
# ES 7.x: transport addresses of the master-eligible seed nodes.
discovery.seed_hosts: ["192.168.10.211:9300","192.168.10.211:9301","192.168.10.211:9302"]
# ES 7.x: node names used to elect the master when bootstrapping a new cluster.
cluster.initial_master_nodes: ["node0", "node1", "node2"]
# CORS settings so the elasticsearch-head plugin can connect.
http.cors.allow-origin: "*"
http.cors.enabled: true
http.cors.allow-credentials: true
1.启动
chmod -R 777 /apps/es-cluster/
docker-compose -f es-cluster-compose.yml up -d
2.使用es-head 插件查看
1.查看ES集群的健康状况
curl localhost:9200/_cluster/health?pretty
{
"cluster_name" : "es",
"status" : "yellow", 当前集群状态
"timed_out" : false,
"number_of_nodes" : 3, 当前集群在线的节点个数为3
"number_of_data_nodes" : 3, 在线的数据节点数
"active_primary_shards" : 16055, 活跃的主分片数量
"active_shards" : 32107, 活跃的分片数量,包括主分片和副本。
"relocating_shards" : 0, 正在移动的分片数量
"initializing_shards" : 0, 正在初始化的分片个数为:0
"unassigned_shards" : 3, 未分配的分片个数为:3
"delayed_unassigned_shards" : 0, 由于节点离线导致延迟分配的分片数量
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 99.99065711616318 所有活跃分片/打开的所有索引的分片总数
}
2.查看ES的设置
curl localhost:9200/_cluster/settings?pretty
{
"persistent" : { // 永久设置,重启仍然有效
"action" : {
"auto_create_index" : ".security,.monitoring-*,.watch*,.triggered_watches,.quota,noah*,basp*",
"destructive_requires_name" : "false"
}
},
"transient" : { } // 临时设置,重启失效
}
3.动态设置参数
临时生效:transient修改方法为:curl -XPUT 'http://localhost:9200/_cluster/settings?pretty' -d '{"transient":{"dynamic.parma":"value"}}'
永久生效:persistent修改方法为:curl -XPUT 'http://localhost:9200/_cluster/settings?pretty' -d '{"persistent":{"dynamic.parma":"value"}}'
4.查看ES在线的节点(存在节点缺失的情况可用该命令查看缺失节点为哪些)
curl localhost:9200/_cat/nodes?v
5.查看ES的主节点
curl localhost:9200/_cat/master curl localhost:9200/_cat/master?v // 加上 ?v 将显示字段名.
6.查看所有索引
curl localhost:9200/_cat/indices curl localhost:9200/_cat/indices?v // 加上 ?v 将显示字段名.
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open java_index jkI8Nh7kSseKq-NV6afXsw 1 1 4 0 9.4kb 4.1kb
green open java_index_test T-PQDsnETYWGdlCzlFeNng 1 1 0 0 416b 208b
green open .apm-custom-link E7lkF1s5QZSByYjzbeqK7Q 1 1 0 0 416b 208b
green open .kibana_task_manager_1 N6p03AdHToKq9AC7vk4ctg 1 1 5 3 57.9kb 33.9kb
green open .apm-agent-configuration -tfJPaTfTX2c4-wYCBJjXg 1 1 0 0 416b 208b
green open .kibana_1 HK6kygrtSViEEn9s4KAkjw 1 1 4 0 61.6kb 30.8kb
7.查看具体某个索引
curl localhost:9200/_cat/indices/{index} curl localhost:9200/_cat/indices/{index}?v // 加上 ?v 将显示字段名.
curl -sXGET localhost:9200/_cat/indices/zsy*?v // 查询前缀是zsy的所有索引
8.查询索引的分片情况
curl localhost:9200/_cat/shards?v
index shard prirep state docs store ip node
.kibana_1 0 p STARTED 4 30.8kb 172.19.0.4 node0
.kibana_1 0 r STARTED 4 30.8kb 172.19.0.2 node1
.apm-agent-configuration 0 p STARTED 0 208b 172.19.0.3 node2
.apm-agent-configuration 0 r STARTED 0 208b 172.19.0.2 node1
java_index 0 p STARTED 4 4.1kb 172.19.0.4 node0
java_index 0 r STARTED 4 5.2kb 172.19.0.3 node2
.kibana_task_manager_1 0 r STARTED 5 24kb 172.19.0.3 node2
.kibana_task_manager_1 0 p STARTED 5 33.9kb 172.19.0.2 node1
java_index_test 0 p STARTED 0 208b 172.19.0.4 node0
java_index_test 0 r STARTED 0 208b 172.19.0.2 node1
.apm-custom-link 0 p STARTED 0 208b 172.19.0.4 node0
.apm-custom-link 0 r STARTED 0 208b 172.19.0.3 node2
9.查询指定索引的分片情况
curl localhost:9200/_cat/shards/{index}?v
10.查看segments内存占用情况
curl -sXGET "http://localhost:9200/_cat/nodes?h=name,segments.memory,heap.max&v"
11.查看线程池
curl -sXGET "http://localhost:9200/_cat/thread_pool?v"
12.查看ES集群线程池状态
#此处的search是刚才查看线程池中的name
curl -sXGET "http://localhost:9200/_cat/thread_pool/search?v"
如果集群存在入库有延迟的情况,执行thread_poolAPI,如果reject>0,说明集群的处理能力低于入库请求,请求业务方降低入库速率。
node_name name active queue rejected
es.***.0 search 4 0 0
es.***.0 search 61 977 4326
es.***.1 search 61 495 3497
如果reject>0,说明集群的处理能力低于查询请求,需要降低查询速率。
13.打开指定索引
curl -XPOST "http://localhost:9200/{index}/_open"
14.关闭指定索引
curl -XPOST "http://localhost:9200/{index}/_close"
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
@Bean
public RestHighLevelClient restHighLevelClient(){
    // Build the high-level REST client; list every node of the ES cluster here
    // (add or remove HttpHost entries to match the cluster topology).
    return new RestHighLevelClient(
            RestClient.builder(
                    new HttpHost("192.168.10.211",9200,"http"),
                    new HttpHost("192.168.10.211",9201,"http"),
                    new HttpHost("192.168.10.211",9202,"http")
            )
    );
}
@Test
void testCreate() throws IOException {
    // Build a create-index request for "java_index".
    CreateIndexRequest request = new CreateIndexRequest("java_index");
    // Execute it through the client; the response is not inspected here.
    CreateIndexResponse response = restHighLevelClient.indices().create(request, RequestOptions.DEFAULT);
}
// Test: check whether the "java_index" index exists.
@Test
void testGet() throws IOException {
    GetIndexRequest request = new GetIndexRequest("java_index");
    // exists() returns a primitive boolean — use the primitive to avoid needless autoboxing.
    boolean exists = restHighLevelClient.indices().exists(request, RequestOptions.DEFAULT);
    System.out.println(exists);
}
@Test
void testDelete() throws IOException {
    // Delete the "java_index" index and print whether the cluster acknowledged it.
    DeleteIndexRequest request = new DeleteIndexRequest("java_index");
    AcknowledgedResponse response = restHighLevelClient.indices().delete(request, RequestOptions.DEFAULT);
    System.out.println(response.isAcknowledged());
}
// Test: index a single document.
@Test
void addDocument() throws IOException {
    User user = new User();
    user.setName("zsy");
    user.setAge(26);
    // Target index and explicit document id.
    IndexRequest request = new IndexRequest("java_index");
    request.id("1");
    // Serialize the entity to JSON and attach it as the document source.
    request.source(JSON.toJSONString(user), XContentType.JSON);
    // Send the request and print the response and its HTTP status.
    IndexResponse response = restHighLevelClient.index(request, RequestOptions.DEFAULT);
    System.out.println(response.toString());
    System.out.println(response.status());
}
// Test: check whether document "1" exists in "java_index".
@Test
void existDocument() throws IOException {
    GetRequest request = new GetRequest("java_index", "1");
    // exists() returns a primitive boolean — use the primitive to avoid needless autoboxing.
    boolean exists = restHighLevelClient.exists(request, RequestOptions.DEFAULT);
    System.out.println(exists);
}
// Test: fetch document "1" from "java_index".
@Test
void getDocument() throws IOException {
    GetRequest request = new GetRequest("java_index", "1");
    GetResponse response = restHighLevelClient.get(request, RequestOptions.DEFAULT);
    // Print the raw _source JSON, then the full response.
    System.out.println(response.getSourceAsString());
    System.out.println(response);
}
// Test: update document "1" in "java_index".
@Test
void updateDocument() throws IOException {
    User user = new User();
    user.setName("测试数据");
    user.setAge(18);
    // Partial update: the JSON body replaces the matching fields of the stored doc.
    UpdateRequest request = new UpdateRequest("java_index", "1");
    request.doc(JSON.toJSONString(user), XContentType.JSON);
    UpdateResponse response = restHighLevelClient.update(request, RequestOptions.DEFAULT);
    System.out.println(response.toString());
    System.out.println(response.status());
}
// Test: delete document "1" from "java_index".
@Test
void deleteDocument() throws IOException {
    DeleteRequest request = new DeleteRequest("java_index", "1");
    DeleteResponse response = restHighLevelClient.delete(request, RequestOptions.DEFAULT);
    // Print the full response, then its HTTP status.
    System.out.println(response);
    System.out.println(response.status());
}
//测试批量添加文档下信息
@Test
void batchAddDocument() throws IOException {
//创建批量操作对象
BulkRequest bulk = new BulkRequest();
ArrayList list = new ArrayList<>();
list.add(new User().setName("张一山").setAge(16));
list.add(new User().setName("张二牛").setAge(11));
list.add(new User().setName("张三").setAge(22));
list.add(new User().setName("张武").setAge(19));
for(int i=0;i