I have been using Elasticsearch recently and picked up a few things along the way. The examples here use RestHighLevelClient, which is the direction the Java clients are heading.
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>7.4.2</version>
</dependency>
With Spring Boot you also need to override the Elasticsearch version that its dependency management pins by default:
<properties>
    <elasticsearch.version>7.4.2</elasticsearch.version>
</properties>
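To confirm the override actually took effect, you can inspect the resolved dependency tree; every org.elasticsearch artifact should now show 7.4.2 instead of the version Spring Boot's BOM manages by default:

mvn dependency:tree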
@Configuration
public class GulimallEsSearchConfig {

    public static final RequestOptions COMMON_OPTIONS;

    static {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        COMMON_OPTIONS = builder.build();
    }

    /**
     * Option 1: connect without username/password
     */
    @Bean
    public RestHighLevelClient esRestClient() {
        RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(
                        new HttpHost("localhost", 9200, "http"),
                        // for a cluster, simply list every node here
                        new HttpHost("localhost", 9201, "http")));
        return client;
    }
    /**
     * Option 2: connect with username/password (multiple hosts).
     * Keep only one of these esRestClient() @Bean methods active at a time.
     */
    @Bean
    public RestHighLevelClient esRestClient() {
        RestClientBuilder builder = RestClient.builder(
                new HttpHost("21.145.229.153", 9200, "http"),
                new HttpHost("21.145.229.253", 9200, "http"),
                new HttpHost("21.145.229.353", 9200, "http"));
        CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("elastic", "1qaz!QAZ"));
        builder.setHttpClientConfigCallback(f -> f.setDefaultCredentialsProvider(credentialsProvider));
        RestHighLevelClient restClient = new RestHighLevelClient(builder);
        return restClient;
    }
}
/**
 * Option 3: read the hosts and credentials from the configuration file (multiple hosts)
 */
@Configuration
public class EsConfig {
public static final RequestOptions COMMON_OPTIONS;
@Value("${elasticsearch.urls}")
private String urls;
@Value("${elasticsearch.account}")
private String account;
@Value("${elasticsearch.password}")
private String password;
static {
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
COMMON_OPTIONS = builder.build();
}
    @Bean
    public RestHighLevelClient restHighLevelClient() {
        RestClientBuilder builder = null;
        if (!StringUtils.isBlank(urls)) {
            // urls looks like "ip1:9200,ip2:9200"; collect every host so the whole cluster is used
            String[] urlsArr = urls.split(",");
            HttpHost[] hosts = new HttpHost[urlsArr.length];
            for (int i = 0; i < urlsArr.length; i++) {
                String[] urlArr = urlsArr[i].split(":");
                String ipAddr = urlArr[0];
                int port = (urlArr.length < 2 ? 9200 : Integer.parseInt(urlArr[1]));
                hosts[i] = new HttpHost(ipAddr, port, "http");
            }
            builder = RestClient.builder(hosts);
        }
        CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(account, password));
        builder.setHttpClientConfigCallback(f -> f.setDefaultCredentialsProvider(credentialsProvider));
        RestHighLevelClient restClient = new RestHighLevelClient(builder);
        return restClient;
    }
}
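The @Value placeholders above assume entries roughly like the following in the Spring configuration (shown here in application.properties form; the property names come from the annotations, the values are placeholders for your own environment):

elasticsearch.urls=21.145.229.153:9200,21.145.229.253:9200
elasticsearch.account=elastic
elasticsearch.password=1qaz!QAZ

Once the bean is in place, a quick sanity check is restHighLevelClient.ping(RequestOptions.DEFAULT), which returns true when the cluster is reachable.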
public R complexQuery(EsQueryParams esQueryParams) {
Integer start = esQueryParams.getStart() == null ? 0 : esQueryParams.getStart() - 1;
Integer limit = esQueryParams.getLimit() == null ? 0 : esQueryParams.getLimit();
String keyWord = esQueryParams.getKeyWord();
Integer minBalance = esQueryParams.getMinBalance();
Integer maxBalance = esQueryParams.getMaxBalance();
String address = esQueryParams.getAddress();
String city = esQueryParams.getCity();
String firstname = esQueryParams.getFirstname();
String employer = esQueryParams.getEmployer();
String userId = esQueryParams.getUserId();
        // 1. Create the search request
        SearchRequest request = new SearchRequest();
        // On 7.4.2 the type no longer needs to be specified; from 8.x onward types are gone entirely
        request.indices("bank");
        // 2. Build the query source
        SearchSourceBuilder ssb = new SearchSourceBuilder();
        // Paging and sorting (pages start at 0, hence the start - 1 above).
        // Compute the starting offset yourself: from means "skip the first `from` hits"
        // and size means "return `size` hits from there", exactly like MySQL's LIMIT offset, count.
        int from = start * limit;
        ssb
                .from(from)
                .size(limit)
                .sort("balance", SortOrder.DESC) // sort
                // Return the real total: without trackTotalHits(true), anything above 10000 hits is reported as 10000
                .trackTotalHits(true);
        // Restrict which source fields are returned
        ssb.fetchSource(new String[]{"account_number", "balance", "firstname", "lastname", "age", "gender", "address", "employer", "email", "city", "state"}, new String[]{});
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
        /**
         * QueryBuilders.matchQuery() is an analyzed (full-text) query, used here for the keyWord search.
         * QueryBuilders.termQuery() is an exact match.
         * For exact matches use "field.keyword"; matching the bare field usually returns nothing,
         * because the analyzer (e.g. the IK analyzer for Chinese) has already split the text into tokens,
         * so the whole phrase no longer exists as a single term in the index.
         */
        // exact matches
if (!StringUtils.isBlank(address)) {
boolQueryBuilder.filter(QueryBuilders.termQuery("address.keyword", address));
}
if (!StringUtils.isBlank(city)) {
boolQueryBuilder.filter(QueryBuilders.termQuery("city.keyword", city));
}
if (!StringUtils.isBlank(employer)) {
boolQueryBuilder.filter(QueryBuilders.termQuery("employer.keyword", employer));
}
        /**
         * Exact match that may cover several values: userId can be a single id or several ids
         * separated by commas, e.g. "1,2,3,4,5,6".
         */
if (!StringUtils.isBlank(userId)) {
String[] userIdArr = userId.split(",");
String userIdsStr = "";
if (userIdArr != null && userIdArr.length >= 2) {
for (String oneUserId : userIdArr) {
userIdsStr += !StringUtils.isBlank(userIdsStr) ? (" " + oneUserId) : oneUserId;
}
                /**
                 * Example: match addresses containing "唐人" OR "2号":
                 * boolQueryBuilder.must(QueryBuilders.matchQuery("address", "唐人 2号").operator(Operator.OR));
                 * Operator.AND would require both "唐人" AND "2号".
                 * Note: no .keyword suffix here.
                 */
boolQueryBuilder.filter(QueryBuilders.matchQuery("userId", userIdsStr).operator(Operator.OR));
} else if (userIdArr != null && userIdArr.length <= 1) {
userIdsStr = userIdArr[0];
boolQueryBuilder.filter(QueryBuilders.termsQuery("userId.keyword", userIdsStr));
}
}
        // full-text (fuzzy) match
if (!StringUtils.isBlank(firstname)) {
boolQueryBuilder.filter(QueryBuilders.matchQuery("firstname", firstname));
}
        // For the multi-field keyword search use matchPhraseQuery: a plain matchQuery analyzes the input, so searching "学校" would be split into "学" and "校" and match documents containing either character, while matchPhraseQuery only matches the whole phrase.
        if (!StringUtils.isBlank(keyWord)) {
            boolQueryBuilder
                    .should(QueryBuilders.matchPhraseQuery("lastname", keyWord))
                    .should(QueryBuilders.matchPhraseQuery("email", keyWord))
                    .should(QueryBuilders.matchPhraseQuery("employer", keyWord))
                    // with filter clauses present, should clauses are optional by default, so require at least one to match
                    .minimumShouldMatch(1);
        }
if (minBalance != null && maxBalance != null) {
            // range query (for numeric/date fields, not arbitrary strings)
boolQueryBuilder.filter(QueryBuilders.rangeQuery("balance").gte(minBalance).lte(maxBalance));
}
ssb.query(boolQueryBuilder);
        log.info("Search source built: {}", ssb);
request.source(ssb);
Map<String, Object> map = new HashMap<>();
List<Map<String, Object>> list = new ArrayList<>();
SearchResponse response = null;
RestStatus status = null;
try {
response = restHighLevelClient.search(request, ElasticsearchConfig.COMMON_OPTIONS);
status = response.status();
map.put("status", status);
long totalHits = response.getHits().getTotalHits().value;
Integer totalPage = (int) Math.ceil((double) totalHits / limit);
map.put("currPage", start);
map.put("pageSize ", limit);
map.put("totalPage", totalPage);
map.put("totalCount ", totalHits);
SearchHit[] searchHits = response.getHits().getHits();
for (SearchHit hit : searchHits) {
Map<String, Object> sourceAsMap = hit.getSourceAsMap();
String index = hit.getIndex();
list.add(sourceAsMap);
}
map.put("list", list);
        } catch (IOException e) {
            e.printStackTrace();
            return R.error("Query failed! " + e.getMessage());
        }
        return R.ok("Query succeeded!").put("result", map);
    }
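For reference, a call might look roughly like this. It is only a sketch: it assumes EsQueryParams has setters mirroring the getters used above (the original code only shows the getters), and the values are arbitrary examples.

EsQueryParams params = new EsQueryParams();
params.setStart(1);            // first page (the method subtracts 1 internally)
params.setLimit(20);           // page size
params.setCity("Brogan");      // exact match against city.keyword
params.setMinBalance(1000);    // lower bound of the balance range
params.setMaxBalance(50000);   // upper bound of the balance range
params.setKeyWord("Pyrami");   // phrase-matched against lastname / email / employer
R result = complexQuery(params);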
// Create: the source can be a JSON string, a Map, or an XContentBuilder
IndexRequest indexRequest = new IndexRequest("index").id("ID").source(builder);
restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
// Delete
DeleteRequest deleteRequest = new DeleteRequest("index", "ID");
restHighLevelClient.delete(deleteRequest, RequestOptions.DEFAULT);
// Update: the doc can likewise be a JSON string, a Map, or an XContentBuilder
UpdateRequest updateRequest = new UpdateRequest("index", "ID").doc(builder);
restHighLevelClient.update(updateRequest, RequestOptions.DEFAULT);
// Get
GetRequest getRequest = new GetRequest("index", "ID");
restHighLevelClient.get(getRequest, RequestOptions.DEFAULT);
IndexRequest request = new IndexRequest("es_user");
Map<String, Object> jsonMap = new HashMap<>();
jsonMap.put("user_name", "张启桥");
jsonMap.put("post_date", new Date());
jsonMap.put("age", 23);
jsonMap.put("gender", "男");
jsonMap.put("height", 180);
jsonMap.put("address", "四川省成都市犀浦镇百草路12号");
request.source(jsonMap);
IndexResponse response = null;
try {
    response = restHighLevelClient.index(request, ElasticsearchConfig.COMMON_OPTIONS);
    if (response.getResult().name().equalsIgnoreCase("created")) {
        return R.ok("Document created!");
    } else {
        return R.error("Create failed!");
    }
} catch (IOException e) {
    e.printStackTrace();
    return R.error("Create failed: " + e.getMessage());
}
}
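As noted in the quick reference above, the source does not have to be a Map; it can also be a JSON string or an XContentBuilder. A minimal XContentBuilder sketch (field names are just the ones used in this example; jsonBuilder() and index() throw IOException, so wrap them in a try/catch as above):

XContentBuilder xcb = XContentFactory.jsonBuilder();
xcb.startObject();
xcb.field("user_name", "test_user");
xcb.field("age", 30);
xcb.field("gender", "男");
xcb.endObject();
IndexRequest xcbRequest = new IndexRequest("es_user").source(xcb);
restHighLevelClient.index(xcbRequest, RequestOptions.DEFAULT);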
/**
 * Generate some fake data
 */
List<Map<String, Object>> list = new ArrayList<>();
for (int i = 0; i < 100; i++) {
Map<String, Object> map = new HashMap<>();
map.put("address", "四川省成都市犀浦镇百草路"+12+i+"号");
map.put("gender", "男");
map.put("user_name", RandomStringUtils.randomAlphanumeric(10));
map.put("post_date", new Date());
map.put("age", 23+i);
map.put("height", 155+i);
list.add(map);
}
/**
 * Bulk-insert the data
 */
BulkRequest request = new BulkRequest();
for (int j = 0; j < list.size(); j++) {
    Map<String, Object> item = list.get(j);
    request.add(new IndexRequest("es_user").source(item));
}
try {
    BulkResponse bulk = restHighLevelClient.bulk(request, ElasticsearchConfig.COMMON_OPTIONS);
    // status 200 only means the bulk call itself succeeded; individual items can still fail
    if (bulk.status().getStatus() == 200) {
        return R.ok("Bulk insert succeeded!");
    }
} catch (IOException e) {
    e.printStackTrace();
}
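Because a bulk response can report HTTP 200 even when some documents were rejected, it is worth checking the per-item results as well. A small sketch of that check, to be placed inside the same try block right after the bulk call:

if (bulk.hasFailures()) {
    for (BulkItemResponse item : bulk.getItems()) {
        if (item.isFailed()) {
            // log which document failed and why
            log.error("bulk item {} failed: {}", item.getId(), item.getFailureMessage());
        }
    }
}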
Query several documents by their ES-generated _id values:
SearchRequest request = new SearchRequest();
request.indices("eslog");
SearchSourceBuilder builder = new SearchSourceBuilder();
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
//addIds takes any number of _id values
boolQueryBuilder.filter(QueryBuilders.idsQuery().addIds("oI9GVHQBH0SEUrtlhvX7", "oY9HVHQBH0SEUrtlaPUO", "3Fz9aHQBxI7zG-AK_rLc"));
builder.query(boolQueryBuilder);
request.source(builder);
List<Map<String, Object>> list = new ArrayList<>();
Map<String, Object> map = new HashMap<>();
try {
SearchResponse response = restHighLevelClient.search(request, ElasticsearchConfig.COMMON_OPTIONS);
SearchHit[] searchHits = response.getHits().getHits();
for (SearchHit hit : searchHits) {
Map<String, Object> sourceAsMap = hit.getSourceAsMap();
list.add(sourceAsMap);
}
map.put("data", list);
return R.ok("查询成功!").put("result", map);
} catch (IOException e) {
    e.printStackTrace();
    return R.error("Query failed: " + e.getMessage());
}
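When you only need the documents themselves and no other query clauses, the multi-get API is a lighter alternative to an ids query. A minimal sketch (reusing the _id values from above; error handling omitted, mget throws IOException):

MultiGetRequest mgetRequest = new MultiGetRequest()
        .add("eslog", "oI9GVHQBH0SEUrtlhvX7")
        .add("eslog", "oY9HVHQBH0SEUrtlaPUO");
MultiGetResponse mgetResponse = restHighLevelClient.mget(mgetRequest, ElasticsearchConfig.COMMON_OPTIONS);
for (MultiGetItemResponse item : mgetResponse.getResponses()) {
    if (item.getResponse() != null && item.getResponse().isExists()) {
        Map<String, Object> source = item.getResponse().getSourceAsMap();
        // use the source map as needed
    }
}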
Update a document (here the "id" value in the map doubles as the document _id):
Map<String, Object> map = new HashMap<>();
map.put("id", 5);
map.put("address", "四川省成都市犀浦镇百草路" + 10082 + "号");
map.put("gender", "nv");
map.put("user_name", RandomStringUtils.randomAlphanumeric(10));
map.put("post_date", new Date());
map.put("age", 23);
map.put("height", 168);
UpdateRequest request = new UpdateRequest("es_user",map.get("id").toString()).doc(map);
try {
UpdateResponse update = restHighLevelClient.update(request, ElasticsearchConfig.COMMON_OPTIONS);
if (update.status().getStatus() == 200) {
    return R.ok("Update succeeded!");
} else {
    return R.error("Update failed!");
}
} catch (IOException e) {
    e.printStackTrace();
    return R.error("Update failed: " + e.getMessage());
}
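If the document may not exist yet, the update above fails with a document-missing error. One way around that (a sketch, not from the original post) is to treat the doc as an upsert:

UpdateRequest upsertRequest = new UpdateRequest("es_user", "5")
        .doc(map)
        .docAsUpsert(true);   // index the doc as a new document if _id "5" does not exist yet
restHighLevelClient.update(upsertRequest, ElasticsearchConfig.COMMON_OPTIONS);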
public Map<String, Object> getDataById(String id) {
    /** Here id is the _id that ES generated when the document was indexed, not the id field inside the data **/
    GetRequest request = new GetRequest("eslog", id);
    GetResponse response = null;
    try {
        response = restHighLevelClient.get(request, EsConfig.COMMON_OPTIONS);
        Map<String, Object> sourceAsMap = response.getSourceAsMap(); // null if the document does not exist
        /** put the ES-generated _id into the map as well **/
        sourceAsMap.put("_id", id);
        return sourceAsMap;
    } catch (IOException e) {
        log.error(e.getMessage(), e);
        /** throw a custom exception here **/
        throw new RuntimeException("^^^^^^");
    }
}
public List<Map<String, Object>> getByOwnId(String id) {
SearchRequest request = new SearchRequest();
request.indices("eslog");
SearchSourceBuilder ssb = new SearchSourceBuilder();
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.filter(QueryBuilders.termsQuery("id.keyword", id));
ssb.query(boolQueryBuilder);
request.source(ssb);
SearchResponse response = null;
try {
response = restHighLevelClient.search(request, EsConfig.COMMON_OPTIONS);
SearchHit[] hits = response.getHits().getHits();
List<Map<String, Object>> list = new ArrayList<>();
for (SearchHit hit : hits) {
Map<String, Object> sourceAsMap = hit.getSourceAsMap();
list.add(sourceAsMap);
}
return list;
} catch (IOException e) {
log.error(e.getMessage(), e);
ErrorUtils.throwIDaasException(AuditErrorCode.INNER_ERROR);
}
return null;
}
Delete a document by its business id (delete-by-query):
public Long deleteById(String id) {
DeleteByQueryRequest request = new DeleteByQueryRequest();
request.indices("eslog");
request.setQuery(new TermQueryBuilder("id.keyword", id));
// maximum number of documents to process
request.setSize(10);
// batch size per scroll
request.setBatchSize(1000);
// run the delete in parallel slices
request.setSlices(2);
// how long the scroll search context is kept alive
request.setScroll(TimeValue.timeValueMinutes(10));
// timeout
request.setTimeout(TimeValue.timeValueMinutes(2));
// refresh the index afterwards
request.setRefresh(true);
BulkByScrollResponse response = null;
try {
response = restHighLevelClient.deleteByQuery(request, EsConfig.COMMON_OPTIONS);
/** Return the number of documents actually deleted; -1 below means the request itself failed **/
return response.getDeleted();
} catch (IOException e) {
log.error(e.getMessage(),e);
ErrorUtils.throwIDaasException(AuditErrorCode.INNER_ERROR);
}
return -1L;
}
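Delete-by-query is for deleting by a field value. If what you have is the ES-generated _id itself, a plain DeleteRequest (as in the quick reference earlier) is simpler. A sketch, where esGeneratedId is assumed to hold that _id and the IOException is handled as elsewhere:

DeleteRequest deleteRequest = new DeleteRequest("eslog", esGeneratedId);
DeleteResponse deleteResponse = restHighLevelClient.delete(deleteRequest, EsConfig.COMMON_OPTIONS);
// RestStatus.OK means the document was found and deleted; NOT_FOUND means there was nothing to delete
boolean deleted = deleteResponse.status() == RestStatus.OK;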
To produce grouped counts by condition, you usually need an aggregation query:
// company code
String companyCode = countByCondRequestDTO.getCompanyCode();
// whitelist flag
String isInWhitlelist = countByCondRequestDTO.getIsInWhitlelist();
// system id
String unitCode = countByCondRequestDTO.getUnitCode();
// time range: start
String startTime = countByCondRequestDTO.getStartTime();
// time range: end
String endTime = countByCondRequestDTO.getEndTime();
/**
 * The grouping field must not be empty
 */
String groupField = countByCondRequestDTO.getGroupField();
if (StringUtils.isBlank(groupField)) {
    // custom exception (details omitted here)
    ErrorUtils.throwIDaasException(AuditErrorCode.GROUP_FIELD_ISNOTALLOWNULL);
}
SearchRequest request = new SearchRequest();
request.indices("eslog");
SearchSourceBuilder ssb = new SearchSourceBuilder();
/**
 * Query conditions: the grouping is restricted to documents matching these filters
 */
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
if (!StringUtils.isBlank(companyCode)) {
boolQueryBuilder.filter(QueryBuilders.termsQuery("companyCode.keyword", companyCode));
}
if (!StringUtils.isBlank(isInWhitlelist)) {
boolQueryBuilder.filter(QueryBuilders.termsQuery("isInWhitlelist.keyword", isInWhitlelist));
}
if (!StringUtils.isBlank(unitCode)) {
boolQueryBuilder.filter(QueryBuilders.termsQuery("unitCode.keyword", unitCode));
}
Long startTimeMills = null;
Long endTimeMills = null;
if (!StringUtils.isBlank(startTime) && startTime.length() == TIME_LENGTH) {
startTimeMills = getTimeMills(startTime + " 00:00:00");
}
if (!StringUtils.isBlank(endTime) && endTime.length() == TIME_LENGTH) {
endTimeMills = getTimeMills(endTime + " 23:59:59");
}
if (startTimeMills != null) {
boolQueryBuilder.filter(QueryBuilders.rangeQuery(CommonEnum.UniversalField.LOGINTIMEMILLS.getValue()).gte(startTimeMills));
}
if (endTimeMills != null) {
boolQueryBuilder.filter(QueryBuilders.rangeQuery(CommonEnum.UniversalField.LOGINTIMEMILLS.getValue()).lte(endTimeMills));
}
/**
 * Aggregate on the chosen field:
 * terms(...) names the aggregation (conventionally the grouping field name + "Agg")
 * field(...) is the grouping field; append .keyword so it is not analyzed
 * order(BucketOrder.count(false)) sorts buckets by doc count descending; true would sort ascending
 * size(...) is how many buckets to return (can be passed in from the front end for dynamic control)
 */
ssb.aggregation(AggregationBuilders
.terms(groupField + "Agg")
.field(groupField + ".keyword")
.order(BucketOrder.count(false))
.size(10));
ssb.query(boolQueryBuilder);
request.source(ssb);
SearchResponse response = null;
Map<String, Long> resultMap = new HashMap<>();
try {
response = restHighLevelClient.search(request, EsConfig.COMMON_OPTIONS);
Aggregations aggregations = response.getAggregations();
//look the aggregation up by the name we gave it above
ParsedStringTerms parsedStringTerms = aggregations.get(groupField + "Agg");
List<? extends Terms.Bucket> buckets = parsedStringTerms.getBuckets();
for (Terms.Bucket bucket : buckets) {
String key = bucket.getKey().toString();
long docCount = bucket.getDocCount();
resultMap.put(key, docCount);
}
//getArrAfterSort sorts the map by value; see my other post on sorting a Map by value, not repeated here
CountByCondResponseDTO cbc = getArrAfterSort(resultMap);
return cbc;