In a previous post I configured the IK analyzer; this one explains how to make Elasticsearch use two analyzers at the same time. The approach mainly follows http://blog.csdn.net/napoay/article/details/53907921.
I'm still using Sense for the requests here, mainly because I haven't fully figured out how to do this in Kibana yet.
PUT /medcl/
{
"index": {
"analysis": {
"analyzer": {
"ik_pinyin_analyzer": {
"type": "custom",
"tokenizer": "ik_smart",
"filter": ["my_pinyin", "word_delimiter"]
}
},
"filter": {
"my_pinyin": {
"type": "pinyin",
"first_letter": "prefix",
"padding_char": " "
}
}
}
}
}
A quick explanation of the request above: it creates an index named medcl and defines a custom analyzer called ik_pinyin_analyzer, which tokenizes with ik_smart and then applies two token filters: the custom my_pinyin filter and Elasticsearch's built-in word_delimiter filter. my_pinyin is backed by the pinyin analysis plugin. Elasticsearch ships with many built-in token filters; http://blog.csdn.net/i6448038/article/details/50625397 describes them in detail.
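Before moving on, you can sanity-check the new analyzer with the _analyze API (a minimal sketch; exactly which tokens come back depends on the versions and settings of the IK and pinyin plugins):
POST /medcl/_analyze
{"analyzer": "ik_pinyin_analyzer", "text": "刘德华"}
The response should contain pinyin-derived tokens for the name, such as full-pinyin and first-letter forms.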
Next, create a mapping for a type named folks, whose pinyin sub-field uses the ik_pinyin_analyzer we just defined.
POST /medcl/folks/_mapping
{
"folks": {
"properties": {
"name": {
"type": "keyword",
"fields": {
"pinyin": {
"type": "text",
"store": "no",
"term_vector": "with_positions_offsets",
"analyzer": "ik_pinyin_analyzer"
}
}
}
}
}
}
Index two documents into the server:
POST /medcl/folks/andy
{"name":["刘德华","刘邦"]}
POST /medcl/folks/tina
{"name":"中华人民共和国国歌"}
Check that they can be found:
POST /medcl/folks/_search
{
"query": {
"match": {
"name.pinyin": "国歌"#改成zhonghua测试拼音的是否通过
}
},
"highlight": {
"fields": {
"*": {}
}
}
}
The query result looks like this:
{
"took": 7,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 2.6638038,
"hits": [
{
"_index": "medcl",
"_type": "folks",
"_id": "tina",
"_score": 2.6638038,
"_source": {
"name": "中华人民共和国国歌"
},
"highlight": {
"name.pinyin": [
"中华人民共和国国歌"
]
}
},
{
"_index": "medcl",
"_type": "folks",
"_id": "andy",
"_score": 0.22009256,
"_source": {
"name": [
"刘德华",
"刘邦"
]
},
"highlight": {
"name.pinyin": [
"刘德华"
]
}
}
]
}
}
The content above is not the main point of this article; the main point comes next: configuring synonyms.
Under Elasticsearch's config directory, create a folder named analysis to hold the synonym file, then create synonyms.txt inside it. There are two ways to write synonyms in this file (note: the commas must be ASCII commas, not full-width Chinese ones).
One more caveat is the file encoding, especially if you edit the file locally and then upload it to the server: save it as UTF-8, otherwise Elasticsearch will report an error.
Option 1:
中文,汉语
Option 2:
中文,汉语=>中文
With option 1, wherever 中文 appears it is expanded to both 中文 and 汉语 at analysis time, and both terms are stored in the index.
With option 2, both 中文 and 汉语 are rewritten to 中文 at analysis time, and only 中文 is stored in the index.
The two options achieve the same end result; I went with option 1 because it is easier to maintain. Once the synonym file is in place, restart Elasticsearch.
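For reference, a synonyms.txt using option 1 simply lists one rule per line; the second line below is only a made-up extra entry to illustrate the format:
中文,汉语
西红柿,番茄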
Much like the ik_pinyin_analyzer index above, create an index named xjs that defines two analyzers, by_smart and by_max_word, built on the ik_smart and ik_max_word tokenizers respectively; both use the custom synonym filter by_sfr.
PUT /xjs
{
"index": {
"analysis": {
"analyzer": {
"by_smart": {
"type": "custom",
"tokenizer": "ik_smart",
"filter": ["by_sfr"]
},
"by_max_word": {
"type": "custom",
"tokenizer": "ik_max_word",
"filter": ["by_sfr"]
}
},
"filter": {
"by_sfr": {
"type": "synonym",
"synonyms_path": "analysis/synonyms.txt"#同义词文件的位置
}
}
}
}
}
Then create a mapping that analyzes with the finer-grained by_max_word at index time and with by_smart at search time.
PUT /xjs/typename/_mapping
{
"properties": {
"title": {
"type": "text",
"index": "analyzed",
"analyzer": "by_max_word",
"search_analyzer": "by_smart"
}
}
}
Now test whether the synonyms take effect:
POST /xjs/_analyze?pretty=true&analyzer=by_smart
{"text":"中文"}
The output below clearly shows that 中文 was analyzed into the two terms 中文 and 汉语:
{
"tokens": [
{
"token": "中文",
"start_offset": 0,
"end_offset": 2,
"type": "CN_WORD",
"position": 0
},
{
"token": "汉语",
"start_offset": 0,
"end_offset": 2,
"type": "SYNONYM",
"position": 0
}
]
}
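For comparison, you can run a longer phrase through by_max_word as well; it should produce a finer-grained, overlapping set of tokens than by_smart, which is why it is used at index time (the exact tokens depend on the IK dictionary):
POST /xjs/_analyze
{"analyzer": "by_max_word", "text": "中华人民共和国国歌"}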
Add two test documents:
POST /xjs/title/1
{"title":"汉语的重要性"}
POST /xjs/title/2
{"title":"中文其实很好学的"}
Then try a search:
POST /xjs/title/_search
{
"query" : { "match" : { "title" : "中文" }},
"highlight" : {
"pre_tags" : ["" ],
"post_tags" : [""],
"fields" : {
"note" : {}
}
}
}
Both documents are returned:
{
"took": 2,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 0.60057575,
"hits": [
{
"_index": "xjs",
"_type": "title",
"_id": "1",
"_score": 0.60057575,
"_source": {
"title": "汉语的重要性"
}
},
{
"_index": "xjs",
"_type": "title",
"_id": "2",
"_score": 0.5930795,
"_source": {
"title": "中文其实很好学的"
}
}
]
}
}
Indices created through Logstash use the standard analyzer by default, which splits Chinese text into individual characters. You can see this when testing highlighting: every single character gets highlighted instead of whole words.
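You can confirm this with the _analyze API; the standard analyzer breaks CJK text into single-character tokens:
POST /_analyze
{"analyzer": "standard", "text": "中文"}
This returns 中 and 文 as two separate terms rather than one word.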
Before Elasticsearch 5.x you could simply configure IK as the default analyzer in Elasticsearch and Logstash would use it, but that option was removed in 5.x, which makes things more awkward.
Create a template directory under logstash-5.5.0 and add a logstash.json file to it. Be careful when writing this file, because Logstash will not tell you at runtime that the file contains errors. Its contents are essentially the same as the two index definitions above, so I won't explain them again.
{
  "template": "*",
  "version": 50001,
  "settings": {
    "index.refresh_interval": "5s",
    "index": {
      "analysis": {
        "analyzer": {
          "by_smart": {
            "type": "custom",
            "tokenizer": "ik_smart",
            "filter": ["by_sfr"]
          },
          "by_max_word": {
            "type": "custom",
            "tokenizer": "ik_max_word",
            "filter": ["by_sfr"]
          }
        },
        "filter": {
          "by_sfr": {
            "type": "synonym",
            "synonyms_path": "/usr/local/elasticsearch-5.5.0/config/analysis/synonyms.txt"
          }
        }
      }
    }
  },
  "mappings": {
    "_default_": {
      "_all": {
        "enabled": true,
        "norms": false,
        "analyzer": "by_max_word",
        "search_analyzer": "by_smart"
      },
      "dynamic_templates": [
        {
          "message_field": {
            "path_match": "message",
            "match_mapping_type": "string",
            "mapping": {
              "type": "text",
              "norms": false,
              "analyzer": "by_max_word",
              "search_analyzer": "by_smart"
            }
          }
        },
        {
          "string_fields": {
            "match": "*",
            "match_mapping_type": "string",
            "mapping": {
              "type": "text",
              "norms": false,
              "analyzer": "by_max_word",
              "search_analyzer": "by_smart",
              "fields": {
                "keyword": {
                  "type": "keyword"
                }
              }
            }
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date",
          "include_in_all": false
        },
        "@version": {
          "type": "keyword",
          "include_in_all": false
        }
      }
    }
  }
}
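Since Logstash will not point out mistakes in this file, it is worth validating the JSON before restarting; one quick check, assuming Python is available on the server:
python -m json.tool /usr/local/logstash-5.5.0/template/logstash.json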
Then open the conf file that configures synchronization between Logstash and MySQL and add two lines to the elasticsearch output:
input {
stdin {
}
jdbc {
# mysql jdbc connection string to our backup database
jdbc_connection_string => "jdbc:mysql://localhost:3306/test01"
# the user we wish to execute our statement as
jdbc_user => "root"
jdbc_password => "123456"
# the path to our downloaded jdbc driver
jdbc_driver_library => "/usr/local/logstash-5.5.0/mysql-connector-java-6.0.6.jar"
# the name of the driver class for mysql
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_paging_enabled => "true"
jdbc_page_size => "50000"
#statement_filepath => "config-mysql/test02.sql"
statement => "select * from test02"
schedule => "* * * * *"
type => "test02"
}
}
filter {
json {
source => "message"
remove_field => ["message"]
}
}
output {
elasticsearch {
hosts => "127.0.0.1:9200"
index => "test01"
document_id => "%{id}"
# the next two lines are the new additions
template_overwrite => true
# location of the template file
template => "/usr/local/logstash-5.5.0/template/logstash.json"
}
stdout {
codec => json_lines
}
}
Then restart Logstash and the template will be loaded.
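If you start Logstash from the command line, that might look like the following (the config file name is only an assumption here; use whatever your sync config is actually called):
cd /usr/local/logstash-5.5.0
bin/logstash -f config-mysql/test02.conf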
You can take a look at the index templates that are currently installed:
GET /_template
The response should look roughly like the following and essentially match what was configured above; if it does not, there is an error in the logstash.json file.
{
"logstash": {
"order": 0,
"version": 50001,
"template": "*",
"settings": {
"index": {
"analysis": {
"filter": { "by_sfr": { "type": "synonym", "synonyms_path": "/usr/local/elasticsearch-5.5.0/config/analysis/synonyms.txt" } },
"analyzer": { "by_smart": { "filter": [ "by_sfr" ], "type": "custom", "tokenizer": "ik_smart" }, "by_max_word": { "filter": [ "by_sfr" ], "type": "custom", "tokenizer": "ik_max_word" } } },
"refresh_interval": "5s"
}
},
"mappings": {
"_default_": {
"dynamic_templates": [
{
"message_field": { "path_match": "message", "mapping": { "search_analyzer": "by_smart", "norms": false, "analyzer": "by_max_word", "type": "text" }, "match_mapping_type": "string" } },
{
"string_fields": { "mapping": { "search_analyzer": "by_smart", "norms": false, "analyzer": "by_max_word", "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "match_mapping_type": "string", "match": "*" } }
],
"_all": {
"search_analyzer": "by_smart",
"norms": false,
"analyzer": "by_max_word",
"enabled": true },
"properties": {
"@timestamp": { "include_in_all": false, "type": "date" },
"@version": { "include_in_all": false, "type": "keyword" } }
}
},
"aliases": {}
}
}
Now you can insert some data and test against your own database content. For example, since 中文 and 汉语 were configured as synonyms, add records containing each of the two words to the database; searching for either 中文 or 汉语 should then return both newly added records.
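For example, assuming the test02 table has a text column named content (a hypothetical name; substitute your own column), a search like this should return the records containing either word:
POST /test01/_search
{ "query" : { "match" : { "content" : "汉语" }}}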