Install the version that matches your Elasticsearch release. Download the IK plugin from:
https://github.com/medcl/elasticsearch-analysis-ik/releases
# create the ik directory under the Elasticsearch plugins directory
mkdir ik
# move all of the extracted plugin files into the ik directory
mv * ik
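Put together, a minimal install sketch might look like the following. The 7.17.0 version number, the Elasticsearch home of /usr/local/elasticsearch, and the exact asset name are assumptions; copy the real download link for your version from the releases page above.
cd /usr/local/elasticsearch/plugins   # assumed Elasticsearch home
mkdir ik && cd ik
# the asset name below is an assumption; use the link for your version from the releases page
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.17.0/elasticsearch-analysis-ik-7.17.0.zip
unzip elasticsearch-analysis-ik-7.17.0.zip && rm elasticsearch-analysis-ik-7.17.0.zip
# restart Elasticsearch afterwards so the plugin is loaded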
The IK analyzer provides two tokenization modes: ik_max_word and ik_smart. For comparison, first run _analyze without specifying an analyzer; the default standard analyzer splits Chinese text into single characters:
GET _analyze
{
"text":"我是中国人"
}
// result
{
"tokens" : [
{
"token" : "我",
"start_offset" : 0,
"end_offset" : 1,
"type" : "" ,
"position" : 0
},
{
"token" : "是",
"start_offset" : 1,
"end_offset" : 2,
"type" : "" ,
"position" : 1
},
{
"token" : "中",
"start_offset" : 2,
"end_offset" : 3,
"type" : "" ,
"position" : 2
},
{
"token" : "国",
"start_offset" : 3,
"end_offset" : 4,
"type" : "" ,
"position" : 3
},
{
"token" : "人",
"start_offset" : 4,
"end_offset" : 5,
"type" : "" ,
"position" : 4
}
]
}
ik_smart performs the coarsest-grained split; for example, "中华人民共和国人民大会堂" is split into 中华人民共和国 and 人民大会堂.
GET _analyze
{
"analyzer": "ik_smart",
"text":"我是中国人"
}
// result
{
"tokens" : [
{
"token" : "我",
"start_offset" : 0,
"end_offset" : 1,
"type" : "CN_CHAR",
"position" : 0
},
{
"token" : "是",
"start_offset" : 1,
"end_offset" : 2,
"type" : "CN_CHAR",
"position" : 1
},
{
"token" : "中国人",
"start_offset" : 2,
"end_offset" : 5,
"type" : "CN_WORD",
"position" : 2
}
]
}
ik_max_word performs the finest-grained split of the text; for example, "中华人民共和国人民大会堂" is split into 中华人民共和国, 中华人民, 中华, 华人, 人民共和国, 人民, 共和国, 大会堂, 大会, 会堂, and other terms.
GET _analyze
{
"analyzer": "ik_max_word",
"text":"我是中国人"
}
// result
{
"tokens" : [
{
"token" : "我",
"start_offset" : 0,
"end_offset" : 1,
"type" : "CN_CHAR",
"position" : 0
},
{
"token" : "是",
"start_offset" : 1,
"end_offset" : 2,
"type" : "CN_CHAR",
"position" : 1
},
{
"token" : "中国人",
"start_offset" : 2,
"end_offset" : 5,
"type" : "CN_WORD",
"position" : 2
},
{
"token" : "中国",
"start_offset" : 2,
"end_offset" : 4,
"type" : "CN_WORD",
"position" : 3
},
{
"token" : "国人",
"start_offset" : 3,
"end_offset" : 5,
"type" : "CN_WORD",
"position" : 4
}
]
}
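In practice the two modes are often combined in an index mapping: ik_max_word at index time for maximum recall, and ik_smart at search time for coarser query terms. A minimal sketch, where the index name my_index and the field name content are illustrative:
PUT /my_index
{
  "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "ik_max_word",
        "search_analyzer": "ik_smart"
      }
    }
  }
}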
To make IK recognize custom words, go to the plugins->ik->config directory and create a file named my.dic, with one custom word per line. For this test it contains:
新增测试分词
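A quick way to create the file from the shell, assuming the plugin path from the install step above:
cd /usr/local/elasticsearch/plugins/ik/config   # assumed path
# one custom word per line; the file must be UTF-8 encoded
echo "新增测试分词" > my.dic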
Still in the plugins->ik->config directory, edit IKAnalyzer.cfg.xml so that the extension-dictionary entry points to my.dic:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
  <comment>IK Analyzer 扩展配置</comment>
  <entry key="ext_dict">my.dic</entry>
  <entry key="ext_stopwords"></entry>
</properties>
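A locally configured ext_dict is only read at startup, so restart Elasticsearch before re-running the test. A minimal sketch, assuming a systemd-managed install (use whatever matches how your node is started):
systemctl restart elasticsearch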
GET _analyze
{
"analyzer": "ik_max_word",
"text":"新增测试分词"
}
// result
{
"tokens" : [
{
"token" : "新增测试分词",
"start_offset" : 0,
"end_offset" : 6,
"type" : "CN_WORD",
"position" : 0
},
{
"token" : "新增",
"start_offset" : 0,
"end_offset" : 2,
"type" : "CN_WORD",
"position" : 1
},
{
"token" : "测试",
"start_offset" : 2,
"end_offset" : 4,
"type" : "CN_WORD",
"position" : 2
},
{
"token" : "分词",
"start_offset" : 4,
"end_offset" : 6,
"type" : "CN_WORD",
"position" : 3
}
]
}