// Build a query that matches documents where each keyword appears in BOTH
// 字段1 AND 字段2; the per-keyword sub-queries are then OR-ed together,
// so any single keyword pair matching is enough for a document to qualify.
BooleanQuery booleanQuery = new BooleanQuery();
TermQuery query = null;
// Boost factor applied to every term query to raise its scoring weight.
final float boost = 2.0f;
for (String s : list) {
    // The keyword must appear in field 1 ...
    TermQuery query1 = new TermQuery(new Term(字段1, s));
    query1.setBoost(boost);
    // ... and also in field 2.
    TermQuery query2 = new TermQuery(new Term(字段2, s));
    query2.setBoost(boost);
    // AND the two field constraints together for this keyword.
    BooleanQuery bq = new BooleanQuery();
    bq.add(query1, BooleanClause.Occur.MUST);
    bq.add(query2, BooleanClause.Occur.MUST);
    // OR this keyword's pair-query into the overall query.
    booleanQuery.add(bq, BooleanClause.Occur.SHOULD);
}
// Execute the search and keep the top 20 scoring documents.
TopDocs topDocs = isearcher.search(booleanQuery, 20);
// Add one boosted SHOULD (OR) clause per keyword against the single field 字段;
// a document matches if any keyword is present. Reuses the outer `query` variable.
for (String keyword : list) {
    query = new TermQuery(new Term(字段, keyword));
    query.setBoost(2.0f);
    booleanQuery.add(query, BooleanClause.Occur.SHOULD);
}
// Collect the tokens produced by IK segmentation of 要分词的句子.
// (Requires java.util.List / java.util.ArrayList to be imported.)
List<String> list = new ArrayList<String>();
Analyzer analyzer = new IKAnalyzer();
// Obtain Lucene's TokenStream for the sentence.
// NOTE(review): the first argument to tokenStream is normally a field name;
// here the sentence variable is reused for it — confirm this is intended.
TokenStream ts = null;
try {
    ts = analyzer.tokenStream(要分词的句子, new StringReader(要分词的句子));
    // Attribute view exposing each token's text.
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    // Reset the TokenStream before consuming it (required by the API contract;
    // also resets the underlying StringReader).
    ts.reset();
    // Iterate over the segmentation results, collecting each token's text.
    while (ts.incrementToken()) {
        list.add(term.toString());
    }
    // Signal end-of-stream so end-of-stream attributes are finalized.
    ts.end();
} catch (IOException e) {
    e.printStackTrace();
} finally {
    // Release all resources held by the TokenStream (closes the StringReader).
    if (ts != null) {
        try {
            ts.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    // The Analyzer also holds reusable resources — close it as well
    // (was previously leaked).
    analyzer.close();
}