该例子基于Lucene 4.8.0
本例演示使用 Lucene 创建索引并执行搜索的完整流程,涉及的核心类如下:
IndexWriter
Directory
Analyzer
Document
Field
IndexSearcher
Query
TopDocs
具体代码如下:
import java.io.IOException; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryparser.classic.ParseException; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Version; //创建索引又分为以下几步: //1.1, 创建索引的存储位置(Directory类) //1.2, 创建索引写者(IndexWriter) //1.3, 创建索引文档(Document)并添加相关域(Field) //1.4, 索引写者添加索引文档 //1.5, 关闭相关资源 // //查询分为以下几步: //2.1, 打开索引存储位置的文件,创建索引查询(IndexSearcher) //2.2, 创建查询(Query) //2.3, 查询(search方法) //2.4, 获取搜索结果的指针(TopDocs),此时的指针并没有载入读取搜索结果的实质内容,仅仅是指针 //2.5, 获取搜索结果内容(Document,get方法) public class HelloLucene { public static void main(String[] args) throws IOException, ParseException { // 0. Specify the analyzer for tokenizing text. // The same analyzer should be used for indexing and searching // 这个类是用来把文本文件里的内容分割成一个个字符串 // 如果用SimpleAnalyzer类,那么会把整个文件内容当成是一个字符串 StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_48); // 1. create the index Directory index = new RAMDirectory(); // 1.1 IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer); IndexWriter w = new IndexWriter(index, config); // 1.2 addDoc(w, "Lucene in Action", "193398817"); addDoc(w, "Lucene for Dummies", "55320055Z"); addDoc(w, "Managing Gigabytes", "55063554A"); addDoc(w, "The Art of Computer Science", "9900333X"); w.close(); // 1.5 // 2. 
query String querystr = args.length > 0 ? args[0] : "lucene"; // the "title" arg specifies the default field to use // when no field is explicitly specified in the query. Query q = new QueryParser(Version.LUCENE_48, "title", analyzer).parse(querystr); // 2.2 // 3. search int hitsPerPage = 10; IndexReader reader = DirectoryReader.open(index); // 2.1 IndexSearcher searcher = new IndexSearcher(reader); // 2.1 TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true); searcher.search(q, collector); // 2.3 ScoreDoc[] hits = collector.topDocs().scoreDocs; // 2.4 // 4. display results System.out.println("Found " + hits.length + " hits."); for (int i = 0; i < hits.length; ++i) { int docId = hits[i].doc; Document d = searcher.doc(docId); // 2.5 System.out.println((i + 1) + ". " + d.get("isbn") + "\t" + d.get("title")); } // reader can only be closed when there // is no need to access the documents any more. reader.close(); } private static void addDoc(IndexWriter w, String title, String isbn) throws IOException { Document doc = new Document(); // 1.3 doc.add(new TextField("title", title, Field.Store.YES)); // 1.3 // use a string field for isbn because we don't want it tokenized doc.add(new StringField("isbn", isbn, Field.Store.YES)); // 1.3 w.addDocument(doc); // 1.4 } }