添加索引时并不是每个document都马上添加到同一个索引文件,它们首先被写入到不同的小文件,然后再合并成一个大索引文件,这里每个小文件都是一个segment。
-
// Build an index on disk: open an IndexWriter over /data/index/ (create=true
// rebuilds the index), add one Document with two stored, tokenized fields,
// then optimize and close.
// Fix: the original used typographic quotes (“/data/index/”), which do not compile.
IndexWriter writer = new IndexWriter("/data/index/", new StandardAnalyzer(), true);

Document doc = new Document();
doc.add(new Field("title", "lucene introduction", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content", "lucene works well", Field.Store.YES, Field.Index.TOKENIZED));

writer.addDocument(doc);
writer.optimize(); // merge all segments into one for faster searching
writer.close();    // flush buffered docs and release the write lock
-
// Same indexing flow as the disk example, but writing into an in-memory
// RAMDirectory — useful for tests or short-lived indexes.
Directory ramDir = new RAMDirectory();
IndexWriter ramWriter = new IndexWriter(ramDir, new StandardAnalyzer(), true);

Document document = new Document();
document.add(new Field("title", "lucene introduction", Field.Store.YES, Field.Index.TOKENIZED));
document.add(new Field("content", "lucene works well", Field.Store.YES, Field.Index.TOKENIZED));

ramWriter.addDocument(document);
ramWriter.optimize();
ramWriter.close();
-
// Delete every document whose <field> contains the term <key>.
// Deletion in Lucene 2.x goes through an IndexReader, not the writer.
Directory indexDir = FSDirectory.getDirectory(PATH, false);
IndexReader indexReader = IndexReader.open(indexDir);
indexReader.deleteDocuments(new Term(field, key));
indexReader.close(); // commits the deletions
-
// Concrete deletion example: remove all documents whose "title" field
// contains the term "lucene introduction".
// Fix: the original used typographic quotes (“title”), which do not compile.
Directory dir = FSDirectory.getDirectory(PATH, false);
IndexReader reader = IndexReader.open(dir);

Term term = new Term("title", "lucene introduction");
reader.deleteDocuments(term);
reader.close();
-
-
// "Update" in Lucene 2.x means delete + re-add: opening the writer with
// create=true rebuilds the index, then the modified document is added back.
IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true);

Document updated = new Document();
updated.add(new Field("title", "lucene introduction", Field.Store.YES, Field.Index.TOKENIZED));
updated.add(new Field("content", "lucene is funny", Field.Store.YES, Field.Index.TOKENIZED));

indexWriter.addDocument(updated);
indexWriter.optimize();
indexWriter.close();
-
Term t =
new Term(
"content",
" lucene";
-
Query query =
new TermQuery(t);
-
// OR two term queries: SHOULD + SHOULD means "content:java OR content:perl".
// Fixes: identifiers were split across lines (termQuery\n2) and several
// closing parentheses were lost in extraction — reconstructed here.
TermQuery termQuery1 = new TermQuery(new Term("content", "java"));
TermQuery termQuery2 = new TermQuery(new Term("content", "perl"));

BooleanQuery booleanQuery = new BooleanQuery();
booleanQuery.add(termQuery1, BooleanClause.Occur.SHOULD);
booleanQuery.add(termQuery2, BooleanClause.Occur.SHOULD);

// Wildcard query: '*' matches any character sequence, so "use*" matches
// "use", "user", "uses", ... (missing ")" restored).
Query query = new WildcardQuery(new Term("content", "use*"));
-
// PhraseQuery: both terms must occur within 5 positions of each other
// (setSlop). Fixes throughout this group: typographic quotes replaced with
// ASCII quotes, missing closing parentheses restored, the stray trailing
// space in the "content " field name removed, and the three extra examples
// renamed so `query` is not redeclared.
PhraseQuery query = new PhraseQuery();
query.setSlop(5);
query.add(new Term("content", "中"));
query.add(new Term("content", "日"));

// PrefixQuery: matches all terms starting with "中".
PrefixQuery prefixQuery = new PrefixQuery(new Term("content", "中"));

// FuzzyQuery: edit-distance match, e.g. "wuzza" ~ "fuzzy".
Query fuzzyQuery = new FuzzyQuery(new Term("content", "wuzza"));

// RangeQuery over the "time" field; the final `true` makes both bounds inclusive.
RangeQuery rangeQuery = new RangeQuery(new Term("time", "20060101"), new Term("time", "20060130"), true);
-
// Parse a composite query string and print the title of every hit.
// Fixes: missing ")" after the parse(...) string and after doc.get("title").
Directory dir = FSDirectory.getDirectory(PATH, false);
IndexSearcher is = new IndexSearcher(dir);

QueryParser parser = new QueryParser("content", new StandardAnalyzer());
// '+' marks a required clause: (title OR content contains lucene) AND time in range.
Query query = parser.parse("+(title:lucene content:lucene) +time:[20060101 TO 20060130]");

Hits hits = is.search(query);
for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    System.out.println(doc.get("title"));
}
is.close();
-
// Search restricted by a RangeFilter on the "time" field.
// Fixes: the for-loop header was garbled ("int i i < ...") — initializer
// restored; missing closing parentheses restored.
Directory dir = FSDirectory.getDirectory(PATH, false);
IndexSearcher is = new IndexSearcher(dir);

QueryParser parser = new QueryParser("content", new StandardAnalyzer());
Query query = parser.parse("title:lucene content:lucene");

// Both endpoints inclusive. NOTE(review): "20060230" is not a real date but
// works as a lexicographic upper bound — kept as in the original.
RangeFilter filter = new RangeFilter("time", "20060101", "20060230", true, true);

Hits hits = is.search(query, filter);
for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    System.out.println(doc.get("title"));
}
is.close();
-
// index: store the date as a sortable string (DateField.DateToString) in an
// untokenized field so it can be range-filtered later.
// Fix: Document.Add takes a Field — the original passed the raw arguments
// directly, the "new Field(...)" wrapper was lost in extraction.
document.Add(new Field(FieldDate,
                       DateField.DateToString(date),
                       Field.Store.YES,
                       Field.Index.UN_TOKENIZED));

//...

// search: keep only hits dated between 2005-10-1 and 2005-10-30.
Filter filter = new DateFilter(FieldDate,
                               DateTime.Parse("2005-10-1"),
                               DateTime.Parse("2005-10-30"));
Hits hits = searcher.Search(query, filter);
-
// index: store the price as a fixed-width sortable string
// (NumberTools.LongToString) so lexicographic comparison matches numeric order.
document.Add(new Field(FieldNumber,
                       NumberTools.LongToString((long)price),
                       Field.Store.YES,
                       Field.Index.UN_TOKENIZED));

//...

// search: keep only hits whose number lies in [100, 200], endpoints inclusive.
Filter filter = new RangeFilter(FieldNumber,
                                NumberTools.LongToString(100L),
                                NumberTools.LongToString(200L),
                                true,
                                true);
Hits hits = searcher.Search(query, filter);
// A QueryFilter turns any query into a filter.
QueryFilter filter = new QueryFilter(QueryParser.Parse("name2", FieldValue, analyzer));

// Several filters can be stacked by wrapping the query in FilteredQuery
// layers — each layer further narrows the result set.
Filter dateFilter = new DateFilter(FieldDate,
                                   DateTime.Parse("2005-10-10"),
                                   DateTime.Parse("2005-10-15"));
Filter numberFilter = new RangeFilter(FieldNumber,
                                      NumberTools.LongToString(11L),
                                      NumberTools.LongToString(13L),
                                      true,
                                      true);

Query query = QueryParser.Parse("name*", FieldName, analyzer);
query = new FilteredQuery(query, dateFilter);
query = new FilteredQuery(query, numberFilter);

IndexSearcher searcher = new IndexSearcher(reader);
Hits hits = searcher.Search(query);
-
// Search with a filter AND a sort on the "time" field.
// Fixes: missing ")" after the parse(...) string and after doc.get("title");
// typographic quotes in new Sort(“time”) replaced with ASCII quotes.
Directory dir = FSDirectory.getDirectory(PATH, false);
IndexSearcher is = new IndexSearcher(dir);

QueryParser parser = new QueryParser("content", new StandardAnalyzer());
Query query = parser.parse("title:lucene content:lucene");

RangeFilter filter = new RangeFilter("time", "20060101", "20060230", true, true);
Sort sort = new Sort("time"); // results ordered by the "time" field

Hits hits = is.search(query, filter, sort);
for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    System.out.println(doc.get("title"));
}
is.close();
-
// Search several indexes at once by merging their readers into one MultiReader.
MultiReader reader = new MultiReader(new IndexReader[] {
    IndexReader.Open(@"c:\index"),
    IndexReader.Open(@"\\server\index")
});
IndexSearcher searcher = new IndexSearcher(reader);
Hits hits = searcher.Search(query);
-
-
-
或
-
// Alternative: keep one IndexSearcher per index and combine them with a
// MultiSearcher.
IndexSearcher searcher1 = new IndexSearcher(reader1);
IndexSearcher searcher2 = new IndexSearcher(reader2);

MultiSearcher searcher = new MultiSearcher(new Searchable[] { searcher1, searcher2 });
Hits hits = searcher.Search(query);
-
// Old-style BooleanQuery.Add(query, required, prohibited):
// (true, false) marks each clause REQUIRED, so both sub-queries must match.
BooleanQuery query = new BooleanQuery();
query.Add(query1, true, false);
query.Add(query2, true, false);
//...
// ToString() renders the combined query in Lucene syntax — handy for debugging.
Console.WriteLine("Syntax: {0}", query.ToString());
-
// Buffer documents in a RAMDirectory and periodically flush the whole batch
// into the on-disk index via addIndexes() — far fewer disk merges than
// indexing each document straight to disk.
FSDirectory fsDir = FSDirectory.getDirectory("/data/index", true);
RAMDirectory ramDir = new RAMDirectory();

IndexWriter fsWriter = new IndexWriter(fsDir, new StandardAnalyzer(), true);
IndexWriter ramWriter = new IndexWriter(ramDir, new StandardAnalyzer(), true);

while (there are documents to index)   // pseudocode condition
{
    ... create Document ...            // pseudocode: build the next Document
    ramWriter.addDocument(doc);

    if (condition for flushing memory to disk has been met)   // pseudocode condition
    {
        fsWriter.addIndexes(new Directory[] { ramDir });
        ramWriter.close();
        // Re-open the RAM writer with create=true to start a fresh in-memory batch.
        ramWriter = new IndexWriter(ramDir, new StandardAnalyzer(), true);
    }
}
-
Directory fsDir = FSDirectory.getDirectory(“/data/index/”,
false);
-
Directory ramDir =
new RAMDirectory(fsDir);
-
Searcher searcher =
new IndexSearcher(ramDir);
-
/// <summary>
/// Tokenizes <paramref name="str"/> with the PanGu analyzer and returns the
/// resulting terms as a list.
/// </summary>
/// <param name="str">The text to tokenize.</param>
/// <returns>The terms produced by the analyzer, in stream order.</returns>
public static List<string> GetWordsByPanGuAnalyzer(string str)
{
    List<string> words = new List<string>();
    Analyzer analyzer = new PanGuAnalyzer();
    StringReader reader = new StringReader(str);
    // TokenStream's first argument is a field name; it is irrelevant for
    // ad-hoc tokenization, so the input text is reused here as in the original.
    TokenStream ts = analyzer.TokenStream(str, reader);

    // Fix: the extraction dropped the generic argument — the call must be
    // GetAttribute<ITermAttribute>(). The attribute instance is stable across
    // IncrementToken() calls, so it is fetched once before the loop.
    ITermAttribute ita = ts.GetAttribute<ITermAttribute>();
    while (ts.IncrementToken())
    {
        words.Add(ita.Term);
    }

    // Fix: the original called ts.CloneAttributes() here, which only copies
    // attribute state and discards the result; dispose the stream instead.
    ts.Dispose();
    reader.Close();
    analyzer.Close();

    // Building the list directly also fixes the original's spurious trailing
    // "" entry, caused by splitting a concatenated string that ended in '|'.
    return words;
}
下载地址: http://download.csdn.net/detail/huwei2003/9697994
--- end ---