This approach only looks at the terms of the two articles; it does not take the semantic information of the text into account.
Implementation principle:
1. Compute term-frequency statistics for the two documents;
2. Use TF-IDF weighting and cosine similarity to compute the similarity of the two documents.
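For reference, these are the two formulas the code below relies on, written in their standard form (the code uses the same +1 smoothing in the IDF denominator):
TF-IDF(t) = TF(t) × log( N / (DF(t) + 1) ), where N is the total number of documents in the Lucene corpus and DF(t) is the number of corpus documents containing term t;
cosine similarity(A, B) = Σ(a_i × b_i) / ( sqrt(Σ a_i²) × sqrt(Σ b_i²) ), where a_i and b_i are the TF-IDF weights of term i in documents A and B.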
Implementation process:
1. Use Lucene to index a large set of articles as a corpus, which improves the accuracy of the IDF statistics (a minimal indexing sketch follows this list).
2. Compute the similarity of the two articles with the cosine formula.
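The indexing step itself is not shown in this article, so the following is only a minimal sketch of how such a corpus index could be built with Lucene 4.x and the IK analyzer. The field name "content" and the index directory H:\testIndex are taken from the search code below; the BuildCorpusIndex class name, the corpus file paths, and the GBK encoding are hypothetical placeholders.
package twodocsimiliary;
import java.io.File;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;
public class BuildCorpusIndex {
    public static void main(String[] args) throws Exception {
        // placeholder corpus: replace with the real article collection
        String[] corpusFiles = { "H:\\corpus\\1.txt", "H:\\corpus\\2.txt" };
        Analyzer analyzer = new IKAnalyzer(true);// IK analyzer in smart-segmentation mode
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_45, analyzer);
        IndexWriter writer = new IndexWriter(FSDirectory.open(new File("H:\\testIndex")), config);
        for (String path : corpusFiles) {
            // the corpus articles are assumed to be GBK-encoded, like the two input files below
            String text = new String(Files.readAllBytes(Paths.get(path)), Charset.forName("GBK"));
            Document doc = new Document();
            // "content" is the field the similarity code queries with a TermQuery
            doc.add(new TextField("content", text, Field.Store.NO));
            writer.addDocument(doc);
        }
        writer.close();
    }
}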
package twodocsimiliary;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.text.DecimalFormat;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;
/**
* Only considers terms; does not take the semantic information of the text into account.
* @author wangss
* @date Aug 26, 2014
*/
public class comparisontwodoc {
public static Map<Long, String> words = new HashMap<Long, String>();// term hash -> term text
public static void main(String []args){
// AnalyzerWord analyzer = new AnalyzerWord();
String path_a = "H:\\a.txt";
String path_b = "H:\\c.txt";
String str = readFiles(path_a);
String str2 = readFiles(path_b);
Map<Long, Double> tf_a = iniCosine(str);
Map<Long, Double> tf_b = iniCosine(str2);
double molecular = 0;// numerator: dot product of the two TF-IDF vectors
double denominator_a = 0;// denominators: the two vector norms
double denominator_b = 0;
System.out.println("Terms the two documents have in common:");
DecimalFormat df = new DecimalFormat("0.00");
for(long tfa : tf_a.keySet()){
denominator_a += tf_a.get(tfa)*tf_a.get(tfa);
molecular += tf_a.get(tfa)*(null==tf_b.get(tfa)?0.0:tf_b.get(tfa));
if(tf_b.get(tfa)!=null){
System.out.println(words.get(tfa)+" TF-IDF weight, document 1: "+df.format(tf_a.get(tfa))+"; document 2: "+df.format(tf_b.get(tfa)));
}
}
for(long tfb : tf_b.keySet()){
denominator_b += tf_b.get(tfb)*tf_b.get(tfb);
}
double result = 0;
if(denominator_a!=0 && denominator_b!=0){
result = (molecular/(Math.sqrt(denominator_a)*Math.sqrt(denominator_b)));
}
System.out.println("两篇文档相似度:"+df.format(result*100) +"%");
}
private static String readFiles(String path_a) {
try {
InputStreamReader file_a = new InputStreamReader(new FileInputStream(new File(path_a)), "GBK");
BufferedReader bufferedReader = new BufferedReader(file_a);
StringBuilder str_a = new StringBuilder();
String lineTxt = null;
while((lineTxt = bufferedReader.readLine()) != null){
// System.out.println(lineTxt);
str_a.append(lineTxt);
}
bufferedReader.close();// also closes the underlying file reader
return str_a.toString();
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
private static Map<Long, Double> iniCosine(String str) {
Map<Long, Long> tf = new HashMap<Long, Long>();// raw term frequencies of this document
Map<Long, Double> idf = new HashMap<Long, Double>();// IDF, later overwritten with TF-IDF
Reader input = new StringReader(str);
// the second argument turns IK smart segmentation on; the segmentation mode strongly affects accuracy
IKSegmenter iks = new IKSegmenter(input, true);
Lexeme lexeme = null;
// StringBuilder sb = new StringBuilder();
try {
// open the pre-built corpus index
IndexReader indexReader = DirectoryReader.open(FSDirectory.open(new File("H:\\testIndex")));
int allDocs = indexReader.numDocs();// total number of documents in the corpus
// System.out.println("total documents: "+allDocs);
/*QueryParser queryParser = new MultiFieldQueryParser(
Version.LUCENE_45,new String[]{"content"} , new IKAnalyzer());*/
IndexSearcher indexSearcher = new IndexSearcher(indexReader);// reuse one searcher for all terms
while ((lexeme = iks.next()) != null) {
String lexemeText = lexeme.getLexemeText();
long hash = ELFHash(lexemeText);
// document frequency of the term in the corpus
TopDocs topDocs = indexSearcher.search(new TermQuery(new Term("content",lexemeText)), indexReader.maxDoc());
int totalHits = topDocs.scoreDocs.length;
// inverse document frequency; cast to double to avoid integer division, clamp negatives to 0
double log = Math.log((double)allDocs/(totalHits+1));
if(log<0) log = 0;
idf.put(hash, log);
tf.put(hash, null==tf.get(hash)?1L:tf.get(hash)+1);
words.put(hash, lexemeText);
}
// combine TF and IDF: overwrite each IDF entry with TF * IDF
for(long m : idf.keySet()){
idf.put(m, tf.get(m)*idf.get(m));
}
} catch (IOException e) {
e.printStackTrace();
}
return idf;
}
public static long ELFHash(String str){
long hash = 0;
long x = 0;
for(int i = 0; i < str.length(); i++){
hash = (hash << 4) + str.charAt(i);
if((x = hash & 0xF0000000L) != 0){
hash ^= (x >> 24);
hash &= ~x;
}
}
return (hash & 0x7FFFFFFF);
}
}
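To run comparisontwodoc, the two GBK-encoded input files H:\a.txt and H:\c.txt and the corpus index at H:\testIndex must already exist (the index can be built, for example, with the sketch above), and lucene-core 4.x together with the IK Analyzer jar must be on the classpath.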