Implementing summary highlighting with Lucene

Note: this class is tailored to the needs of my own project; a plain summary-highlighting implementation is not this involved. Besides the highlighting itself, the class also extracts the content from an XML document and then strips the embedded HTML down to plain text. One point worth noting: highlighting the content is essentially running a full-text search over it, so segmenting the text into words is indispensable; otherwise the corresponding keywords cannot be found in the text. See the full class implementation further below for details.
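Before the full class, here is a minimal sketch of the core highlighting step on its own, separated from the project-specific XML and HTML handling. It assumes the same Lucene 2.x-era API used by the class below (SimpleAnalyzer, QueryParser, Highlighter, SimpleHTMLFormatter); the class name HighlightSketch and the field name "content" are illustrative only, and the input text is assumed to be already segmented into space-separated terms.

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;

/**
 * Minimal highlighting sketch; class and field names are illustrative.
 */
public class HighlightSketch {

	public static String highlight(String segmentedText, String keyWord) throws Exception {
		Analyzer analyzer = new SimpleAnalyzer();
		// Parse the keyword into a query; the field name is only a placeholder
		Query query = new QueryParser("content", analyzer).parse(keyWord);
		// Wrap matched terms in a red <font> tag, scored against the parsed query
		Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(
				"<font color=\"#cc0033\">", "</font>"), new QueryScorer(query));
		// Tokenize the segmented text with the same analyzer and return the best fragment
		TokenStream tokenStream = analyzer.tokenStream("content",
				new StringReader(segmentedText));
		return highlighter.getBestFragment(tokenStream, segmentedText);
	}
}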

/*
 * @(#)SummaryHighlighter.java
 * Copyright(c)
 */
package com.chengyi.util;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cw.Segmenter;
import org.apache.lucene.analysis.cw.SegmenterUtils;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.w3c.dom.CDATASection;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

/**
 * Highlights search results using Lucene.
 * 
 * @version 1.0 2009.02.03
 * @author zhx
 * 
 */
public class SummaryHighlighter {

	/** Field name for the highlighted content; it serves only as an identifier and carries no further meaning. */
	private static final String CONTENT = "content";

	/**
	 * Highlights the content summary.
	 * The xmlContent passed in is an XML document (see the test file 2.txt for its format). We parse
	 * that structure and extract the text it contains. The extracted text is itself HTML, so a second
	 * extraction pass is needed to obtain plain text.
	 * 
	 * @param xmlContent
	 *            content to highlight
	 * @param keyWord
	 *            keyword to highlight
	 * @return highlighted summary
	 */
	public static String getHighlighterSummary(String xmlContent, String keyWord) {
		String highlightedContent = "";
		String segmenterContent = "";
		String htmlContent = "";
		String content = "";
		// Extract the HTML content from the XML
		htmlContent = SummaryHighlighter.getHtmlContentFromXML(xmlContent);
		// Extract the plain text from the HTML
		content = SummaryHighlighter.getTextFromHtml(htmlContent);
		//System.out.println("Extracted plain text: " + content);
		// Segment the content into words
		segmenterContent = SummaryHighlighter.segmentString(content);
		// Analyzer analyzer = new CWordAnalyzer();
		Analyzer analyzer = new SimpleAnalyzer();
		QueryParser queryParser = new QueryParser(CONTENT, analyzer);
		// Set the minimum fuzzy similarity
		queryParser.setFuzzyMinSim(0.9f);
		try {
			// Parse the (segmented) keyword into a query
			Query query = queryParser.parse(segmentString(keyWord));
			// Term term = new Term(CONTENT, keyWord);
			// Query query = new TermQuery(term);
			QueryScorer scorer = new QueryScorer(query);
			Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(
					"<font color=\"#cc0033\">", "</font>"), scorer);
			Reader reader = new StringReader(segmenterContent);
			TokenStream tokenStream = analyzer.tokenStream(CONTENT, reader);
			// Get the best highlighted fragment
			highlightedContent = highlighter.getBestFragments(tokenStream,
					segmenterContent, 1, "...");
			// Remove the spaces introduced by segmentation; this also mangles the
			// opening <font> tag, so restore it afterwards
			highlightedContent = highlightedContent.replace(" ", "").replace(
					"<fontcolor=\"#cc0033\">", "<font color=\"#cc0033\">");
			//System.out.println(highlightedContent);
		} catch (IOException e) {
			e.printStackTrace();
		} catch (org.apache.lucene.queryParser.ParseException e) {
			e.printStackTrace();
		}
		return highlightedContent;
	}

	/**
	 * Segments the given text into words.
	 * 
	 * @param in
	 *            text to process
	 * @return the segmented text
	 */
	public static String segmentString(String in) {
		String ret = null;
		Segmenter mainsegmenter;
		try {
			// Load the segmenter with its dictionary file (zword.obj) from the temp directory
			mainsegmenter = SegmenterUtils.getSegmenter(System
					.getProperty("java.io.tmpdir")
					+ "/zword.obj");
			StringBuffer buffer = null;
			if (mainsegmenter != null) {
				BufferedReader bin = new BufferedReader(new StringReader(in));
				buffer = new StringBuffer();
				String dataline;
				String processed;
				try {
					// Segment the text line by line, separating words with spaces
					while ((dataline = bin.readLine()) != null) {
						processed = mainsegmenter.segmentLine(dataline, " ");
						buffer.append(processed).append("\n");
					}
				} catch (IOException ioe) {
					// ignored
				} finally {
					try {
						bin.close();
					} catch (Exception e) {
						// ignored
					}
				}
			}
			if (buffer != null) {
				ret = buffer.toString();
			}
		} catch (ClassCastException e1) {
			e1.printStackTrace();
		} catch (FileNotFoundException e1) {
			e1.printStackTrace();
		} catch (IOException e1) {
			e1.printStackTrace();
		} catch (ClassNotFoundException e1) {
			e1.printStackTrace();
		}

		return ret;
	}

	/**
	 * Reads a file into a string.
	 * 
	 * @param sFileName
	 *            file name
	 * @param sEncode
	 *            character encoding
	 * @return the file contents
	 */
	public static String readTextFile(String sFileName, String sEncode) {
		StringBuffer sbStr = new StringBuffer();

		try {
			File ff = new File(sFileName);
			InputStreamReader read = new InputStreamReader(new FileInputStream(
					ff), sEncode);
			BufferedReader ins = new BufferedReader(read);

			String dataLine = "";
			while (null != (dataLine = ins.readLine())) {
				sbStr.append(dataLine);
				// sbStr.append("\r\n");
			}

			ins.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
		//System.out.println("读入的文章内容:" + sbStr.toString());
		return sbStr.toString();
	}

	/**
	 * Extracts the content from the XML. This method matches the node structure of the test file
	 * 2.txt; the goal is to obtain the text inside the <![CDATA[...]]> section.
	 * 
	 * @param xmlContent
	 *            XML content
	 * @return the extracted content
	 */
	public static String getHtmlContentFromXML(String xmlContent) {
		// xmlContent.getBytes() below uses the platform default charset (GBK in this
		// project's environment), so rewrite the declared encoding to match before parsing
		xmlContent = xmlContent.replaceAll("UTF-8", "GBK");
		String content = "";
		DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
		DocumentBuilder builder;
		try {
			builder = factory.newDocumentBuilder();
			org.w3c.dom.Document doc = builder.parse(new ByteArrayInputStream(
					xmlContent.getBytes()));
			// normalize text representation
			doc.getDocumentElement().normalize();
			NodeList listOfContents = doc
					.getElementsByTagName("static-content");
			for (int s = 0; s < listOfContents.getLength(); s++) {
				Node contentNode = listOfContents.item(s);
				NodeList listOfCdata = contentNode.getChildNodes();
				int totalCdata = listOfCdata.getLength();
				for (int i = 0; i < totalCdata; i++) {
					Node cdataNode = listOfCdata.item(i);
					if (cdataNode.getNodeType() == Node.CDATA_SECTION_NODE) {
						CDATASection cdataSection = (CDATASection) cdataNode;
						content = cdataSection.getWholeText();
						// System.out.print(content);
					}
				}
			}
		} catch (ParserConfigurationException e) {
			e.printStackTrace();
		} catch (org.xml.sax.SAXException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}

		return content;
	}

	/**
	 * Extracts the plain text from HTML content.
	 * 
	 * @param htmlContent HTML content
	 * @return the plain text
	 */
	public static String getTextFromHtml(String htmlContent) {
		boolean bContent = true;
		StringBuffer sBuffer = new StringBuffer(8096 * 2);
		char[] cBuffer = htmlContent.toCharArray();
		int nCount = cBuffer.length;
		for (int i = 0; i < nCount; i++) {
			if (bContent == false) {
				// Inside a tag: skip until the closing '>'
				if (cBuffer[i] == '>')
					bContent = true;
				else
					continue;
			} else {
				if (cBuffer[i] == '<') {
					// A tag starts here
					bContent = false;
					continue;
				} else if (cBuffer[i] == '\n' || cBuffer[i] == ' ') {
					// Skip newlines and spaces
					continue;
				} else if (cBuffer[i] == '&' && i + 5 < nCount
						&& cBuffer[i + 1] == 'n' && cBuffer[i + 2] == 'b'
						&& cBuffer[i + 3] == 's' && cBuffer[i + 4] == 'p'
						&& cBuffer[i + 5] == ';') {
					// Skip the &nbsp; entity
					i = i + 5;
					continue;
				}

				sBuffer.append(cBuffer[i]);
			}
		}
		return sBuffer.toString();
	}
	


	/**
	 * Test entry point.
	 * 
	 * @param args
	 */
	public static void main(String[] args) {
		String content = SummaryHighlighter.readTextFile("d:/2.txt", "gbk");
		// Keyword to highlight in the sample document
		String keyWord = "中国";
		System.out.println(SummaryHighlighter.getHighlighterSummary(content, keyWord));
	}


}



Contents of the test file 2.txt:
<?xml version='1.0' encoding='UTF-8'?>
<root available-locales="en_US," default-locale="en_US">
<static-content language-id="en_US">
<![CDATA[<p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 凡中国产业集群网在<a href="http://www.csic99.com">www.csic99.com</a>运作的网站明确标示由中国产业集群合作伙伴运营的中国产业集群该城市网页及该城市的相关页面,均由中国产业集群的合作伙伴按照中国产业集群授权书及中国产业集群网的用户协议、使用规则等规定,在其所获得授权范围与授权期限内予以运营中国产业集群在此申明会积极帮助您,以支持您合法权益得到保障。 <br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 因中国产业集群合作伙伴在其被授权范围与期限内,按照法律法规规定及中国产业集群网的相关规定,所作之行为,如果您认为损害到了您的合法权益,您可以按照中国产业集群网约定的办法予以解决;中国产业集群合作伙伴在其被 授权范围以外或违反 法律法规规定及违反中国产业集群网的规定,所作之行为( 包括网上及非网上行为)而使您(任何中国产业集群网用户或任何第三方)受到的任何损失,或与一名或多名用户发生争议,就上述损失和/或争议产生或在任何方面与上述损失和/或争议有关的每一种类和性质的已知或未知、可疑或非可疑、披露或未披露的索赔、要求和损害,特此申明如下:鉴于中国产业集群合作伙伴之授权范围自其被授权之日起,已在本网站显著位置予以公开,中国产业集群网的用户协议及相关规定与免责申明等也在本网站显著位置予以,且中国产业集群仅作为网上分发信息的渠道,而并非信息的发布方,同时,中国产业集群没有事先审核用户上传的内容,也没有事后参与用户之间的实际联络之义务,故对于因为中国产业集群合作伙伴在其被授权范围以外或违反法律法规规定及中国产业集群网的相关规定,所作之行为(包括网上及非网上行为)而使您受到任何损失,或与一名或多名用户发生争议,就上述损失和/或争议产生或在任何方面与上述损失和/或争议有关的每一种类和性质的已知或未知、可疑或非可疑、披露或未披露的索赔、要求和损害, 特此申明免除中国产业集群(和中国产业集群的高级职员、董事、代理人、关联公司、母公司、子公司和雇员)的任何责任。 <br />请您仔细阅读相关授权文书,本网站用户协议及相关规定与免责申明等,以避免不必要的损失。&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 中国产业集群网</p>]]>
</static-content>
</root>
