Hibernate Search is a JBoss project, so its integration with Hibernate should be solid.
That is why I wanted to use Hibernate Search for full-text search in this project.
IKAnalyzer handles the Chinese word segmentation.
Project site:
http://code.google.com/p/ik-analyzer/
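To see what IKAnalyzer actually produces, here is a minimal standalone tokenization sketch using the Lucene 3.x TokenStream API (the class name and sample sentence are just for illustration):

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKAnalyzerDemo {

    public static void main(String[] args) throws Exception {
        // IKAnalyzer is a drop-in Lucene Analyzer for Chinese text.
        Analyzer analyzer = new IKAnalyzer();
        TokenStream ts = analyzer.tokenStream("content",
                new StringReader("三个月可以全身心在杭州专注于项目开发"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString()); // print each token the analyzer produced
        }
        ts.end();
        ts.close();
    }
}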
The stack: Hibernate 3.6.8 + Spring 3.0.6 + Hibernate Search 3.4.1 + IKAnalyzer 3.2.8.
The database is MySQL and the connection pool is c3p0.
As of Hibernate Search 3.4 there is no need to configure the Hibernate event listeners by hand (JBoss's own projects really do integrate that bit better).
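For comparison, on older versions the Hibernate Search listeners had to be wired into the session factory by hand, roughly like this on the AnnotationSessionFactoryBean (a rough sketch of the pre-3.4 style, not needed with 3.4.1):

<!-- pre-3.4 style only; with Hibernate Search 3.4.x the listeners register themselves -->
<property name="eventListeners">
    <map>
        <entry key="post-insert">
            <bean class="org.hibernate.search.event.FullTextIndexEventListener" />
        </entry>
        <entry key="post-update">
            <bean class="org.hibernate.search.event.FullTextIndexEventListener" />
        </entry>
        <entry key="post-delete">
            <bean class="org.hibernate.search.event.FullTextIndexEventListener" />
        </entry>
    </map>
</property>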
Here is the configuration actually used:
<bean id="sessionFactory" class="org.springframework.orm.hibernate3.annotation.AnnotationSessionFactoryBean"> <property name="dataSource"> <ref bean="dataSource" /> </property> <property name="packagesToScan"> <list> <value>com.freewebsys</value> </list> </property> <!-- <property name="mappingDirectoryLocations"> <list> <value>classpath:com/**/pojo</value> </list> </property> --> <property name="hibernateProperties"> <props> <prop key="hibernate.dialect">${hibernate.dialect}</prop> <!-- <prop key="hibernate.dialect"> org.hibernate.dialect.Oracle9Dialect </prop> <prop key="hibernate.dialect"> org.hibernate.dialect.MySQLDialect </prop> --> <prop key="hibernate.hbm2ddl.auto">${hibernate.hbm2ddl.auto}</prop> <prop key="cglib.use_reflection_optimizer">true</prop> <prop key="hibernate.show_sql">${hibernate.show_sql}</prop> <prop key="hibernate.jdbc.fetch_size">${hibernate.jdbc.fetch_size}</prop> <prop key="hibernate.jdbc.batch_size">${hibernate.jdbc.batch_size}</prop> <prop key="hibernate.cache.use_query_cache">${hibernate.cache.use_query_cache}</prop> <!-- <prop key="hibernate.cache.provider_class">${hibernate.cache.provider_class} </prop> --> <prop key="hibernate.query.substitutions">true 1, false 0, yes 'Y', no 'N'</prop> <!-- add hibernate search. --> <prop key="hibernate.search.default.directory_provider">${hibernate.search.default.directory_provider}</prop> <prop key="hibernate.search.default.indexBase">${hibernate.search.default.indexBase}</prop> <prop key="hibernate.search.analyzer">${hibernate.search.analyzer}</prop> </props> </property> </bean>
<property name="packagesToScan">这个属性直接可以把配置下面的java bena 读取出来。不用一条一条添加了。
Next comes the Hibernate transaction configuration.
<!-- #### Spring transaction configuration - begin #### -->
<bean id="transactionManager"
    class="org.springframework.orm.hibernate3.HibernateTransactionManager">
    <property name="sessionFactory">
        <ref bean="sessionFactory" />
    </property>
</bean>

<bean id="hibernateTemplate"
    class="org.springframework.orm.hibernate3.HibernateTemplate">
    <property name="sessionFactory">
        <ref bean="sessionFactory" />
    </property>
    <property name="cacheQueries">
        <value>true</value>
    </property>
</bean>

<bean id="transactionInterceptor"
    class="org.springframework.transaction.interceptor.TransactionInterceptor">
    <property name="transactionManager" ref="transactionManager" />
    <property name="transactionAttributes">
        <props>
            <prop key="*">PROPAGATION_REQUIRED,-Exception</prop>
            <!-- Methods starting with find/list/get run in read-only transactions;
                 everything else commits or rolls back. -->
            <prop key="find*">PROPAGATION_REQUIRED,readOnly</prop>
            <prop key="list*">PROPAGATION_REQUIRED,readOnly</prop>
            <prop key="get*">PROPAGATION_REQUIRED,readOnly</prop>
        </props>
    </property>
</bean>

<bean class="org.springframework.aop.framework.autoproxy.BeanNameAutoProxyCreator">
    <property name="beanNames">
        <value>*Service</value>
        <!--
        <value>*Mgr, *Service , *Director , *Outputter , importTask</value>
        Batch transaction configuration: every bean whose name matches gets the transaction
        interceptor, so business logic is strictly kept in the service layer. Read-only queries
        in the action layer are tolerable, but doing updates there is definitely not.
        -->
    </property>
    <property name="interceptorNames">
        <list>
            <value>transactionInterceptor</value>
        </list>
    </property>
</bean>

<bean class="org.springframework.transaction.interceptor.TransactionAttributeSourceAdvisor">
    <property name="transactionInterceptor" ref="transactionInterceptor" />
</bean>

<!-- Auto scan the components -->
<context:component-scan base-package="com.freewebsys" />
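Since the service below marks its methods with @Transactional, a simpler alternative to the interceptor/auto-proxy wiring above would be Spring's annotation-driven transaction support (a sketch; the tx namespace must be declared on the beans element):

<!-- alternative to the interceptor configuration above: drive transactions by @Transactional -->
<tx:annotation-driven transaction-manager="transactionManager" />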
Now create the entity bean to be indexed and searched:
package com.freewebsys.demo.pojo;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;

import org.hibernate.search.annotations.Analyzer;
import org.hibernate.search.annotations.DocumentId;
import org.hibernate.search.annotations.Field;
import org.hibernate.search.annotations.Index;
import org.hibernate.search.annotations.Indexed;
import org.hibernate.search.annotations.Store;
import org.wltea.analyzer.lucene.IKAnalyzer;

@Entity
@Table(name = "user_info")
@Indexed
public class UserInfo implements java.io.Serializable {

    private Long id;
    private String userName;
    private String passwd;
    private String city;
    private String content;

    public UserInfo() {
    }

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id", unique = true, nullable = false)
    @DocumentId
    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    @Column(name = "user_name", unique = false, nullable = true, length = 100)
    @Field(name = "user_name", index = Index.TOKENIZED, store = Store.YES)
    public String getUserName() {
        return userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    @Column(name = "passwd", unique = false, nullable = true, length = 100)
    @Field(name = "passwd", index = Index.TOKENIZED, store = Store.YES)
    public String getPasswd() {
        return passwd;
    }

    public void setPasswd(String passwd) {
        this.passwd = passwd;
    }

    @Column(name = "city", unique = false, nullable = true, length = 100)
    @Field(name = "city", index = Index.TOKENIZED, store = Store.YES)
    public String getCity() {
        return city;
    }

    public void setCity(String city) {
        this.city = city;
    }

    @Column(name = "content", unique = false, nullable = true, length = 4000)
    @Field(name = "content", index = Index.TOKENIZED, store = Store.YES, analyzer = @Analyzer(impl = IKAnalyzer.class))
    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    @Override
    public String toString() {
        return "UserInfo [id=" + id + ", userName=" + userName + ", passwd="
                + passwd + ", city=" + city + ", content=" + content + "]";
    }
}
@Indexed marks the entity as one to be indexed.
@Field(name = "user_name", index = Index.TOKENIZED, store = Store.YES)
marks a field to be indexed and stored.
@Field(name = "content", index = Index.TOKENIZED, store = Store.YES, analyzer = @Analyzer(impl = IKAnalyzer.class))
marks a field to be indexed and stored, and specifies IKAnalyzer as the analyzer for that field.
For the remaining annotation parameters, see this post: http://sin90lzc.iteye.com/blog/1106258
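If every indexed field should be analyzed with IKAnalyzer, the @Analyzer annotation can also be placed once at class level instead of repeating it per field; a minimal sketch (the entity above applies it only to content):

@Entity
@Table(name = "user_info")
@Indexed
@Analyzer(impl = IKAnalyzer.class) // class-level analyzer: applies to all indexed fields of this entity
public class UserInfo implements java.io.Serializable {
    // ... same fields, @Field mappings and getters/setters as above
}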
Then the service. The DAO layer is omitted here; the service extends HibernateDaoSupport directly.
package com.freewebsys.demo.service.impl;

import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.util.Version;
import org.hibernate.SessionFactory;
import org.hibernate.search.FullTextQuery;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.freewebsys.demo.pojo.UserInfo;
import com.freewebsys.demo.service.UserInfoService;

@Service("userInfoService")
public class UserInfoServiceImpl extends HibernateDaoSupport implements UserInfoService {

    private static Log log = LogFactory.getLog(UserInfoServiceImpl.class);

    @Autowired
    public void setMySessionFactory(SessionFactory sessionFactory) {
        setSessionFactory(sessionFactory);
    }

    @Transactional
    public void save(UserInfo userInfo) {
        getHibernateTemplate().save(userInfo);
    }

    @Transactional
    public void delete(UserInfo userInfo) {
        getHibernateTemplate().delete(userInfo);
    }

    /**
     * Query by user name using HQL.
     */
    @Transactional
    public List<UserInfo> findUserInfo(String userName) {
        String hql = " from UserInfo userInfo where userInfo.userName = ? ";
        return getHibernateTemplate().find(hql, userName);
    }

    public List<UserInfo> findUserInfoBySearchContent(String content) {
        FullTextSession fullTextSession = Search.getFullTextSession(getSession());
        QueryParser parser = new QueryParser(Version.LUCENE_31, "content",
                new SimpleAnalyzer(Version.LUCENE_31));
        org.apache.lucene.search.Query luceneQuery = null;
        try {
            luceneQuery = parser.parse(content);
        } catch (ParseException e) {
            e.printStackTrace();
        }
        FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(
                luceneQuery, UserInfo.class);
        List<UserInfo> useList = (List<UserInfo>) fullTextQuery.list();

        // Highlighting setup.
        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(
                "<b><font color='red'>", "</font></b>");
        QueryScorer qs = new QueryScorer(luceneQuery);
        Highlighter highlighter = new Highlighter(formatter, qs);
        // 20 is the length of the fragment built around each keyword; tune it as needed,
        // since returning the whole body is not practical.
        highlighter.setTextFragmenter(new SimpleFragmenter(20));
        for (UserInfo userInfo : useList) {
            Analyzer analyzer = new IKAnalyzer();
            try {
                // "content" is the name of the indexed field.
                String contentHighLighter = highlighter.getBestFragment(
                        analyzer, "content", userInfo.getContent());
                System.out.println(contentHighLighter);
                userInfo.setContent(contentHighLighter);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        return useList;
    }

    /**
     * Fetch all rows.
     */
    public List<UserInfo> findAllUserInfo() {
        String hql = " from UserInfo userInfo";
        return getHibernateTemplate().find(hql);
    }
}
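The UserInfoService interface itself is not shown; judging from the implementation it simply declares:

package com.freewebsys.demo.service;

import java.util.List;

import com.freewebsys.demo.pojo.UserInfo;

public interface UserInfoService {

    void save(UserInfo userInfo);

    void delete(UserInfo userInfo);

    // HQL lookup by user name
    List<UserInfo> findUserInfo(String userName);

    // full-text search on the content field
    List<UserInfo> findUserInfoBySearchContent(String content);

    List<UserInfo> findAllUserInfo();
}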
The key part is running the search, highlighting the hits, and trimming the article content for display.
You can also paginate the search with setFirstResult / setMaxResults.
fullTextQuery.getResultSize() returns the total number of matching results (not the page count); the page count can be derived from it and the page size.
fullTextQuery.setFirstResult((pageNo - 1) * pageSize);
fullTextQuery.setMaxResults(pageSize);
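Put together as an extra method on the service above, a paged search could look like this (a sketch: the method name is made up, and I use IKAnalyzer in the QueryParser instead of the SimpleAnalyzer used earlier):

// A sketch of a paged variant of findUserInfoBySearchContent; pageNo is 1-based.
public List<UserInfo> findUserInfoBySearchContentPaged(String content, int pageNo, int pageSize)
        throws ParseException {
    FullTextSession fullTextSession = Search.getFullTextSession(getSession());
    QueryParser parser = new QueryParser(Version.LUCENE_31, "content", new IKAnalyzer());
    org.apache.lucene.search.Query luceneQuery = parser.parse(content);
    FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery, UserInfo.class);

    int totalHits = fullTextQuery.getResultSize();          // total matching documents, not pages
    int totalPages = (totalHits + pageSize - 1) / pageSize; // page count derived from the hit count
    log.info("hits=" + totalHits + ", pages=" + totalPages);

    fullTextQuery.setFirstResult((pageNo - 1) * pageSize);  // zero-based offset of the first hit
    fullTextQuery.setMaxResults(pageSize);                  // page length
    return (List<UserInfo>) fullTextQuery.list();
}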
For highlighting, the content is tokenized a second time to locate the query terms in it:
// Highlighting setup.
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(
        "<b><font color='red'>", "</font></b>");
QueryScorer qs = new QueryScorer(luceneQuery);
Highlighter highlighter = new Highlighter(formatter, qs);
// 20 is the length of the fragment built around each keyword; tune it as needed,
// since returning the whole body is not practical.
highlighter.setTextFragmenter(new SimpleFragmenter(20));
for (UserInfo userInfo : useList) {
    Analyzer analyzer = new IKAnalyzer();
    try {
        // "content" is the name of the indexed field.
        String contentHighLighter = highlighter.getBestFragment(
                analyzer, "content", userInfo.getContent());
        System.out.println(contentHighLighter);
        userInfo.setContent(contentHighLighter);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
return useList;
The test code initializes the database with sample data first.
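Rows that were inserted before Hibernate Search was wired in are not in the Lucene index yet; the whole index can be rebuilt from the database with the MassIndexer, roughly like this (a sketch; the method name and batch sizes are arbitrary):

// Rebuild the Lucene index for UserInfo from the existing database rows.
public void rebuildIndex() throws InterruptedException {
    FullTextSession fullTextSession = Search.getFullTextSession(getSession());
    fullTextSession.createIndexer(UserInfo.class)
            .batchSizeToLoadObjects(25) // entities loaded per batch (arbitrary value)
            .threadsToLoadObjects(2)    // loader threads (arbitrary value)
            .startAndWait();            // blocks until the index has been rebuilt
}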
The HTML markup in the stored data can also be stripped before display:
QueryScorer qs = new QueryScorer(luceneQuery);
Highlighter highlighter = new Highlighter(formatter, qs);
// 20 is the length of the fragment built around each keyword; tune it as needed,
// since returning the whole body is not practical.
highlighter.setTextFragmenter(new SimpleFragmenter(20));
String contentStr = null;
for (UserInfo userInfo : useList) {
    Analyzer analyzer = new IKAnalyzer();
    try {
        contentStr = userInfo.getContent();
        // Strip all HTML tags first.
        contentStr = contentStr.replaceAll("<[a-zA-Z]+[1-9]?[^><]*>", "")
                .replaceAll("</[a-zA-Z]+[1-9]?>", "");
        String contentHighLighter = highlighter.getBestFragment(
                analyzer, "content", contentStr);
        System.out.println(contentHighLighter);
        userInfo.setContent(contentHighLighter);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Run the JUnit test:
List<UserInfo> list = userInfoService.findUserInfoBySearchContent("三个月");
System.out.println(list.size());
System.out.println("Finish ########");
for (UserInfo userInfo : list) {
    System.out.println(userInfo);
}
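Wrapped in a Spring-aware JUnit test class it would look roughly like this (the package, class name and context file location are assumptions):

package com.freewebsys.demo.test;

import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import com.freewebsys.demo.pojo.UserInfo;
import com.freewebsys.demo.service.UserInfoService;

@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = { "classpath:applicationContext.xml" }) // assumed config location
public class UserInfoSearchTest {

    @Autowired
    private UserInfoService userInfoService;

    @Test
    public void testSearchContent() {
        List<UserInfo> list = userInfoService.findUserInfoBySearchContent("三个月");
        System.out.println(list.size());
        System.out.println("Finish ########");
        for (UserInfo userInfo : list) {
            System.out.println(userInfo);
        }
    }
}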
The query results look like this:
<b><font color='red'>三个月</font></b>必须到杭州进行全身心开发,<b><font color='red'>三个月</font></b>之后 <b><font color='red'>三个月</font></b>可以全身心在杭州专注于项目开发。<b><font color='red'>三个月</font></b> 开发的<b><font color='red'>三个月</font></b>内,天使湾将在杭州每周举办分享 <b><font color='red'>三个月</font></b>内,不同创业团队在确保独立自主的 地。在杭州<b><font color='red'>三个月</font></b>期间创业团队的住宿餐饮 <b><font color='red'>三个月</font></b>绝对以一当十! 9.天使湾聚变
The page can then display the highlighted, truncated snippets.
One remaining issue: the full-text query also triggers a SQL query against the database, for example:
Hibernate: select this_.id as id0_0_, this_.city as city0_0_, this_.passwd as passwd0_0_, this_.user_name as user4_0_0_ from user_info this_ where (this_.id in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?))
This loads the entities matched by the search from the database by their IDs.
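If that extra round trip matters, the fields here are stored in the index (Store.YES), so a projection query can return the stored values straight from Lucene without touching MySQL. A sketch (the method name is made up; projected results come back as Object[] rows rather than managed entities):

// A sketch: read stored field values directly from the Lucene index, no SQL issued.
public List<Object[]> searchContentProjection(String content) throws ParseException {
    FullTextSession fullTextSession = Search.getFullTextSession(getSession());
    QueryParser parser = new QueryParser(Version.LUCENE_31, "content", new IKAnalyzer());
    org.apache.lucene.search.Query luceneQuery = parser.parse(content);
    FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery, UserInfo.class);
    fullTextQuery.setProjection("user_name", "city", "content"); // only works for Store.YES fields
    return (List<Object[]>) fullTextQuery.list();                // each element is one projected row
}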
All in all, Hibernate Search simplifies full-text search a great deal.
The full project code is attached.