[Computer Science] A Chinese Dictionary-Based Word-Segmentation Module for Lucene
Lucene development example: Chinese word segmentation in Lucene (reposted)

2. Creating an index from data in an Oracle database (using the IK analyzer)

package lucene.util;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.sql.Connection;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;

import modules.gk.Gk_info;
import modules.gk.Gk_infoSub;
import web.sys.Globals;
import web.db.DBConnector;
import web.db.ObjectCtl;
import web.util.StringUtil;

public class LuceneIndex {
    IndexWriter writer = null;
    FSDirectory dir = null;
    boolean create = true;

    public void init() {
        long a1 = System.currentTimeMillis();
        System.out.println("[Lucene starting: " + new Date() + "]");
        Connection con = DBConnector.getconecttion(); // obtain a database connection
        try {
            final File docDir = new File(Globals.SYS_COM_CONFIG.get("sys.index.path").toString()); // e.g. E:\lucene
            if (!docDir.exists()) {
                docDir.mkdirs();
            }
            String cr = Globals.SYS_COM_CONFIG.get("sys.index.create").toString(); // "true" or "false"
            if ("false".equals(cr.toLowerCase())) {
                create = false;
            }
            // assign the fields here (the original declared locals, shadowing the
            // fields and leaving the finally block with nothing to close or unlock)
            dir = FSDirectory.open(docDir);
            // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_36);
            Analyzer analyzer = new IKAnalyzer(true);
            IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_36, analyzer);
            if (create) {
                // Create a new index in the directory, removing any
                // previously indexed documents:
                iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            } else {
                // Add new documents to an existing index:
                iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
            }
            writer = new IndexWriter(dir, iwc);
            String sql = "SELECT indexno,title,describes,pdate,keywords FROM TABLEA WHERE STATE=1 AND SSTAG<>1 ";
            int rowCount = ObjectCtl.getRowCount(con, sql);
            int pageSize = StringUtil.StringToInt(Globals.SYS_COM_CONFIG.get("sys.index.size").toString()); // records per batch
            int pages = (rowCount - 1) / pageSize + 1; // total number of batches
            ArrayList list = null;
            Gk_infoSub gk = null;
            for (int i = 1; i < pages + 1; i++) {
                long a = System.currentTimeMillis();
                list = ObjectCtl.listPage(con, sql, i, pageSize, new Gk_infoSub());
                for (int j = 0; j < list.size(); j++) {
                    gk = (Gk_infoSub) list.get(j);
                    Document doc = new Document();
                    doc.add(new Field("indexno", StringUtil.null2String(gk.getIndexno()), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); // primary key: not analyzed
                    doc.add(new Field("title", StringUtil.null2String(gk.getTitle()), Field.Store.YES, Field.Index.ANALYZED));
                    doc.add(new Field("describes", StringUtil.null2String(gk.getDescribes()), Field.Store.YES, Field.Index.ANALYZED));
                    doc.add(new Field("pdate", StringUtil.null2String(gk.getPdate()), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); // date: not analyzed
                    doc.add(new Field("keywords", StringUtil.null2String(gk.getKeywords()), Field.Store.YES, Field.Index.ANALYZED));
                    writer.addDocument(doc);
                    ObjectCtl.executeUpdateBySql(con, "UPDATE TABLEA SET SSTAG=1 WHERE indexno='" + gk.getIndexno() + "'"); // mark the row as indexed
                }
                long b = System.currentTimeMillis();
                long c = b - a;
                System.out.println("[Lucene " + rowCount + " rows, " + pages + " batches, batch " + i + " took " + c + " ms]");
            }
            writer.commit(); // garbled to "mit()" in the original; commit() fits the context
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            DBConnector.freecon(con); // release the database connection
            try {
                if (writer != null) {
                    writer.close();
                }
            } catch (CorruptIndexException e) {
                e.printStackTrace();
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                try {
                    if (dir != null && IndexWriter.isLocked(dir)) {
                        IndexWriter.unlock(dir); // be sure to release the write lock
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        long b1 = System.currentTimeMillis();
        long c1 = b1 - a1;
        System.out.println("[Lucene finished, took " + c1 + " ms, completed at: " + new Date() + "]");
    }
}
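One detail in the indexing loop deserves a fix: the SSTAG update is assembled by string concatenation, so a quote character in indexno would break the statement (and invites SQL injection). Below is a minimal parameterized alternative, assuming the TABLEA schema and the DBConnector-supplied connection from the article; the class and method names are made up for illustration:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class IndexFlag {
    // Marks one row as indexed using a bound parameter instead of concatenation.
    public static void markIndexed(Connection con, String indexno) throws SQLException {
        PreparedStatement ps = con.prepareStatement("UPDATE TABLEA SET SSTAG=1 WHERE indexno=?");
        try {
            ps.setString(1, indexno); // the value is bound, never spliced into the SQL text
            ps.executeUpdate();
        } finally {
            ps.close();
        }
    }
}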
"毫秒]");83 }84 mit();8586 } catch (Exception e) {87 e.printStackTrace();88 } finally {89 DBConnector.freecon(con); //释放数据库连接90try {91if (writer != null) {92 writer.close();93 }94 } catch (CorruptIndexException e) {95 e.printStackTrace();96 } catch (IOException e) {97 e.printStackTrace();98 } finally {99try {100if (dir != null && IndexWriter.isLocked(dir)) {101 IndexWriter.unlock(dir);//注意解锁102 }103 } catch (IOException e) {104 e.printStackTrace();105 }106 }107 }108long b1 = System.currentTimeMillis();109long c1 = b1 - a1;110 System.out.println("[Lucene 执⾏完毕,花费时间:" + c1 + "毫秒,完成时间:" + new Date() + "]");111 }112 }3、单字段查询以及多字段分页查询⾼亮显⽰1package lucene.util;23import org.apache.lucene.store.FSDirectory;4import org.apache.lucene.store.Directory;5import org.apache.lucene.search.*;6import org.apache.lucene.search.highlight.SimpleHTMLFormatter;7import org.apache.lucene.search.highlight.Highlighter;8import org.apache.lucene.search.highlight.SimpleFragmenter;9import org.apache.lucene.search.highlight.QueryScorer;10import org.apache.lucene.queryParser.QueryParser;11import org.apache.lucene.queryParser.MultiFieldQueryParser;12import org.apache.lucene.analysis.TokenStream;13import org.apache.lucene.analysis.Analyzer;14import org.apache.lucene.analysis.KeywordAnalyzer;15import org.apache.lucene.document.Document;16import org.apache.lucene.index.IndexReader;17import org.apache.lucene.index.Term;18import org.apache.lucene.util.Version;19import modules.gk.Gk_infoSub;2021import java.util.ArrayList;22import java.io.File;23import java.io.StringReader;24import ng.reflect.Constructor;2526import web.util.StringUtil;27import web.sys.Globals;28import org.wltea.analyzer.lucene.IKAnalyzer;29//30public class LuceneQuery {31private static String indexPath;// 索引⽣成的⽬录32private int rowCount;// 记录数33private int pages;// 总页数34private int currentPage;// 当前页数35private int pageSize; //每页记录数3637public LuceneQuery() {38this.indexPath = Globals.SYS_COM_CONFIG.get("sys.index.path").toString();39 }4041public int getRowCount() {42return rowCount;43 }4445public int getPages() {46return pages;47 }49public int getPageSize() {50return pageSize;51 }5253public int getCurrentPage() {54return currentPage;55 }5657/**58 * 函数功能:根据字段查询索引59*/60public ArrayList queryIndexTitle(String keyWord, int curpage, int pageSize) {61 ArrayList list = new ArrayList();62try {63if (curpage <= 0) {64 curpage = 1;65 }66if (pageSize <= 0) {67 pageSize = 20;68 }69this.pageSize = pageSize; //每页记录数70this.currentPage = curpage; //当前页71int start = (curpage - 1) * pageSize;72 Directory dir = FSDirectory.open(new File(indexPath));73 IndexReader reader = IndexReader.open(dir);74 IndexSearcher searcher = new IndexSearcher(reader);75 Analyzer analyzer = new IKAnalyzer(true);76 QueryParser queryParser = new QueryParser(Version.LUCENE_36, "title", analyzer);77 queryParser.setDefaultOperator(QueryParser.AND_OPERATOR);78 Query query = queryParser.parse(keyWord);79int hm = start + pageSize;80 TopScoreDocCollector res = TopScoreDocCollector.create(hm, false);81 searcher.search(query, res);8283 SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<span style='color:red'>", "</span>");84 Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new QueryScorer(query));85this.rowCount = res.getTotalHits();86this.pages = (rowCount - 1) / pageSize + 1; //计算总页数87 TopDocs tds = res.topDocs(start, pageSize);88 ScoreDoc[] sd = tds.scoreDocs;89for (int i = 0; i < sd.length; i++) {90 Document hitDoc = reader.document(sd[i].doc);91 list.add(createObj(hitDoc, analyzer, 
    /**
     * Query the index across several fields.
     */
    public ArrayList queryIndexFields(String allkeyword, String onekeyword, String nokeyword, int curpage, int pageSize) {
        ArrayList list = new ArrayList();
        try {
            if (curpage <= 0) {
                curpage = 1;
            }
            if (pageSize <= 0) {
                pageSize = 20;
            }
            this.pageSize = pageSize;   // records per page
            this.currentPage = curpage; // current page
            int start = (curpage - 1) * pageSize;
            Directory dir = FSDirectory.open(new File(indexPath));
            IndexReader reader = IndexReader.open(dir);
            IndexSearcher searcher = new IndexSearcher(reader);
            BooleanQuery bQuery = new BooleanQuery(); // combined query
            if (!"".equals(allkeyword)) { // all keywords required
                KeywordAnalyzer analyzer = new KeywordAnalyzer();
                // SHOULD across the three fields: the input may match in any of them
                BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
                Query query = MultiFieldQueryParser.parse(Version.LUCENE_36, allkeyword, new String[]{"title", "describes", "keywords"}, flags, analyzer);
                bQuery.add(query, BooleanClause.Occur.MUST); // AND into the combined query
            }
            if (!"".equals(onekeyword)) { // any keyword suffices
                Analyzer analyzer = new IKAnalyzer(true);
                BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD}; // OR
                Query query = MultiFieldQueryParser.parse(Version.LUCENE_36, onekeyword, new String[]{"title", "describes", "keywords"}, flags, analyzer);
                bQuery.add(query, BooleanClause.Occur.MUST); // AND into the combined query
            }
            if (!"".equals(nokeyword)) { // keywords to exclude
                Analyzer analyzer = new IKAnalyzer(true);
                BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD}; // NOT
                Query query = MultiFieldQueryParser.parse(Version.LUCENE_36, nokeyword, new String[]{"title", "describes", "keywords"}, flags, analyzer);
                bQuery.add(query, BooleanClause.Occur.MUST_NOT); // exclude these matches
            }
            int hm = start + pageSize;
            TopScoreDocCollector res = TopScoreDocCollector.create(hm, false);
            searcher.search(bQuery, res);
            SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<span style='color:red'>", "</span>");
            Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new QueryScorer(bQuery));
            this.rowCount = res.getTotalHits();
            this.pages = (rowCount - 1) / pageSize + 1; // total number of pages
            System.out.println("rowCount:" + rowCount);
            TopDocs tds = res.topDocs(start, pageSize);
            ScoreDoc[] sd = tds.scoreDocs;
            Analyzer analyzer = new IKAnalyzer();
            for (int i = 0; i < sd.length; i++) {
                Document hitDoc = reader.document(sd[i].doc);
                list.add(createObj(hitDoc, analyzer, highlighter));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return list;
    }

    /**
     * Build the result object (with highlighting).
     */
    private synchronized static Object createObj(Document doc, Analyzer analyzer, Highlighter highlighter) {
        Gk_infoSub gk = new Gk_infoSub();
        try {
            if (doc != null) {
                gk.setIndexno(StringUtil.null2String(doc.get("indexno")));
                gk.setPdate(StringUtil.null2String(doc.get("pdate")));
                String title = StringUtil.null2String(doc.get("title"));
                gk.setTitle(title);
                if (!"".equals(title)) {
                    highlighter.setTextFragmenter(new SimpleFragmenter(title.length()));
                    TokenStream tk = analyzer.tokenStream("title", new StringReader(title));
                    String htext = StringUtil.null2String(highlighter.getBestFragment(tk, title));
                    if (!"".equals(htext)) {
                        gk.setTitle(htext);
                    }
                }
                String keywords = StringUtil.null2String(doc.get("keywords"));
                gk.setKeywords(keywords);
                if (!"".equals(keywords)) {
                    highlighter.setTextFragmenter(new SimpleFragmenter(keywords.length()));
                    TokenStream tk = analyzer.tokenStream("keywords", new StringReader(keywords));
                    String htext = StringUtil.null2String(highlighter.getBestFragment(tk, keywords));
                    if (!"".equals(htext)) {
                        gk.setKeywords(htext);
                    }
                }
                String describes = StringUtil.null2String(doc.get("describes"));
                gk.setDescribes(describes);
                if (!"".equals(describes)) {
                    highlighter.setTextFragmenter(new SimpleFragmenter(describes.length()));
                    TokenStream tk = analyzer.tokenStream("describes", new StringReader(describes)); // the original passed "keywords" here by mistake
                    String htext = StringUtil.null2String(highlighter.getBestFragment(tk, describes));
                    if (!"".equals(htext)) {
                        gk.setDescribes(htext);
                    }
                }
            }
            return gk;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        } finally {
            gk = null;
        }
    }
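    // Aside: in queryIndexFields the "all keywords required" branch parses with
    // SHOULD flags, so each term only has to appear in ANY one of the three
    // fields. If the intent is that every parsed term must match somewhere, a
    // parser whose default operator is AND expresses that directly (the helper
    // name allKeywordsQuery is ours, not the article's):
    private static Query allKeywordsQuery(String allkeyword) throws Exception {
        MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_36,
                new String[]{"title", "describes", "keywords"}, new IKAnalyzer(true));
        parser.setDefaultOperator(QueryParser.AND_OPERATOR); // every term is required
        return parser.parse(allkeyword);
    }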
    private synchronized static Object createObj(Document doc) {
        Gk_infoSub gk = new Gk_infoSub();
        try {
            if (doc != null) {
                gk.setIndexno(StringUtil.null2String(doc.get("indexno")));
                gk.setPdate(StringUtil.null2String(doc.get("pdate")));
                gk.setTitle(StringUtil.null2String(doc.get("title")));
                gk.setKeywords(StringUtil.null2String(doc.get("keywords")));
                gk.setDescribes(StringUtil.null2String(doc.get("describes")));
            }
            return gk;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        } finally {
            gk = null;
        }
    }
}

Single-field query usage:

long a = System.currentTimeMillis();
try {
    int curpage = StringUtil.StringToInt(StringUtil.null2String(form.get("curpage")));
    int pagesize = StringUtil.StringToInt(StringUtil.null2String(form.get("pagesize")));
    String title = StringUtil.replaceLuceneStr(StringUtil.null2String(form.get("title")));
    LuceneQuery lu = new LuceneQuery();
    form.addResult("list", lu.queryIndexTitle(title, curpage, pagesize));
    form.addResult("curPage", lu.getCurrentPage());
    form.addResult("pageSize", lu.getPageSize());
    form.addResult("rowCount", lu.getRowCount());
    form.addResult("pageCount", lu.getPages());
} catch (Exception e) {
    e.printStackTrace();
}
long b = System.currentTimeMillis();
long c = b - a;
System.out.println("[search took " + c + " ms]");

Multi-field query usage:

long a = System.currentTimeMillis();
try {
    int curpage = StringUtil.StringToInt(StringUtil.null2String(form.get("curpage")));
    int pagesize = StringUtil.StringToInt(StringUtil.null2String(form.get("pagesize")));
    String allkeyword = StringUtil.replaceLuceneStr(StringUtil.null2String(form.get("allkeyword")));
    String onekeyword = StringUtil.replaceLuceneStr(StringUtil.null2String(form.get("onekeyword")));
    String nokeyword = StringUtil.replaceLuceneStr(StringUtil.null2String(form.get("nokeyword")));
    LuceneQuery lu = new LuceneQuery();
    form.addResult("list", lu.queryIndexFields(allkeyword, onekeyword, nokeyword, curpage, pagesize));
    form.addResult("curPage", lu.getCurrentPage());
    form.addResult("pageSize", lu.getPageSize());
    form.addResult("rowCount", lu.getRowCount());
    form.addResult("pageCount", lu.getPages());
} catch (Exception e) {
    e.printStackTrace();
}
long b = System.currentTimeMillis();
long c = b - a;
System.out.println("[advanced search took " + c + " ms]");

4. Wildcard queries in Lucene

BooleanQuery bQuery = new BooleanQuery(); // combined query
if (!"".equals(title)) {
    WildcardQuery w1 = new WildcardQuery(new Term("title", title + "*"));
    bQuery.add(w1, BooleanClause.Occur.MUST); // AND
}
int hm = start + pageSize;
TopScoreDocCollector res = TopScoreDocCollector.create(hm, false);
searcher.search(bQuery, res);
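A trailing-asterisk wildcard like the one above is simply a prefix match, so the same clause can be written with PrefixQuery, which states the intent directly and skips wildcard parsing. A minimal sketch using the same field name (the wrapper class is ours):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;

public class TitlePrefixQuery {
    // Equivalent to new WildcardQuery(new Term("title", title + "*")).
    static BooleanQuery build(String title) {
        BooleanQuery bQuery = new BooleanQuery();
        if (!"".equals(title)) {
            bQuery.add(new PrefixQuery(new Term("title", title)), BooleanClause.Occur.MUST);
        }
        return bQuery;
    }
}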
5. Nested queries in Lucene

Implementing the SQL: (unitid like 'unitid%' and idml like 'id2%') or (tounitid like 'unitid%' and tomlid like 'id2%' and tostate=1)

BooleanQuery bQuery = new BooleanQuery();
BooleanQuery b1 = new BooleanQuery();
WildcardQuery w1 = new WildcardQuery(new Term("unitid", unitid + "*"));
WildcardQuery w2 = new WildcardQuery(new Term("idml", id2 + "*"));
b1.add(w1, BooleanClause.Occur.MUST); // AND
b1.add(w2, BooleanClause.Occur.MUST); // AND
bQuery.add(b1, BooleanClause.Occur.SHOULD); // OR
BooleanQuery b2 = new BooleanQuery();
WildcardQuery w3 = new WildcardQuery(new Term("tounitid", unitid + "*"));
WildcardQuery w4 = new WildcardQuery(new Term("tomlid", id2 + "*"));
WildcardQuery w5 = new WildcardQuery(new Term("tostate", "1"));
b2.add(w3, BooleanClause.Occur.MUST); // AND
b2.add(w4, BooleanClause.Occur.MUST); // AND
b2.add(w5, BooleanClause.Occur.MUST); // AND
bQuery.add(b2, BooleanClause.Occur.SHOULD); // OR

6. Sorting by date first, then paging

int hm = start + pageSize;
Sort sort = new Sort(new SortField("pdate", SortField.STRING, true));
TopScoreDocCollector res = TopScoreDocCollector.create(pageSize, false);
searcher.search(bQuery, res);
this.rowCount = res.getTotalHits();
this.pages = (rowCount - 1) / pageSize + 1; // total number of pages
TopDocs tds = searcher.search(bQuery, rowCount, sort); // instead of res.topDocs(start, pageSize)
ScoreDoc[] sd = tds.scoreDocs;
System.out.println("rowCount:" + rowCount);
int i = 0;
for (ScoreDoc scoreDoc : sd) {
    i++;
    if (i < start) {
        continue;
    }
    if (i > hm) {
        break;
    }
    Document doc = searcher.doc(scoreDoc.doc);
    list.add(createObj(doc));
}

This is not efficient. The proper approach is to sort when the index is built and then use the paging method, rather than running two searches like this.
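Even without re-sorting at index time, the two searches above can be collapsed into one pass: a TopFieldCollector keeps only the top start + pageSize documents in pdate order while still reporting the total hit count. A sketch against the same Lucene 3.6 API and field name (the wrapper class and method name are ours):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;

public class SortedPaging {
    // Collects only the top start+pageSize docs sorted by pdate (descending)
    // and slices out one page; getTotalHits() on the collector still gives
    // the full hit count for computing the page total.
    static TopDocs sortedPage(IndexSearcher searcher, Query query,
                              int start, int pageSize) throws IOException {
        Sort sort = new Sort(new SortField("pdate", SortField.STRING, true));
        TopFieldCollector collector = TopFieldCollector.create(
                sort, start + pageSize,
                true,   // fillFields
                false,  // trackDocScores: scores not needed for date paging
                false,  // trackMaxScore
                false); // docsScoredInOrder
        searcher.search(query, collector);
        return collector.topDocs(start, pageSize);
    }
}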
Design and Implementation of a Chinese Word Segmenter Based on Lucene

彭焕峰

【Abstract】Lucene's bundled Chinese analyzers segment text inefficiently. After analyzing existing segmentation-dictionary mechanisms, this paper designs a segmenter based on a whole-word binary-search mechanism backed by full hashing and integrates it into Lucene. By hashing entire words, the algorithm reduces the number of string comparisons per lookup and so speeds up segmentation. The segmenter's dictionary file is easy to maintain and can be customized for different applications, which improves retrieval efficiency.
【Journal】《微型机与应用》(Microcomputer & Its Applications)
【Year (volume), issue】2011 (030) 018
【Pages】3 (P62-64)
【Keywords】Lucene; hashing; whole-word binary search; maximum matching
【Author】彭焕峰
【Affiliation】School of Computer Engineering, Nanjing Institute of Technology, Nanjing, Jiangsu 211167
【Language】Chinese
【CLC number】TP391.1

The growth of information technology has produced massive volumes of electronic data. Users' demands on information retrieval keep rising, and search-engine technology has advanced rapidly and is being applied in ever more fields.
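The mechanism the abstract describes stores whole words in a hashed dictionary, so each candidate word costs one hash probe rather than a binary search over strings. Here is a toy sketch of that idea combined with forward maximum matching; the dictionary entries and maximum word length are illustrative assumptions, not the paper's data:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class HashMaxMatch {
    // Tiny stand-in dictionary; a real one is loaded from the dictionary file.
    static final Set<String> DICT = new HashSet<String>(
            Arrays.asList("信息", "信息检索", "检索", "技术"));
    static final int MAX_LEN = 4; // longest dictionary word, in characters

    // Forward maximum matching: at each position try the longest candidate
    // first; DICT.contains is an O(1) hash probe instead of a string-compare
    // driven binary search.
    static void segment(String text) {
        int i = 0;
        while (i < text.length()) {
            String word = text.substring(i, i + 1); // single-character fallback
            int end = Math.min(i + MAX_LEN, text.length());
            for (int j = end; j > i; j--) {
                String cand = text.substring(i, j);
                if (DICT.contains(cand)) {
                    word = cand;
                    break;
                }
            }
            System.out.print(word + "/");
            i += word.length();
        }
    }

    public static void main(String[] args) {
        segment("信息检索技术"); // prints: 信息检索/技术/
    }
}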
Chinese word segmentation in Java

IK Analyzer is an open-source word-segmentation framework built on Lucene.
Download: /so/search/s.do?q=IKAnalyzer2012.jar&t=doc&o=&s=all&l=null
The project needs two jars on the classpath: IKAnalyzer2012.jar and lucene-core-3.6.0.jar.

There are two ways to use it. Using IK's core API directly (no Lucene classes involved):

import java.io.IOException;
import java.io.StringReader;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class Fenci1 {
    public static void main(String[] args) throws IOException {
        String text = "你好,我的世界!";
        StringReader sr = new StringReader(text);
        IKSegmenter ik = new IKSegmenter(sr, true);
        Lexeme lex = null;
        while ((lex = ik.next()) != null) {
            System.out.print(lex.getLexemeText() + ",");
        }
    }
}

Using IK Analyzer through Lucene's Analyzer/TokenStream API:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class Fenci {
    public static void main(String[] args) throws IOException {
        String text = "你好,我的世界!";
        // create the analyzer
        Analyzer anal = new IKAnalyzer(true);
        StringReader reader = new StringReader(text);
        // tokenize
        TokenStream ts = anal.tokenStream("", reader);
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        // iterate over the tokens
        while (ts.incrementToken()) {
            System.out.print(term.toString() + ",");
        }
        reader.close();
        System.out.println();
    }
}

Output: 你好,我,的,世界,
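In both examples the boolean passed to IKSegmenter/IKAnalyzer selects smart mode: true merges tokens into the most likely words, false keeps every fine-grained candidate. A quick way to compare the two modes on the same sentence; the exact output depends on the IK dictionary in use:

import java.io.IOException;
import java.io.StringReader;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class FenciModes {
    static void run(String text, boolean useSmart) throws IOException {
        IKSegmenter ik = new IKSegmenter(new StringReader(text), useSmart);
        Lexeme lex;
        while ((lex = ik.next()) != null) {
            System.out.print(lex.getLexemeText() + ",");
        }
        System.out.println(" (useSmart=" + useSmart + ")");
    }

    public static void main(String[] args) throws IOException {
        run("你好,我的世界!", true);  // smart mode, as in the examples above
        run("你好,我的世界!", false); // fine-grained mode emits more, overlapping tokens
    }
}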
Chinese Text Segmentation Based on Lucene
王继明;杨国林
【Journal】《内蒙古工业大学学报(自然科学版)》(Journal of Inner Mongolia University of Technology, Natural Science Edition)
【Year (volume), issue】2007 (026) 003
【Abstract】Chinese text segmentation is an important branch of text mining and is still in a developing stage in China. Lucene, an open-source project of Apache Jakarta, is an excellent Java-based text-retrieval toolkit that is already widely used abroad. Its support for Chinese segmentation, however, is unsatisfactory; adding good Chinese segmentation to Lucene would greatly advance its adoption and application in China.
【Pages】4 (P185-188)
【Authors】王继明; 杨国林
【Affiliation】School of Information Engineering, Inner Mongolia University of Technology, Hohhot 010051 (both authors)
【Language】Chinese
【CLC number】TP311.11
【Related literature】
1. A Mutual-Information-Based String-Scanning Method for Chinese Text Segmentation [J], 赵秦怡; 王丽珍
2. A Chinese Text Segmentation Method Based on Word Combination [J], 黄魏; 高兵; 刘异; 杨克巍
3. Design and Implementation of a Lucene-Based Chinese Literature Search Engine for Graphene [J], 肖显东; 王勤生; 杨永强; 章国宝
4. Design and Implementation of a Lucene-Based Chinese Yes/No Question-Answering System [J], 罗东霞; 卿粼波; 吴晓红
Design and Implementation of a Custom Chinese Analyzer Based on Lucene

王桐; 王韵婷

【Journal】《电脑知识与技术》(Computer Knowledge and Technology)
【Year (volume), issue】2014 (000) 002
【Abstract】This paper designs a custom Chinese analyzer based on the complex form of the maximum-matching algorithm (MMSeg_Complex). The analyzer applies four disambiguation rules and supports user-defined dictionaries, custom synonyms, and stop words. It can be integrated into Lucene easily, effectively improving Lucene's Chinese-processing capability. Experiments show that its segmentation performance is greatly improved over Lucene's built-in Chinese analyzer, and an efficient Chinese full-text retrieval system was ultimately built on it.
【Pages】4 (P430-433)
【Authors】王桐; 王韵婷
【Affiliation】College of Information and Communication Engineering, Harbin Engineering University, Harbin, Heilongjiang 150001 (both authors)
【Language】Chinese
【CLC number】TP393
【Related literature】
1. Research on a Chinese Analyzer Based on Lucene and the MMSEG Algorithm [J], 邓晓枫; 蒋廷耀
2. Improvement and Implementation of a Chinese Analyzer Based on Lucene [J], 罗惠峰; 郭淑琴
3. Design and Implementation of a Chinese Word Segmenter Based on Lucene [J], 彭焕峰
4. Design and Implementation of a Lucene-Based Chinese Yes/No Question-Answering System [J], 罗东霞; 卿粼波; 吴晓红
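MMSeg's complex form, which the analyzer above builds on, scores candidate chunks of up to three consecutive dictionary words and keeps the best one; its first disambiguation rule prefers the chunk covering the most characters, and three further rules break ties. A toy illustration of rule 1 only; the candidate words are examples of ours, not taken from the paper:

import java.util.Arrays;
import java.util.List;

public class MmsegRule1Demo {
    // Rule 1 score: total number of characters the chunk covers.
    static int coveredChars(String[] chunk) {
        int sum = 0;
        for (String w : chunk) sum += w.length();
        return sum;
    }

    // Among competing chunks starting at the same position, keep the longest.
    static String[] pickByRule1(List<String[]> chunks) {
        String[] best = chunks.get(0);
        for (String[] c : chunks) {
            if (coveredChars(c) > coveredChars(best)) best = c;
        }
        return best;
    }

    public static void main(String[] args) {
        List<String[]> chunks = Arrays.asList(
                new String[]{"眼", "看", "就要"},     // covers 4 characters
                new String[]{"眼看", "就要", "来了"}); // covers 6 characters -> wins
        System.out.println(Arrays.toString(pickByRule1(chunks)));
    }
}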
Adding Chinese word segmentation to Lucene, part 3 (another article from Lietu/猎兔)

The original post is reproduced below: /en/

The tokenizer consists of two parts: the code, shipped in a jar file, and the dictionary data (a Chinese language model) compressed in a zip file, which you can unpack to a path of your choice.

Write a CnAnalyzer class to test it:

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import seg.CnTokenizer; // the package prefix was lost in the original; inferred from seg.result.PlaceFilter below
import seg.result.PlaceFilter;

/**
 * An Analyzer that demonstrates CnTokenizer.
 */
public class CnAnalyzer extends Analyzer {
    //~ Constructors -----------------------------------------------------------

    public CnAnalyzer() {
    }

    //~ Methods ----------------------------------------------------------------

    /**
     * Get a token stream from the input.
     *
     * @param fieldName Lucene field name
     * @param reader    input reader
     *
     * @return TokenStream
     */
    public final TokenStream tokenStream(String fieldName, Reader reader) {
        TokenStream result = new CnTokenizer(reader);
        result = new LowerCaseFilter(result);
        // a place-name filter is applied as well
        result = new PlaceFilter(result);
        return result;
    }
}

Use a test class to exercise CnAnalyzer:

public static void testCnAnalyzer() throws Exception {
    long startTime;
    long endTime;
    StringReader input;
    CnTokenizer.makeTag = false;
    String sentence = "其中包括兴安至全州、桂林至兴安、全州至黄沙河、阳朔至平乐、桂林至阳朔、桂林市国道过境线灵川至三塘段、平乐至钟山、桂林至三江高速公路。