Solr进阶之拼写纠错功能的实现基础拼音
生活随笔
收集整理的這篇文章主要介紹了
Solr进阶之拼写纠错功能的实现基础拼音
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
思路:
1.當漢字個數小于等于三個漢字時,使用單個詞庫進行匹配(最大匹配法)
將漢字轉為同音字,查詢單個詞庫中的數據,選出音一樣的詞語列表,加上最小距離算法(保證至少一個漢字一樣),得出一個列表,按照一定的算法排序后,選出最好的那個詞語.
(詞語庫中詞語定時更新,索引對應詞語的查詢結果)
2.當漢字個數在4到6個數目時,使用最大匹配切詞法進行切詞處理,切分為單個最大詞后,使用1中的排序法則,排序后,(這里詞語的組合如何去做?? 先不做,涉及nlp),直接得到最佳的三個詞語吧,進行查詢,得到結果數.再次使用最小距離算法的方式(solr自身的)進行一次糾錯,將兩次糾錯結果進行算法排序,得出最優化結果
3.當查詢漢字個數大于6個時,使用切詞的方式進行處理,將詞庫中需要查詢到的漢字預先使用n-gram處理后,將漢字和拼音為一個單位放入多值字段中.
將查詢的漢字使用n-gram切分后,轉為拼音,進行上述字段的查詢,對每個拼音對應的進行高亮處理,取出對應的列表,將一系列漢字組合后,假設經過優化后(最小距離,前后關系 等)數目是3,將這個三個詞語再次查詢,查出相應的結果數目,
再次使用solr自身的最小距離算法模塊,查出一個糾錯列表
將兩次結果經過算法處理后,得出最優解,給出建議!
短詞糾錯部分實現
/**
 * Maximum-matching spell correction for short queries: pinyin matching plus
 * minimum edit distance. Examples: 電視機, 名師輔導, 權威名師輔導.
 *
 * @param word the raw query text to correct
 * @return the best corrected suggestion, or "" when no candidate was found
 * @author songqinghu (2016-09-12)
 */
private String maxMatching(String word) {
    // Key "start-end" -> substring, produced by the n-gram analyzer.
    Map<String, String> oldWord = NGramTokenizerUtil.analyzer(word);
    String spellword = "";
    List<SpellWordTree> spellWords = new ArrayList<SpellWordTree>();
    matching(oldWord, word.length(), spellWords);
    if (spellWords.size() > 0) {
        // If the leftmost matched segment does not start at offset 0, keep the
        // uncovered prefix verbatim as its own single-candidate segment.
        int start = spellWords.get(spellWords.size() - 1).getStart();
        if (start != 0) {
            SpellWordTree spellWordTree = new SpellWordTree();
            spellWordTree.setKey(0 + "-" + start);
            ArrayList<String> wordList = new ArrayList<String>();
            wordList.add(oldWord.get(spellWordTree.getKey()));
            spellWordTree.setWord(wordList);
            spellWords.add(spellWordTree);
        }
        // Cartesian combination of every segment's candidate words.
        // spellWords is ordered rightmost-segment first, so iterate backwards.
        List<String> words = new ArrayList<String>();
        words.add(spellword); // seed
        for (int i = spellWords.size() - 1; i >= 0; i--) {
            List<String> temp = new ArrayList<String>();
            for (String minword : spellWords.get(i).getWord()) {
                for (String seedword : words) {
                    temp.add(seedword + minword);
                }
            }
            words = temp;
        }
        if (words.size() == 1) {
            return words.get(0);
        }
        // Rank candidates by how many documents each matches in the index.
        long maxCount = -1L;
        for (String string : words) {
            Long count = findSpellWrodResult(string);
            if (count > maxCount) {
                spellword = string;
                maxCount = count;
            }
        }
    }
    return spellword;
}

/**
 * Recursive maximum matching: finds the longest span whose pinyin matches a
 * dictionary word, records it (plus any uncovered suffix), then recurses on
 * the remaining prefix. Segments are appended rightmost first.
 *
 * @param oldWord    "start-end" -> substring map from the n-gram analyzer
 * @param max        exclusive end offset of the range still to be matched
 * @param spellWords collector of matched segments
 * @author songqinghu (2016-09-13)
 */
private void matching(Map<String, String> oldWord, int max, List<SpellWordTree> spellWords) {
    for (int i = max; i >= 0; i--) {
        for (int j = 0; j < i - 1; j++) {
            String key = j + "-" + i;
            String value = oldWord.get(key);
            // FIX: guard against null — the analyzer map does not necessarily
            // contain every possible "j-i" span, and the original code called
            // value.length() without a null check.
            if (value != null && value.length() > 1) {
                SpellWordTree spellWordTree = new SpellWordTree();
                if (singleWordhandle(value, spellWordTree)) {
                    // Build the tree structure over the 0..max range;
                    // later combined into candidate words.
                    spellWordTree.setKey(key);
                    // Keep any uncovered suffix verbatim.
                    if (spellWordTree.getEnd() < max) {
                        SpellWordTree extraWord = new SpellWordTree();
                        extraWord.setKey(spellWordTree.getEnd() + "-" + max);
                        ArrayList<String> word = new ArrayList<String>();
                        word.add(oldWord.get(spellWordTree.getEnd() + "-" + max));
                        extraWord.setWord(word);
                        spellWords.add(extraWord);
                    }
                    spellWords.add(spellWordTree);
                    matching(oldWord, spellWordTree.getStart(), spellWords);
                    return;
                }
            }
        }
    }
}

/**
 * Counts how many documents the candidate word matches; used to rank
 * correction candidates. (The method name keeps its historical typo "Wrod"
 * because callers elsewhere in this file use it.)
 *
 * @param word candidate correction
 * @return number of matching documents, or 0 when the query fails
 * @author songqinghu (2016-09-13)
 */
private Long findSpellWrodResult(String word) {
    Formula f = new Formula();
    f.append(new Query(ProductBean.Fd.name.name(), word)).tagO();
    f.append(new Query(ProductBean.Fd.multiple.name(), word));
    SolrQuery query = new SolrQuery();
    query.set(CommonParams.Q, f.toString());
    query.setStart(0);
    // rows=0: only numFound is needed, not the documents themselves.
    query.setRows(0);
    try {
        QueryResponse response = productClient.query(query);
        return response.getResults().getNumFound();
    } catch (SolrServerException | IOException e) {
        e.printStackTrace(); // TODO(review): route through a logger instead
    }
    return 0L;
}
長詞匹配部分 --使用solr的高亮來完成/*** * @描述:ngram 切割 轉為拼音后處理* @param word* @return String* @createTime:2016年9月14日* @author: songqinghu*/private String minSpilt(String oldWord){//分詞Map<String, String> terms = NGram11TokenizerUtil.analyzer(oldWord);//輸入原始詞匯組合高亮 等待比較Map<String, String> newTerms = new HashMap<String,String>();//高亮詞List<Set<String>> highlightingWord = highlightingWord(terms,newTerms,oldWord);//校驗詞TreeSet<String> treewords = new TreeSet<String>();for (Set<String> highterms : highlightingWord) {List<String> highList = new ArrayList<String>();highList.addAll(highterms);Set<String> keys = newTerms.keySet();List<String> deletekeys = new ArrayList<String>();Map<String, String> temp = new HashMap<String,String>();for (String key : keys) {if(!highList.contains(newTerms.get(key))){temp.put(key, newTerms.get(key));//待糾正}else{deletekeys.add(newTerms.get(key));//待刪除}}for (String deletekey : deletekeys) {highList.remove(deletekey);//糾正詞}HashMap<String, List<String>> words = new HashMap<String,List<String>>();//糾正后的詞for (String high : highList) {//獲取位置String[] pinyins = high.split(" ");for (String key : temp.keySet()) {String[] oldPinyins = temp.get(key).split(" ");for (int i = 1; i < oldPinyins.length; i++) {for (int j = 1; j < pinyins.length; j++) {if(oldPinyins[i].equals(pinyins[j])){//找到位置if(!words.containsKey(key)){List<String> word = new ArrayList<String>();word.add(pinyins[0]);//漢字words.put(key, word);}else{words.get(key).add(pinyins[0]);}i=oldPinyins.length;j=pinyins.length;}}}}}//組合 words termsList<String> newWords = new ArrayList<String>();newWords.add("");//種子for (int i = 0; i < terms.size(); i++) {if(words.containsKey(i+"-"+(i+1))){List<String> list = words.get(i+"-"+(i+1));List<String> tempword = new ArrayList<String>();for (String word : list) {for (String seed : newWords) {seed = seed +word;tempword.add(seed);}}newWords =tempword;}else{String word = terms.get(i+"-"+(i+1));List<String> tempword = new ArrayList<String>();for (String seed : newWords) {seed = seed 
+word;tempword.add(seed);}newWords =tempword;}} treewords.addAll(newWords);}Long maxCount =-1l;String result="";for (String word : treewords) {Long count = findSpellWrodResult(word);if(count>maxCount){result=word;}}return result;}/*** @描述:獲取高亮結果集合* @return void* @createTime:2016年9月14日* @author: songqinghu*/private List<Set<String>> highlightingWord(Map<String, String> terms, Map<String, String> newTerms,String oldWord){//按照順序取出 轉換為拼音 組合StringBuffer value = new StringBuffer("( ");//組裝for (int i = 0; i < oldWord.length(); i++) {String term = terms.get(i+"-"+(i+1));Set<String> pinyins = Pinyin4jUtil.converterToSpellToSet(term);StringBuffer oldtemp = new StringBuffer();oldtemp.append("<").append(term).append(">");for (String pinyin : pinyins) {oldtemp.append(" <").append(pinyin).append(">");}newTerms.put(i+"-"+(i+1), oldtemp.toString());StringBuffer temp = new StringBuffer();Iterator<String> iterator = pinyins.iterator();while (iterator.hasNext()) {temp.append(iterator.next());if(iterator.hasNext()){temp.append(" OR ");}}// String one = temp.toString();// temp.append(" OR ").append(term);// String two = temp.toString();// value.append("(( "+temp.toString()+" )" + " AND " + " ( " + two +" ))");value.append("(( ").append(temp.toString()).append(" )").append(" AND ").append(" ( ").append(temp.append(" OR ").append(term)).append(" ))");if(i<oldWord.length()-1){value.append(" AND ");}}value.append(")");SolrQuery query = new SolrQuery();query.set(CommonParams.Q, "spellWords : "+ value.toString());query.setHighlight(true);query.addHighlightField("spellWords");query.setHighlightSimplePre("<");query.setHighlightSimplePost(">");query.setHighlightSnippets(100);query.set(CommonParams.FL, ProductBean.Fd.id.name());query.setRows(3);try {QueryResponse response = productClient.query(query);SolrDocumentList docs = response.getResults();Map<String, Map<String, List<String>>> highlighting = response.getHighlighting();if(docs.size()>0){List<Set<String>> hightermList = new 
ArrayList<Set<String>>();for (SolrDocument doc : docs) {Set<String> hightterms = new TreeSet<String>();String id = doc.getFieldValue("id").toString();//具體糾錯處理Map<String, List<String>> map = highlighting.get(id);List<String> wordList = map.get("spellWords");for (String spell : wordList) {hightterms.add(spell);//收集去重復}//return hightterms; //先做一個hightermList.add(hightterms);}return hightermList;}} catch (SolrServerException | IOException e) {e.printStackTrace();} return null;}
分詞器:
package cn.com.mx.gome.suggest.util.analyzer;

import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

import cn.com.mx.gome.intensive.log.RsysLog;
import cn.com.mx.gome.intensive.log.RsysLog.RsysExecLog;

/**
 * 1-gram tokenizer utility: splits text into single characters keyed by
 * their "startOffset-endOffset" position.
 *
 * Thread-safe: each call uses its own Tokenizer instance, so no lock is
 * needed. The previous implementation shared one static Tokenizer and, on
 * IOException, nulled the method PARAMETER (which shadowed the static field)
 * instead of the field itself — every later call then reused a closed
 * tokenizer. It also called reset() a second time where end() belongs.
 *
 * @author songqinghu
 * @date 2016年8月3日
 */
public class NGram11TokenizerUtil {

    private static final RsysExecLog logger = RsysLog.getInstance().getRsysExecLog();

    // Utility class — not instantiable.
    private NGram11TokenizerUtil() {
    }

    /**
     * Splits the input text with an n-gram (1,1) tokenizer.
     *
     * @param text input text
     * @return map of "startOffset-endOffset" -> token text; empty on error
     */
    public static Map<String, String> analyzer(String text) {
        Map<String, String> words = new HashMap<String, String>();
        // Fresh tokenizer per call: cheap, and avoids shared mutable state.
        Tokenizer tokenizer = new NGramTokenizer(1, 1);
        tokenizer.setReader(new StringReader(text));
        try {
            // Attributes must be obtained before reset() per the Lucene
            // TokenStream workflow: addAttribute -> reset -> incrementToken* -> end -> close.
            CharTermAttribute charTermAttribute = tokenizer.addAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAttribute = tokenizer.addAttribute(OffsetAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                words.put(offsetAttribute.startOffset() + "-" + offsetAttribute.endOffset(),
                        charTermAttribute.toString());
            }
            tokenizer.end();
        } catch (IOException e) {
            logger.error("tokenizer occur error : " + e);
        } finally {
            try {
                tokenizer.close(); // always release the underlying reader
            } catch (IOException e) {
                logger.error("tokenizer close error : " + e);
            }
        }
        return words;
    }
}
將漢字轉為同音字,查詢單個詞庫中的數據,選出音一樣的詞語列表,加上最小距離算法(保證至少一個漢字一樣),得出一個列表,按照一定的算法排序后,選出最好的那個詞語.
(詞語庫中詞語定時更新,索引對應詞語的查詢結果)
2.當漢字個數在4到6個數目時,使用最大匹配切詞法進行切詞處理,切分為單個最大詞后,使用1中的排序法則,排序后,(這里詞語的組合如何去做?? 先不做,涉及nlp),直接得到最佳的三個詞語吧,進行查詢,得到結果數.再次使用最小距離算法的方式(solr自身的)進行一次糾錯,將兩次糾錯結果進行算法排序,得出最優化結果
3.當查詢漢字個數大于6個時,使用切詞的方式進行處理,將詞庫中需要查詢到的漢字預先使用n-gram處理后,將漢字和拼音為一個單位放入多值字段中.
將查詢的漢字使用n-gram切分后,轉為拼音,進行上述字段的查詢,對每個拼音對應的進行高亮處理,取出對應的列表,將一系列漢字組合后,假設經過優化后(最小距離,前后關系 等)數目是3,將這個三個詞語再次查詢,查出相應的結果數目,
再次使用solr自身的最小距離算法模塊,查出一個糾錯列表
將兩次結果經過算法處理后,得出最優解,給出建議!
代碼實現:
將思路中的第 1、2 點合併了一下,原理都是一樣的。
短詞糾錯部分實現
/**
 * Maximum-matching spell correction for short queries: pinyin matching plus
 * minimum edit distance. Examples: 電視機, 名師輔導, 權威名師輔導.
 *
 * @param word the raw query text to correct
 * @return the best corrected suggestion, or "" when no candidate was found
 * @author songqinghu (2016-09-12)
 */
private String maxMatching(String word) {
    // Key "start-end" -> substring, produced by the n-gram analyzer.
    Map<String, String> oldWord = NGramTokenizerUtil.analyzer(word);
    String spellword = "";
    List<SpellWordTree> spellWords = new ArrayList<SpellWordTree>();
    matching(oldWord, word.length(), spellWords);
    if (spellWords.size() > 0) {
        // If the leftmost matched segment does not start at offset 0, keep the
        // uncovered prefix verbatim as its own single-candidate segment.
        int start = spellWords.get(spellWords.size() - 1).getStart();
        if (start != 0) {
            SpellWordTree spellWordTree = new SpellWordTree();
            spellWordTree.setKey(0 + "-" + start);
            ArrayList<String> wordList = new ArrayList<String>();
            wordList.add(oldWord.get(spellWordTree.getKey()));
            spellWordTree.setWord(wordList);
            spellWords.add(spellWordTree);
        }
        // Cartesian combination of every segment's candidate words.
        // spellWords is ordered rightmost-segment first, so iterate backwards.
        List<String> words = new ArrayList<String>();
        words.add(spellword); // seed
        for (int i = spellWords.size() - 1; i >= 0; i--) {
            List<String> temp = new ArrayList<String>();
            for (String minword : spellWords.get(i).getWord()) {
                for (String seedword : words) {
                    temp.add(seedword + minword);
                }
            }
            words = temp;
        }
        if (words.size() == 1) {
            return words.get(0);
        }
        // Rank candidates by how many documents each matches in the index.
        long maxCount = -1L;
        for (String string : words) {
            Long count = findSpellWrodResult(string);
            if (count > maxCount) {
                spellword = string;
                maxCount = count;
            }
        }
    }
    return spellword;
}

/**
 * Recursive maximum matching: finds the longest span whose pinyin matches a
 * dictionary word, records it (plus any uncovered suffix), then recurses on
 * the remaining prefix. Segments are appended rightmost first.
 *
 * @param oldWord    "start-end" -> substring map from the n-gram analyzer
 * @param max        exclusive end offset of the range still to be matched
 * @param spellWords collector of matched segments
 * @author songqinghu (2016-09-13)
 */
private void matching(Map<String, String> oldWord, int max, List<SpellWordTree> spellWords) {
    for (int i = max; i >= 0; i--) {
        for (int j = 0; j < i - 1; j++) {
            String key = j + "-" + i;
            String value = oldWord.get(key);
            // FIX: guard against null — the analyzer map does not necessarily
            // contain every possible "j-i" span, and the original code called
            // value.length() without a null check.
            if (value != null && value.length() > 1) {
                SpellWordTree spellWordTree = new SpellWordTree();
                if (singleWordhandle(value, spellWordTree)) {
                    // Build the tree structure over the 0..max range;
                    // later combined into candidate words.
                    spellWordTree.setKey(key);
                    // Keep any uncovered suffix verbatim.
                    if (spellWordTree.getEnd() < max) {
                        SpellWordTree extraWord = new SpellWordTree();
                        extraWord.setKey(spellWordTree.getEnd() + "-" + max);
                        ArrayList<String> word = new ArrayList<String>();
                        word.add(oldWord.get(spellWordTree.getEnd() + "-" + max));
                        extraWord.setWord(word);
                        spellWords.add(extraWord);
                    }
                    spellWords.add(spellWordTree);
                    matching(oldWord, spellWordTree.getStart(), spellWords);
                    return;
                }
            }
        }
    }
}

/**
 * Counts how many documents the candidate word matches; used to rank
 * correction candidates. (The method name keeps its historical typo "Wrod"
 * because callers elsewhere in this file use it.)
 *
 * @param word candidate correction
 * @return number of matching documents, or 0 when the query fails
 * @author songqinghu (2016-09-13)
 */
private Long findSpellWrodResult(String word) {
    Formula f = new Formula();
    f.append(new Query(ProductBean.Fd.name.name(), word)).tagO();
    f.append(new Query(ProductBean.Fd.multiple.name(), word));
    SolrQuery query = new SolrQuery();
    query.set(CommonParams.Q, f.toString());
    query.setStart(0);
    // rows=0: only numFound is needed, not the documents themselves.
    query.setRows(0);
    try {
        QueryResponse response = productClient.query(query);
        return response.getResults().getNumFound();
    } catch (SolrServerException | IOException e) {
        e.printStackTrace(); // TODO(review): route through a logger instead
    }
    return 0L;
}
長詞匹配部分 --使用solr的高亮來完成/*** * @描述:ngram 切割 轉為拼音后處理* @param word* @return String* @createTime:2016年9月14日* @author: songqinghu*/private String minSpilt(String oldWord){//分詞Map<String, String> terms = NGram11TokenizerUtil.analyzer(oldWord);//輸入原始詞匯組合高亮 等待比較Map<String, String> newTerms = new HashMap<String,String>();//高亮詞List<Set<String>> highlightingWord = highlightingWord(terms,newTerms,oldWord);//校驗詞TreeSet<String> treewords = new TreeSet<String>();for (Set<String> highterms : highlightingWord) {List<String> highList = new ArrayList<String>();highList.addAll(highterms);Set<String> keys = newTerms.keySet();List<String> deletekeys = new ArrayList<String>();Map<String, String> temp = new HashMap<String,String>();for (String key : keys) {if(!highList.contains(newTerms.get(key))){temp.put(key, newTerms.get(key));//待糾正}else{deletekeys.add(newTerms.get(key));//待刪除}}for (String deletekey : deletekeys) {highList.remove(deletekey);//糾正詞}HashMap<String, List<String>> words = new HashMap<String,List<String>>();//糾正后的詞for (String high : highList) {//獲取位置String[] pinyins = high.split(" ");for (String key : temp.keySet()) {String[] oldPinyins = temp.get(key).split(" ");for (int i = 1; i < oldPinyins.length; i++) {for (int j = 1; j < pinyins.length; j++) {if(oldPinyins[i].equals(pinyins[j])){//找到位置if(!words.containsKey(key)){List<String> word = new ArrayList<String>();word.add(pinyins[0]);//漢字words.put(key, word);}else{words.get(key).add(pinyins[0]);}i=oldPinyins.length;j=pinyins.length;}}}}}//組合 words termsList<String> newWords = new ArrayList<String>();newWords.add("");//種子for (int i = 0; i < terms.size(); i++) {if(words.containsKey(i+"-"+(i+1))){List<String> list = words.get(i+"-"+(i+1));List<String> tempword = new ArrayList<String>();for (String word : list) {for (String seed : newWords) {seed = seed +word;tempword.add(seed);}}newWords =tempword;}else{String word = terms.get(i+"-"+(i+1));List<String> tempword = new ArrayList<String>();for (String seed : newWords) {seed = seed 
+word;tempword.add(seed);}newWords =tempword;}} treewords.addAll(newWords);}Long maxCount =-1l;String result="";for (String word : treewords) {Long count = findSpellWrodResult(word);if(count>maxCount){result=word;}}return result;}/*** @描述:獲取高亮結果集合* @return void* @createTime:2016年9月14日* @author: songqinghu*/private List<Set<String>> highlightingWord(Map<String, String> terms, Map<String, String> newTerms,String oldWord){//按照順序取出 轉換為拼音 組合StringBuffer value = new StringBuffer("( ");//組裝for (int i = 0; i < oldWord.length(); i++) {String term = terms.get(i+"-"+(i+1));Set<String> pinyins = Pinyin4jUtil.converterToSpellToSet(term);StringBuffer oldtemp = new StringBuffer();oldtemp.append("<").append(term).append(">");for (String pinyin : pinyins) {oldtemp.append(" <").append(pinyin).append(">");}newTerms.put(i+"-"+(i+1), oldtemp.toString());StringBuffer temp = new StringBuffer();Iterator<String> iterator = pinyins.iterator();while (iterator.hasNext()) {temp.append(iterator.next());if(iterator.hasNext()){temp.append(" OR ");}}// String one = temp.toString();// temp.append(" OR ").append(term);// String two = temp.toString();// value.append("(( "+temp.toString()+" )" + " AND " + " ( " + two +" ))");value.append("(( ").append(temp.toString()).append(" )").append(" AND ").append(" ( ").append(temp.append(" OR ").append(term)).append(" ))");if(i<oldWord.length()-1){value.append(" AND ");}}value.append(")");SolrQuery query = new SolrQuery();query.set(CommonParams.Q, "spellWords : "+ value.toString());query.setHighlight(true);query.addHighlightField("spellWords");query.setHighlightSimplePre("<");query.setHighlightSimplePost(">");query.setHighlightSnippets(100);query.set(CommonParams.FL, ProductBean.Fd.id.name());query.setRows(3);try {QueryResponse response = productClient.query(query);SolrDocumentList docs = response.getResults();Map<String, Map<String, List<String>>> highlighting = response.getHighlighting();if(docs.size()>0){List<Set<String>> hightermList = new 
ArrayList<Set<String>>();for (SolrDocument doc : docs) {Set<String> hightterms = new TreeSet<String>();String id = doc.getFieldValue("id").toString();//具體糾錯處理Map<String, List<String>> map = highlighting.get(id);List<String> wordList = map.get("spellWords");for (String spell : wordList) {hightterms.add(spell);//收集去重復}//return hightterms; //先做一個hightermList.add(hightterms);}return hightermList;}} catch (SolrServerException | IOException e) {e.printStackTrace();} return null;}
使用到的工具類:
分詞器:
package cn.com.mx.gome.suggest.util.analyzer;

import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

import cn.com.mx.gome.intensive.log.RsysLog;
import cn.com.mx.gome.intensive.log.RsysLog.RsysExecLog;

/**
 * 1-gram tokenizer utility: splits text into single characters keyed by
 * their "startOffset-endOffset" position.
 *
 * Thread-safe: each call uses its own Tokenizer instance, so no lock is
 * needed. The previous implementation shared one static Tokenizer and, on
 * IOException, nulled the method PARAMETER (which shadowed the static field)
 * instead of the field itself — every later call then reused a closed
 * tokenizer. It also called reset() a second time where end() belongs.
 *
 * @author songqinghu
 * @date 2016年8月3日
 */
public class NGram11TokenizerUtil {

    private static final RsysExecLog logger = RsysLog.getInstance().getRsysExecLog();

    // Utility class — not instantiable.
    private NGram11TokenizerUtil() {
    }

    /**
     * Splits the input text with an n-gram (1,1) tokenizer.
     *
     * @param text input text
     * @return map of "startOffset-endOffset" -> token text; empty on error
     */
    public static Map<String, String> analyzer(String text) {
        Map<String, String> words = new HashMap<String, String>();
        // Fresh tokenizer per call: cheap, and avoids shared mutable state.
        Tokenizer tokenizer = new NGramTokenizer(1, 1);
        tokenizer.setReader(new StringReader(text));
        try {
            // Attributes must be obtained before reset() per the Lucene
            // TokenStream workflow: addAttribute -> reset -> incrementToken* -> end -> close.
            CharTermAttribute charTermAttribute = tokenizer.addAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAttribute = tokenizer.addAttribute(OffsetAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                words.put(offsetAttribute.startOffset() + "-" + offsetAttribute.endOffset(),
                        charTermAttribute.toString());
            }
            tokenizer.end();
        } catch (IOException e) {
            logger.error("tokenizer occur error : " + e);
        } finally {
            try {
                tokenizer.close(); // always release the underlying reader
            } catch (IOException e) {
                logger.error("tokenizer close error : " + e);
            }
        }
        return words;
    }
}
糾錯演示:
短詞糾錯:
長詞糾錯:
到這裡基本功能已經實現,剩餘的就是持續的優化工作了。
總結
以上是生活随笔為你收集整理的Solr进阶之拼写纠错功能的实现基础拼音的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: [html] 元素的alt和title
- 下一篇: GPS静态观测系列