The following are example usages of org.wltea.analyzer.lucene.IKAnalyzer together with Lucene's indexing, search, and highlighting (org.apache.lucene.search.highlight.SimpleFragmenter) APIs.
private static void showSearchResults(IndexSearcher searcher, ScoreDoc[] hits, Query query, IKAnalyzer ikAnalyzer) throws IOException {
    System.out.println("Found " + hits.length + " hits.");
    System.out.println("No.\tScore\tResult");
    for (int i = 0; i < hits.length; i++) {
        ScoreDoc scoreDoc = hits[i];
        int docId = scoreDoc.doc;
        Document document = searcher.doc(docId);
        List<IndexableField> fields = document.getFields();
        System.out.print((i + 1));
        System.out.print("\t" + scoreDoc.score);
        // Print every stored field of the matched document
        for (IndexableField f : fields) {
            System.out.print("\t" + document.get(f.name()));
        }
        System.out.println();
    }
    // Note: query and ikAnalyzer are not needed for plain result printing;
    // they come into play when highlighting matched fragments (see the sketch below).
}
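The unused query and ikAnalyzer parameters above are typically there for hit highlighting, which is where org.apache.lucene.search.highlight.SimpleFragmenter comes in. A minimal sketch (not part of the original example) of such a helper, assuming the lucene-highlighter module is on the classpath and that the field being highlighted is "name":
// Hypothetical highlighting helper; requires the lucene-highlighter module
// (Highlighter, QueryScorer, SimpleHTMLFormatter, SimpleFragmenter, InvalidTokenOffsetsException).
private static String highlightFragment(Query query, IKAnalyzer analyzer, String text)
        throws IOException, InvalidTokenOffsetsException {
    // Score fragments against the query and wrap matched terms in <b> tags
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"), new QueryScorer(query));
    // Break the text into fragments of roughly 50 characters
    highlighter.setTextFragmenter(new SimpleFragmenter(50));
    // Returns the best-scoring fragment, or null when no term matches
    return highlighter.getBestFragment(analyzer, "name", text);
}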
/**
 * Wraps a method that analyzes database records into individual terms and stores them in the index files.
 * @param document
 * @throws IOException
 */
public static void write(Document document) throws IOException {
    // Directory where the index is stored
    Directory directory = FSDirectory.open(Paths.get(dir));
    // Since Lucene 6.6 the Version argument is no longer required; IndexWriterConfig also has a
    // no-arg constructor that falls back to the default StandardAnalyzer.
    // Create the index writer configuration
    Analyzer analyzer = new IKAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    // Create the index writer
    IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
    // Write the document into the index directory
    indexWriter.addDocument(document);
    // Commit the changes
    indexWriter.commit();
    // Close the writer
    indexWriter.close();
}
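A minimal sketch of how write(...) might be called; the document fields shown here are assumptions borrowed from the update example below, not a fixed schema:
// Hypothetical caller of write(); field names are assumptions.
Document document = new Document();
document.add(new StringField("id", "42", Field.Store.YES));       // exact-match key for later updates/deletes
document.add(new TextField("title", "Lucene 结合 IKAnalyzer 的中文分词", Field.Store.YES)); // analyzed by IKAnalyzer
document.add(new TextField("text", "正文内容", Field.Store.YES));
write(document);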
/**
 * Update the indexed content for the given article ID.
 * @param blogArticle
 * @throws IOException
 */
public static void updateIndexById(BlogArticle blogArticle) throws IOException {
    Directory directory = FSDirectory.open(Paths.get(dir)); // open the index directory
    Analyzer analyzer = new IKAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    // Create the index writer
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);
    Document doc = new Document();
    // LongPoint supports numeric/range queries but does not index a term, so a StringField with the
    // same value is also added; otherwise updateDocument/deleteDocuments with Term("id", ...) never matches.
    doc.add(new LongPoint("id", blogArticle.getId()));
    doc.add(new StringField("id", blogArticle.getId().toString(), Field.Store.YES));
    doc.add(new TextField("title", blogArticle.getTitle(), Field.Store.YES));
    doc.add(new TextField("marks", blogArticle.getMarks() == null ? "" : blogArticle.getMarks(), Field.Store.YES));
    doc.add(new TextField("text", blogArticle.getText() == null ? "" : blogArticle.getText(), Field.Store.YES));
    doc.add(new StoredField("href", blogArticle.getBlogChannel().getHref()));
    doc.add(new StoredField("show_pic", blogArticle.getShowPic() == null ? "" : blogArticle.getShowPic()));
    // Replace the existing document whose "id" term matches, or add it if none exists
    writer.updateDocument(new Term("id", blogArticle.getId().toString()), doc);
    writer.commit(); // commit
    writer.close();  // close
}
@RequestMapping("searchBook.do")
public ModelAndView searchBook(Book book) throws IOException, ParseException {
ModelAndView mav = new ModelAndView("searchBook");
// 关键字
String keyword = book.getName();
System.out.println(keyword);
// 准备中文分词器
IKAnalyzer analyzer = new IKAnalyzer();
// 索引
Directory index = createIndex(analyzer);
// 查询器
Query query = new QueryParser("name",analyzer).parse(keyword);
// 搜索
IndexReader reader = DirectoryReader.open(index);
IndexSearcher searcher = new IndexSearcher(reader);
int numberPerPage = 10;
ScoreDoc[] hits = searcher.search(query,numberPerPage).scoreDocs;
List<Book> books = new ArrayList<>();
for (int i = 0; i < hits.length; i++) {
ScoreDoc scoreDoc = hits[i];
int docId = scoreDoc.doc;
Document document = searcher.doc(docId);
Book tmpBook = bookService.get(Integer.parseInt(document.get("id")));
books.add(tmpBook);
}
mav.addObject("books",books);
return mav;
}
private Directory createIndex(IKAnalyzer analyzer) throws IOException {
    Directory index = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    IndexWriter writer = new IndexWriter(index, config);
    List<Book> books = bookService.listByBookType(1);
    for (Book book : books) {
        addDoc(writer, book);
    }
    writer.close();
    return index;
}
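The addDoc(writer, book) helper is referenced above but not shown. A minimal sketch, assuming Book exposes getId() and getName(); the stored "id" field is what searchBook reads back, and "name" is the field the QueryParser targets:
// Hypothetical addDoc helper; Book accessors and field names are assumptions.
private void addDoc(IndexWriter writer, Book book) throws IOException {
    Document doc = new Document();
    doc.add(new StoredField("id", String.valueOf(book.getId())));    // stored only, read back via document.get("id")
    doc.add(new TextField("name", book.getName(), Field.Store.YES)); // analyzed by IKAnalyzer, searched on "name"
    writer.addDocument(doc);
}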
public static void main(String[] args) throws IOException, ParseException {
    // 1. Prepare the Chinese analyzer
    IKAnalyzer analyzer = new IKAnalyzer();
    // 2. Build the index from sample product names
    List<String> productNames = new ArrayList<>();
    productNames.add("飞利浦led灯泡e27螺口暖白球泡灯家用照明超亮节能灯泡转色温灯泡");
    productNames.add("飞利浦led灯泡e14螺口蜡烛灯泡3W尖泡拉尾节能灯泡暖黄光源Lamp");
    productNames.add("雷士照明 LED灯泡 e27大螺口节能灯3W球泡灯 Lamp led节能灯泡");
    productNames.add("飞利浦 led灯泡 e27螺口家用3w暖白球泡灯节能灯5W灯泡LED单灯7w");
    productNames.add("飞利浦led小球泡e14螺口4.5w透明款led节能灯泡照明光源lamp单灯");
    productNames.add("飞利浦蒲公英护眼台灯工作学习阅读节能灯具30508带光源");
    productNames.add("欧普照明led灯泡蜡烛节能灯泡e14螺口球泡灯超亮照明单灯光源");
    productNames.add("欧普照明led灯泡节能灯泡超亮光源e14e27螺旋螺口小球泡暖黄家用");
    productNames.add("聚欧普照明led灯泡节能灯泡e27螺口球泡家用led照明单灯超亮光源");
    Directory index = createIndex(analyzer, productNames);
    // 3. Build the query
    String keyword = "护眼带光源";
    Query query = new QueryParser("name", analyzer).parse(keyword);
    // 4. Search
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    int numberPerPage = 1000;
    System.out.printf("There are %d records in total%n", productNames.size());
    System.out.printf("The search keyword is: \"%s\"%n", keyword);
    ScoreDoc[] hits = searcher.search(query, numberPerPage).scoreDocs;
    // 5. Show the search results
    showSearchResults(searcher, hits, query, analyzer);
}
private static Directory createIndex(IKAnalyzer analyzer, List<String> products) throws IOException {
    Directory index = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    IndexWriter writer = new IndexWriter(index, config);
    for (String name : products) {
        addDoc(writer, name);
    }
    writer.close();
    return index;
}
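The product-name variant of addDoc is likewise not shown. A minimal sketch, assuming each product name is indexed as the "name" TextField that the QueryParser above searches:
// Hypothetical addDoc helper for the product-name demo; the "name" field is an assumption.
private static void addDoc(IndexWriter writer, String name) throws IOException {
    Document doc = new Document();
    doc.add(new TextField("name", name, Field.Store.YES)); // analyzed by IKAnalyzer and stored for display
    writer.addDocument(doc);
}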
/**
 * Delete the index entry for the given ID.
 * @param id
 * @throws IOException
 */
public static void deleteIndexById(String id) throws IOException {
    Directory directory = FSDirectory.open(Paths.get(dir)); // open the index directory
    Analyzer analyzer = new IKAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    // Create the index writer
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);
    // Matches the StringField "id" written in updateIndexById
    Query q = new TermQuery(new Term("id", id));
    writer.deleteDocuments(q); // delete the document with the given ID
    writer.commit(); // commit
    writer.close();  // close
}
public static void main(String[] args) {
    String keyWord = "Java的分词效果到底怎么样呢,我们来看一下吧[抱抱]";
    // Create the IKAnalyzer Chinese analyzer
    IKAnalyzer analyzer = new IKAnalyzer();
    // Enable smart (coarse-grained) segmentation
    analyzer.setUseSmart(true);
    // Print the segmentation result
    try {
        printAnalysisResult(analyzer, keyWord);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
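printAnalysisResult is not shown above. A minimal sketch of what such a method might look like: it iterates the analyzer's TokenStream and prints one segmented term per line; the field name "content" is arbitrary:
// Hypothetical implementation of printAnalysisResult; the field name "content" is arbitrary.
private static void printAnalysisResult(Analyzer analyzer, String text) throws IOException {
    try (TokenStream tokenStream = analyzer.tokenStream("content", text)) {
        CharTermAttribute termAttr = tokenStream.addAttribute(CharTermAttribute.class);
        tokenStream.reset();                         // must be called before incrementToken()
        while (tokenStream.incrementToken()) {
            System.out.println(termAttr.toString()); // one segmented term per line
        }
        tokenStream.end();
    }
}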
public void indexInit() throws Exception {
    Analyzer analyzer = new IKAnalyzer();
    // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_44);
    this.indexSettings = new LuceneIndexSettings(analyzer);
    this.indexSettings.createFSDirectory("f:\\file");
    this.luceneIndex = new LuceneIndex(this.indexSettings);
    this.luceneIndexSearch = new LuceneIndexSearch(indexSettings, new LuceneResultCollector(indexSettings));
}
@Inject
public IKAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettings, name, settings);
    loader = ServiceLoader.load(Configuration.class);
    Iterator<Configuration> iterator = loader.iterator();
    if (!iterator.hasNext()) {
        throw new NotFoundIKAnalyzerConfigurationImplementation();
    }
    analyzer = new IKAnalyzer(iterator.next().init(index, indexSettings, env, name, settings));
}
@Override
public IKAnalyzer get() {
    return this.analyzer;
}
public IkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings, boolean useSmart) {
    super(indexSettings, name, settings);
    Configuration configuration = new Configuration(env, settings).setUseSmart(useSmart);
    analyzer = new IKAnalyzer(configuration);
}