Class org.hibernate.search.SearchFactory: source code examples

The following examples show how to use the org.hibernate.search.SearchFactory API in real projects; you can also follow the project links to view the full source code on GitHub.
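
Before the project examples, here is a minimal sketch of how a SearchFactory is usually obtained; the Session and EntityManager variables are placeholders and are not taken from any of the listed projects:

import javax.persistence.EntityManager;

import org.hibernate.Session;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.hibernate.search.SearchFactory;
import org.hibernate.search.jpa.FullTextEntityManager;

public class SearchFactoryLookup {

	// Native Hibernate API: wrap the Session in a FullTextSession, then ask it for the SearchFactory.
	public static SearchFactory fromSession(Session session) {
		FullTextSession fullTextSession = Search.getFullTextSession(session);
		return fullTextSession.getSearchFactory();
	}

	// JPA API: use the JPA-flavoured Search helper to wrap the EntityManager.
	public static SearchFactory fromEntityManager(EntityManager em) {
		FullTextEntityManager ftEm = org.hibernate.search.jpa.Search.getFullTextEntityManager(em);
		return ftEm.getSearchFactory();
	}
}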

Example 1  Project: projectforge-webapp  File: DatabaseDao.java
/**
 * 
 * @param clazz
 */
private long reindexMassIndexer(final Class< ? > clazz)
{
  final Session session = getSession();
  final Criteria criteria = createCriteria(session, clazz, null, true);
  final Long number = (Long) criteria.uniqueResult(); // Get number of objects to re-index (select count(*) from).
  log.info("Starting (mass) re-indexing of " + number + " entries of type " + clazz.getName() + "...");
  final FullTextSession fullTextSession = Search.getFullTextSession(session);
  try {
    fullTextSession.createIndexer(clazz)//
    .batchSizeToLoadObjects(25) //
    //.cacheMode(CacheMode.NORMAL) //
    .threadsToLoadObjects(5) //
    //.threadsForIndexWriter(1) //
    .threadsForSubsequentFetching(20) //
    .startAndWait();
  } catch (final InterruptedException ex) {
    log.error("Exception encountered while reindexing: " + ex.getMessage(), ex);
  }
  final SearchFactory searchFactory = fullTextSession.getSearchFactory();
  searchFactory.optimize(clazz);
  log.info("Re-indexing of " + number + " objects of type " + clazz.getName() + " done.");
  return number;
}
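
The same mass re-indexing can also be driven through the JPA API; the following sketch mirrors the MassIndexer settings above and assumes a FullTextEntityManager is available (the helper class is illustrative, not part of projectforge-webapp):

import org.hibernate.search.SearchFactory;
import org.hibernate.search.jpa.FullTextEntityManager;

public class JpaReindexHelper {

	// Re-index one entity type through the JPA-flavoured MassIndexer, then optimize its index.
	public static void reindex(FullTextEntityManager ftEm, Class<?> clazz) throws InterruptedException {
		ftEm.createIndexer(clazz)
				.batchSizeToLoadObjects(25)
				.threadsToLoadObjects(5)
				.startAndWait();

		SearchFactory searchFactory = ftEm.getSearchFactory();
		searchFactory.optimize(clazz);
	}
}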
 
Example 2
/**
 * Do real indexes optimization.
 */
public static void optimizeIndexes() throws Exception {
	FullTextSession ftSession = null;
	Session session = null;

	if (optimizeIndexesRunning) {
		log.warn("*** Optimize indexes already running ***");
	} else {
		optimizeIndexesRunning = true;
		log.debug("*** Begin optimize indexes ***");

		try {
			session = HibernateUtil.getSessionFactory().openSession();
			ftSession = Search.getFullTextSession(session);

			// Optimize indexes
			SearchFactory searchFactory = ftSession.getSearchFactory();
			searchFactory.optimize();
		} catch (Exception e) {
			throw e;
		} finally {
			optimizeIndexesRunning = false;
			HibernateUtil.close(ftSession);
			HibernateUtil.close(session);
		}

		log.debug("*** End optimize indexes ***");
	}
}
 
Example 3  Project: document-management-system  File: IndexHelper.java
public void checkIndexOnStartup() {
	//log.info("Observed event {1} from Thread {0}", Thread.currentThread().getName(), App.INIT_SUCCESS);

	// See if we need to rebuild the index during startup ...
	FullTextEntityManager ftEm = Search.getFullTextEntityManager(entityManager);
	SearchFactory searchFactory = ftEm.getSearchFactory();
	ReaderProvider readerProvider = searchFactory.getReaderProvider();
	IndexReader reader = readerProvider.openReader(searchFactory.getDirectoryProviders(NodeDocumentVersion.class)[0]);
	int maxDoc = 0;

	try {
		maxDoc = reader.maxDoc();
	} finally {
		readerProvider.closeReader(reader);
	}

	if (maxDoc == 0) {
		log.warn("No objects indexed ... rebuilding Lucene search index from database ...");
		long _exit = 0L;
		long _entr = System.currentTimeMillis();

		try {
			int docs = doRebuildIndex();
			_exit = System.currentTimeMillis();
			log.info("Took " + (_exit - _entr)
					+ " (ms) to re-build the index containing " + docs
					+ " documents.");
		} catch (Exception exc) {
			if (exc instanceof RuntimeException) {
				throw (RuntimeException) exc;
			} else {
				throw new RuntimeException(exc);
			}
		}

		// build the spell checker index off of the HS index.
		buildSpellCheckerIndex(searchFactory);
	}
}
 
Example 4  Project: document-management-system  File: IndexHelper.java
protected void buildSpellCheckerIndex(SearchFactory searchFactory) {
	IndexReader reader = null;
	Directory dir = null;
	long _entr = System.currentTimeMillis();
	File spellCheckIndexDir = new File("lucene_index/spellcheck");
	log.info("Building SpellChecker index in {0}", spellCheckIndexDir.getAbsolutePath());
	ReaderProvider readerProvider = searchFactory.getReaderProvider();

	try {
		reader = readerProvider.openReader(searchFactory.getDirectoryProviders(NodeDocumentVersion.class)[0]);
		dir = FSDirectory.open(spellCheckIndexDir);
		SpellChecker spell = new SpellChecker(dir);
		spell.clearIndex();
		spell.indexDictionary(new LuceneDictionary(reader, NodeDocument.TEXT_FIELD));
		spell.close();
		dir.close();
		dir = null;
		long _exit = System.currentTimeMillis();
		log.info("Took {1} (ms) to build SpellChecker index in {0}",
				spellCheckIndexDir.getAbsolutePath(), String.valueOf((_exit - _entr)));
	} catch (Exception exc) {
		log.error("Failed to build spell checker index!", exc);
	} finally {
		if (dir != null) {
			try {
				dir.close();
			} catch (Exception zzz) {
			}
		}
		if (reader != null) {
			readerProvider.closeReader(reader);
		}
	}
}
 
Example 5  Project: document-management-system  File: SearchDAO.java
/**
 * Get Lucene index reader.
 */
@SuppressWarnings("rawtypes")
private IndexReader getReader(FullTextSession session, Class entity) {
	SearchFactory searchFactory = session.getSearchFactory();
	DirectoryProvider provider = searchFactory.getDirectoryProviders(entity)[0];
	ReaderProvider readerProvider = searchFactory.getReaderProvider();
	return readerProvider.openReader(provider);
}
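
The ReaderProvider and DirectoryProvider lookups above belong to the Hibernate Search 3.x API. On Hibernate Search 4.x the same reader lookup is normally done through the IndexReaderAccessor (as a later example in this listing does); a rough sketch:

import org.apache.lucene.index.IndexReader;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.SearchFactory;

public class ReaderLookup {

	// Borrow an IndexReader for the entity's index via the IndexReaderAccessor.
	public static IndexReader openReader(FullTextSession session, Class<?> entity) {
		SearchFactory searchFactory = session.getSearchFactory();
		return searchFactory.getIndexReaderAccessor().open(entity);
	}

	// Readers obtained this way must be returned through the same accessor.
	public static void closeReader(FullTextSession session, IndexReader reader) {
		session.getSearchFactory().getIndexReaderAccessor().close(reader);
	}
}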
 
Example 6  Project: webdsl  File: AbstractEntitySearcher.java
public IndexReader getReader( ) {
    SearchFactory searchFactory = getFullTextSession( ).getSearchFactory( );
    DirectoryProvider<?>[] providers = searchFactory
            .getDirectoryProviders( entityClass );

    return searchFactory.getReaderProvider( ).openReader( providers );
}
 
Example 7  Project: document-management-system  File: IndexHelper.java
public void updateSpellCheckerIndex(NodeDocumentVersion nDocVer) {
	log.info("Observed Wine added/updated event for {1} from Thread {0}",
			Thread.currentThread().getName(), String.valueOf(nDocVer));
	String text = (nDocVer != null) ? nDocVer.getText() : null;

	if (text != null) {
		Dictionary dictionary = null;

		try {
			FullTextEntityManager ftEm = (FullTextEntityManager) entityManager;
			SearchFactory searchFactory = ftEm.getSearchFactory();
			dictionary = new SetDictionary(text, searchFactory.getAnalyzer("wine_en"));
		} catch (IOException ioExc) {
			log.error("Failed to analyze dictionary text {0} from Wine {1} to update spell checker due to: {2}" +
					text + nDocVer.getUuid() + ioExc.toString());
		}

		if (dictionary != null) {
			Directory dir = null;
			// only allow one thread to update the index at a time ...
			// the Dictionary is pre-computed, so it should happen quickly
			// ...
			// this synchronized approach only works because this component
			// is application-scoped
			synchronized (this) {
				try {
					dir = FSDirectory.open(new File("lucene_index/spellcheck"));
					SpellChecker spell = new SpellChecker(dir);
					spell.indexDictionary(dictionary);
					spell.close();
					log.info("Successfully updated the spell checker index after Document added/updated.");
				} catch (Exception exc) {
					log.error("Failed to update the spell checker index!", exc);
				} finally {
					if (dir != null) {
						try {
							dir.close();
						} catch (Exception zzz) {
						}
					}
				}
			}
		}
	}
}
 
Example 8  Project: document-management-system  File: SearchDAO.java
/**
 * Get Lucene document terms.
 */
@SuppressWarnings("unchecked")
public List<String> getTerms(Class<?> entityType, String nodeUuid) throws CorruptIndexException, IOException {
	List<String> terms = new ArrayList<String>();
	FullTextSession ftSession = null;
	IndexSearcher searcher = null;
	ReaderProvider provider = null;
	Session session = null;
	IndexReader reader = null;

	try {
		session = HibernateUtil.getSessionFactory().openSession();
		ftSession = Search.getFullTextSession(session);
		SearchFactory sFactory = ftSession.getSearchFactory();
		provider = sFactory.getReaderProvider();
		QueryBuilder builder = sFactory.buildQueryBuilder().forEntity(entityType).get();
		Query query = builder.keyword().onField("uuid").matching(nodeUuid).createQuery();

		DirectoryProvider<Directory>[] dirProv = sFactory.getDirectoryProviders(NodeDocument.class);
		reader = provider.openReader(dirProv[0]);
		searcher = new IndexSearcher(reader);
		TopDocs topDocs = searcher.search(query, 1);

		for (ScoreDoc sDoc : topDocs.scoreDocs) {
			if (!reader.isDeleted(sDoc.doc)) {
				for (TermEnum te = reader.terms(); te.next(); ) {
					Term t = te.term();

					if ("text".equals(t.field())) {
						for (TermDocs tds = reader.termDocs(t); tds.next(); ) {
							if (sDoc.doc == tds.doc()) {
								terms.add(t.text());
								//log.info("Field: {} - {}", t.field(), t.text());
							}
						}
					}
				}
			}
		}
	} finally {
		if (provider != null && reader != null) {
			provider.closeReader(reader);
		}

		if (searcher != null) {
			searcher.close();
		}
		HibernateUtil.close(ftSession);
		HibernateUtil.close(session);
	}

	return terms;
}
 
Example 9
private void assertDocsInIndex(final Class<?> clazz, final String comment,
        final int expectedNumDocs, final int expectedNumIndexedAttributes,
        final List<String> expectedAttributes) throws Exception {
    Boolean evaluationTookPlace = runTX(new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            boolean evaluatedIndex = false;
            Session session = dm.getSession();
            if (session != null) {
                FullTextSession fullTextSession = Search
                        .getFullTextSession(session);
                SearchFactory searchFactory = fullTextSession
                        .getSearchFactory();
                IndexReader reader = searchFactory.getIndexReaderAccessor()
                        .open(clazz);

                try {
                    assertEquals(comment, expectedNumDocs,
                            reader.numDocs());
                    if (expectedNumDocs > 0) {
                        final FieldInfos indexedFieldNames = MultiFields
                                .getMergedFieldInfos(reader);
                        for (String expectedAttr : expectedAttributes) {
                            assertNotNull(
                                    "attribute " + expectedAttr
                                            + " does not exist in index: "
                                            + indexedFieldNames,
                                    indexedFieldNames
                                            .fieldInfo(expectedAttr));
                        }
                        assertNotNull(
                                "attribute \"key\" does not exist in index: "
                                        + indexedFieldNames,
                                indexedFieldNames.fieldInfo("key"));
                        assertNotNull(
                                "attribute \"_hibernate_class\" does not exist in index: "
                                        + indexedFieldNames,
                                indexedFieldNames
                                        .fieldInfo("_hibernate_class"));
                        assertEquals(
                                "More or less attributes indexed than expected, attributes retrieved from index: "
                                        + indexedFieldNames,
                                expectedNumIndexedAttributes + 2,
                                indexedFieldNames.size());
                        evaluatedIndex = true;
                    }
                } finally {
                    searchFactory.getIndexReaderAccessor().close(reader);
                }
            }

            return Boolean.valueOf(evaluatedIndex);
        }
    });

    if (expectedNumDocs > 0) {
        Assert.assertTrue("Index not found, no evaluation took place",
                evaluationTookPlace.booleanValue());
    }
}
 
Example 10  Project: webdsl  File: AbstractIndexManager.java
protected static SearchFactory getSearchFactory() {
	return getFullTextSession().getSearchFactory();
}
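
For completeness, a sketch of what a caller typically does with the SearchFactory returned by such a helper, assuming a Hibernate Search 4.x style API; the class and method names below are illustrative and not taken from the webdsl project:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.hibernate.search.SearchFactory;

public class SearchFactoryUsage {

	public static void inspectIndex(SearchFactory searchFactory, Class<?> entityClass) {
		// The scoped analyzer Hibernate Search uses for this indexed entity.
		Analyzer analyzer = searchFactory.getAnalyzer(entityClass);
		System.out.println("Analyzer for " + entityClass.getSimpleName() + ": " + analyzer.getClass().getName());

		// Borrow an IndexReader for the entity's index and return it when done.
		IndexReader reader = searchFactory.getIndexReaderAccessor().open(entityClass);
		try {
			System.out.println("Indexed documents: " + reader.numDocs());
		} finally {
			searchFactory.getIndexReaderAccessor().close(reader);
		}
	}
}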
 