下面列出了怎么用org.apache.commons.io.input.CharSequenceReader的API类实例代码及写法,或者点击链接到github查看源代码。
/**
 * Adds one clause to the given query for every synonym that the synonym
 * analyzer produces for the original term.
 *
 * @param dmq
 *            {@link DisjunctionMaxQuery} the synonym clauses are added to.
 * @param original
 *            Original term to determine synonyms for.
 * @throws IOException
 *             if the synonym token stream cannot be consumed.
 */
private void addSynonyms(DisjunctionMaxQuery dmq, CharSequence original) throws IOException {
    try (TokenStream stream = optSynonymAnalyzer.tokenStream("querqy", new CharSequenceReader(original))) {
        final CharTermAttribute termAttr = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // Copy the attribute content via toString(): the attribute instance
            // is reused for every token, so its value must not be stored directly.
            dmq.addClause(new Term(dmq, termAttr.toString(), true));
        }
        stream.end();
    }
}
@Test
public void givenUsingCommonsIO_whenWritingReaderContentsToFile_thenCorrect() throws IOException {
    final File targetFile = new File("src/test/resources/targetFile.txt");
    FileUtils.touch(targetFile);
    // try-with-resources: the reader is closed even if reading or writing
    // fails (the original closed it on the success path only).
    try (Reader initialReader = new CharSequenceReader("CharSequenceReader extends Reader")) {
        // Encode with an explicit charset; the charset-less overload is
        // deprecated and falls back to the platform default encoding.
        final byte[] buffer = IOUtils.toByteArray(initialReader, java.nio.charset.StandardCharsets.UTF_8);
        FileUtils.writeByteArrayToFile(targetFile, buffer);
    }
}
@Test
public void givenUsingCommonsIO_whenConvertingStringIntoReader_thenCorrect() throws IOException {
    final String initialString = "With Apache Commons IO";
    // try-with-resources guarantees the reader is closed even if an
    // exception is thrown (the original relied on a manual close()).
    try (Reader targetReader = new CharSequenceReader(initialString)) {
        // Reader is intentionally only constructed and closed here; the
        // example demonstrates the String -> Reader conversion.
    }
}
@Test
public void givenUsingCommonsIO_whenConvertingByteArrayIntoReader_thenCorrect() throws IOException {
    // Use an explicit charset for both encode and decode: the charset-less
    // String.getBytes()/new String(byte[]) use the platform default encoding,
    // which makes the round trip platform-dependent.
    final byte[] initialArray = "With Commons IO".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    // try-with-resources closes the reader even if an exception is thrown.
    try (Reader targetReader = new CharSequenceReader(
            new String(initialArray, java.nio.charset.StandardCharsets.UTF_8))) {
        // Example only demonstrates the byte[] -> Reader conversion.
    }
}
@Test
public void givenUsingCommonsIO_whenConvertingFileIntoReader_thenCorrect() throws IOException {
    final File initialFile = new File("src/test/resources/initialFile.txt");
    FileUtils.touch(initialFile);
    // Pass an explicit charset: the two-argument FileUtils.write overload is
    // deprecated and writes with the platform default encoding.
    FileUtils.write(initialFile, "With Commons IO", java.nio.charset.StandardCharsets.UTF_8);
    final byte[] buffer = FileUtils.readFileToByteArray(initialFile);
    // Decode with the same explicit charset and close via try-with-resources
    // so the reader is released even if an exception is thrown.
    try (Reader targetReader = new CharSequenceReader(
            new String(buffer, java.nio.charset.StandardCharsets.UTF_8))) {
        // Example only demonstrates the File -> Reader conversion.
    }
}
@Test
public void givenUsingCommonsIO_whenConvertingInputStreamIntoReader_thenCorrect() throws IOException {
    // Use the charset-aware IOUtils.toInputStream overload; the one-argument
    // version is deprecated and encodes with the platform default charset.
    // try-with-resources closes both resources even if an exception is thrown.
    try (InputStream initialStream =
             IOUtils.toInputStream("With Commons IO", java.nio.charset.StandardCharsets.UTF_8)) {
        final byte[] buffer = IOUtils.toByteArray(initialStream);
        try (Reader targetReader = new CharSequenceReader(
                new String(buffer, java.nio.charset.StandardCharsets.UTF_8))) {
            // Example only demonstrates the InputStream -> Reader conversion.
        }
    }
}
/**
 * Parses the given raw bytes by decoding them to characters and handing the
 * resulting reader to {@code read}.
 *
 * @param buffer raw input to parse
 * @throws IOException if reading the decoded content fails
 */
public void parse(byte[] buffer) throws IOException {
    // NOTE(review): new String(byte[]) decodes with the platform default
    // charset — confirm the intended encoding and pass it explicitly
    // (e.g. StandardCharsets.UTF_8) to make parsing platform-independent.
    read(new CharSequenceReader(new String(buffer)));
}
/**
 * Appends the given character sequence by wrapping it in a
 * {@link CharSequenceReader}, installing it on {@code fbr} and running
 * {@code process()}.
 *
 * @param csq the character sequence to append
 * @return this instance, to allow call chaining
 * @throws IOException if processing the sequence fails
 */
@Override
public Appendable append(CharSequence csq) throws IOException {
    // Replace the current reader with one over the new sequence, then
    // process it. (fbr and process() are defined elsewhere in this class.)
    fbr.setReader(new CharSequenceReader(csq));
    process();
    return this;
}
/**
 * Appends the sub-range {@code [start, end)} of the given character sequence
 * by wrapping it in a {@link CharSequenceReader}, installing it on
 * {@code fbr} and running {@code process()}.
 *
 * @param csq the character sequence whose sub-range is appended
 * @param start index of the first character to append (inclusive)
 * @param end index after the last character to append (exclusive)
 * @return this instance, to allow call chaining
 * @throws IOException if processing the sequence fails
 */
@Override
public Appendable append(CharSequence csq, int start, int end) throws IOException {
    // subSequence applies the [start, end) bounds before wrapping.
    fbr.setReader(new CharSequenceReader(csq.subSequence(start, end)));
    process();
    return this;
}
/**
 * Creates a {@code TermSubQueryFactory} for the given source term by running
 * it through the field's analyzer, consulting and populating the term query
 * cache when one is configured.
 *
 * @param fieldname the field the term is analyzed for
 * @param sourceTerm the term to analyze
 * @param boost the field boost attached to the resulting factory
 * @return the factory, or {@code null} if the term produces no query
 * @throws IOException if consuming the analyzer's token stream fails
 */
public TermSubQueryFactory termToFactory(final String fieldname, final Term sourceTerm, final FieldBoost boost)
        throws IOException {
    final CacheKey cacheKey;
    if (termQueryCache != null) {
        cacheKey = new CacheKey(fieldname, sourceTerm);
        final TermQueryCacheValue cacheValue = termQueryCache.get(cacheKey);
        if (cacheValue != null) {
            // The cache references factories with pre-analyzed terms, or cache entries without a
            // query factory if the term does not exist in the index. cacheValue.hasQuery() returns
            // true/false correspondingly.
            // Cache entries don't have a boost factor, it is only added later via the queryFactory.
            return cacheValue.hasQuery() ? new TermSubQueryFactory(cacheValue, boost) : null;
        }
    } else {
        cacheKey = null;
    }
    final LuceneQueryFactoryAndPRMSQuery root;
    // try-with-resources replaces the manual close-in-finally, which silently
    // swallowed any IOException thrown by TokenStream.close().
    try (TokenStream ts = analyzer.tokenStream(fieldname, new CharSequenceReader(sourceTerm))) {
        final CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
        final PositionIncrementAttribute posIncAttr = ts.addAttribute(PositionIncrementAttribute.class);
        ts.reset();
        final PositionSequence<org.apache.lucene.index.Term> sequence = new PositionSequence<>();
        while (ts.incrementToken()) {
            final int inc = posIncAttr.getPositionIncrement();
            // A positive increment starts a new position; increment 0 keeps the
            // token at the current position (e.g. a synonym).
            if (inc > 0 || sequence.isEmpty()) {
                sequence.nextPosition();
            }
            sequence.addElement(new org.apache.lucene.index.Term(fieldname, new BytesRef(termAttr)));
        }
        // Complete the TokenStream workflow (reset/incrementToken/end/close),
        // consistent with the other analyzer usage in this file.
        ts.end();
        root = positionSequenceToQueryFactoryAndPRMS(sequence);
    }
    putQueryFactoryAndPRMSQueryIntoCache(cacheKey, root);
    return root == null ? null : new TermSubQueryFactory(root, boost);
}