Below are example usages of java.io.StreamTokenizer#toString(); click a link to view the source on GitHub, or leave a comment in the panel on the right.
/**
 * Harmony regression test: toString() of a quoted-string token must render
 * the string contents without the surrounding quote characters.
 */
public void test_harmonyRegressionTest2() {
    byte[] data = new byte[] {(byte) '"',
                              (byte) 'H',
                              (byte) 'e',
                              (byte) 'l',
                              (byte) 'l',
                              (byte) 'o',
                              (byte) '"'};
    StreamTokenizer tokenizer = new StreamTokenizer(new ByteArrayInputStream(data));
    try {
        tokenizer.nextToken();
    } catch (Exception e) {
        // Fail the test instead of printing the stack trace and letting the
        // assertion below report a misleading mismatch (consistent with
        // test_harmonyRegressionTest).
        Assert.fail(e.getMessage());
    }
    String result = tokenizer.toString();
    Assert.assertEquals("Token[Hello], line 1", result);
}
/**
 * Set the params (analyzerName only): a comma-separated list of Analyzer class names.
 * If the Analyzer lives in org.apache.lucene.analysis, the name can be shortened by
 * dropping the o.a.l.a part of the Fully Qualified Class Name.
 * <p>
 * Analyzer names may also refer to previously defined AnalyzerFactory's.
 * <p>
 * Example Declaration:
 * {@code NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer, standard.StandardAnalyzer)}
 * <p>
 * Example AnalyzerFactory usage:
 * <pre>
 * -AnalyzerFactory(name:'whitespace tokenized',WhitespaceTokenizer)
 * -NewAnalyzer('whitespace tokenized')
 * </pre>
 * @param params analyzerClassName, or empty for the StandardAnalyzer
 */
@Override
public void setParams(String params) {
    super.setParams(params);
    // Tokenize the comma-separated list; quoted names may contain spaces or commas.
    final StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
    stok.quoteChar('"');
    stok.quoteChar('\'');
    stok.eolIsSignificant(false);
    stok.ordinaryChar(',');
    try {
        while (stok.nextToken() != StreamTokenizer.TT_EOF) {
            switch (stok.ttype) {
                case ',': {
                    // Separator between names - nothing to record.
                    break;
                }
                case '\'':
                case '\"':
                case StreamTokenizer.TT_WORD: {
                    analyzerNames.add(stok.sval);
                    break;
                }
                default: {
                    throw new RuntimeException("Unexpected token: " + stok.toString());
                }
            }
        }
    } catch (RuntimeException e) {
        // getMessage() can be null (e.g. for a bare NullPointerException);
        // guard before startsWith to avoid masking the original failure.
        if (e.getMessage() != null && e.getMessage().startsWith("Line #")) {
            throw e;
        } else {
            throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", e);
        }
    } catch (Throwable t) {
        throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", t);
    }
}
/**
 * Harmony regression test: toString() of an ordinary-character token
 * ('-' read from a one-byte stream) must render as Token['-'], line 1.
 */
public void test_harmonyRegressionTest() {
    byte[] bytes = {(byte) '-'};
    StreamTokenizer st = new StreamTokenizer(new ByteArrayInputStream(bytes));
    try {
        st.nextToken();
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    String repr = st.toString();
    Assert.assertEquals("Token['-'], line 1", repr);
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Create a Chemical model by parsing an XYZ-format input stream.
 * Each record is an element name followed by up to three numeric
 * coordinates (missing ones default to 0); '#' starts a comment.
 *
 * @param is the stream to read; always closed before returning
 * @throws Exception if the stream cannot be read, or parsing stops before EOF
 */
XYZChemModel(InputStream is) throws Exception {
    this();
    StreamTokenizer st = new StreamTokenizer(
            new BufferedReader(new InputStreamReader(is, "UTF-8")));
    st.eolIsSignificant(true); // TT_EOL is needed to resynchronize per record
    st.commentChar('#');
    try {
        scan:
        while (true) {
            switch (st.nextToken()) {
                case StreamTokenizer.TT_EOF:
                    break scan;
                default:
                    break; // skip stray punctuation / EOL tokens
                case StreamTokenizer.TT_WORD:
                    // Atom record: name then up to three coordinates.
                    String name = st.sval;
                    double x = 0, y = 0, z = 0;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        x = st.nval;
                        if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                            y = st.nval;
                            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                                z = st.nval;
                            }
                        }
                    }
                    addVert(name, (float) x, (float) y, (float) z);
                    // Discard any trailing tokens up to end of line.
                    while (st.ttype != StreamTokenizer.TT_EOL
                            && st.ttype != StreamTokenizer.TT_EOF) {
                        st.nextToken();
                    }
            } // end switch
        } // end while
    } catch (IOException e) {
        // Previously swallowed silently; surface read failures with their cause.
        throw new Exception("Error reading XYZ model: " + e.getMessage(), e);
    } finally {
        is.close(); // close even when parsing fails (was leaked on error)
    }
    if (st.ttype != StreamTokenizer.TT_EOF) {
        throw new Exception(st.toString());
    }
}
/**
 * Exercises StreamTokenizer.toString() across word, number, quoted-string,
 * EOL, ordinary-character and EOF tokens, plus two Harmony regression cases.
 */
public void testStreamTokenizer() throws Exception {
// Stream a: default syntax; three lines (\n, then \r\n).
String str = "Testing 12345 \n alpha \r\n omega";
// Stream b: number, quoted string, then text using 'u' as comment char
// and 'y' as ordinary char (configured below).
String strb = "-3.8 'BLIND mice' \r sEe /* how */ they run";
StringReader aa = new StringReader(str);
StringReader ba = new StringReader(strb);
StreamTokenizer a = new StreamTokenizer(aa);
StreamTokenizer b = new StreamTokenizer(ba);
// Line counting starts at 1 before any token is read.
assertEquals(1, a.lineno());
assertEquals(StreamTokenizer.TT_WORD, a.nextToken());
assertEquals("Token[Testing], line 1", a.toString());
assertEquals(StreamTokenizer.TT_NUMBER, a.nextToken());
// Numbers render with the "n=" prefix and as a double.
assertEquals("Token[n=12345.0], line 1", a.toString());
assertEquals(StreamTokenizer.TT_WORD, a.nextToken());
assertEquals("Token[alpha], line 2", a.toString());
assertEquals(StreamTokenizer.TT_WORD, a.nextToken());
assertEquals("Token[omega], line 3", a.toString());
assertEquals(StreamTokenizer.TT_EOF, a.nextToken());
assertEquals("Token[EOF], line 3", a.toString());
// Reconfigure stream b before reading from it.
b.commentChar('u');
b.eolIsSignificant(true);
b.lowerCaseMode(true);
b.ordinaryChar('y');
b.slashStarComments(true);
assertEquals(StreamTokenizer.TT_NUMBER, b.nextToken());
// NOTE(review): double assertEquals without a delta - deprecated in JUnit 4;
// confirm which framework version this targets.
assertEquals(-3.8, b.nval);
assertEquals("Token[n=-3.8], line 1", b.toString());
assertEquals(39, b.nextToken()); // '
// Quoted strings keep their case even in lowerCaseMode; quotes are stripped.
assertEquals("Token[BLIND mice], line 1", b.toString());
assertEquals(10, b.nextToken()); // \n
assertEquals("Token[EOL], line 2", b.toString());
assertEquals(StreamTokenizer.TT_WORD, b.nextToken());
// lowerCaseMode folds "sEe" to "see".
assertEquals("Token[see], line 2", b.toString());
assertEquals(StreamTokenizer.TT_WORD, b.nextToken());
// "/* how */" is skipped (slashStarComments); "they" splits at ordinary 'y'.
assertEquals("Token[the], line 2", b.toString());
assertEquals(121, b.nextToken()); // y
assertEquals("Token['y'], line 2", b.toString());
assertEquals(StreamTokenizer.TT_WORD, b.nextToken());
// After "r", comment char 'u' in "un" discards the rest of the line.
assertEquals("Token[r], line 2", b.toString());
assertEquals(StreamTokenizer.TT_EOF, b.nextToken());
assertEquals("Token[EOF], line 2", b.toString());
// A harmony regression test
byte[] data = new byte[]{(byte) '-'};
StreamTokenizer tokenizer = new StreamTokenizer(new ByteArrayInputStream(data));
tokenizer.nextToken();
String result = tokenizer.toString();
assertEquals("Token['-'], line 1", result);
// another harmony regression test
byte[] data2 = new byte[]{(byte) '"',
(byte) 'H',
(byte) 'e',
(byte) 'l',
(byte) 'l',
(byte) 'o',
(byte) '"'};
StreamTokenizer tokenizer2 = new StreamTokenizer(new ByteArrayInputStream(data2));
tokenizer2.nextToken();
result = tokenizer2.toString();
assertEquals("Token[Hello], line 1", result);
}
/**
 * Throws an error message that includes the line number and the last token read.
 *
 * @param tokenizer the stream tokenizer whose current token and line are reported
 * @param theMsg the error message to be thrown
 * @throws IOException always, containing the error message and tokenizer state
 */
public static void errms(StreamTokenizer tokenizer, String theMsg)
throws IOException {
    final String detail = theMsg + ", read " + tokenizer;
    throw new IOException(detail);
}
/**
 * Throws an error message that includes the line number and the last token read.
 *
 * @param tokenizer the stream tokenizer whose current token and line are reported
 * @param theMsg the error message to be thrown
 * @throws IOException always, containing the error message and tokenizer state
 */
public static void errms(StreamTokenizer tokenizer, String theMsg)
throws IOException {
    StringBuilder sb = new StringBuilder(theMsg);
    sb.append(", read ").append(tokenizer.toString());
    throw new IOException(sb.toString());
}
/**
 * Throws an error message that includes the line number and the last token read.
 *
 * @param tokenizer
 *            the stream tokenizer whose current token and line are reported
 * @param theMsg
 *            the error message to be thrown
 * @throws IOException
 *             always, containing the error message and tokenizer state
 */
protected void errms(StreamTokenizer tokenizer, String theMsg)
throws IOException {
    final String detail = theMsg + ", read " + tokenizer;
    throw new IOException(detail);
}