The following examples show java.io.StreamTokenizer#TT_WORD in use; the original source for each can be found on GitHub.
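As a starting point, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern most of these examples share: loop over nextToken() and read sval whenever the returned type is StreamTokenizer.TT_WORD.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class WordTokenDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("alpha 42 \"quoted text\" beta"));
        int tok;
        while ((tok = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (tok == StreamTokenizer.TT_WORD) {
                System.out.println("word:   " + st.sval);   // "alpha", then "beta"
            } else if (tok == StreamTokenizer.TT_NUMBER) {
                System.out.println("number: " + st.nval);   // 42.0
            } else if (tok == '"') {
                System.out.println("quoted: " + st.sval);   // "quoted text"
            }
        }
    }
}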
/**
* Print failure message and skip to the next test
*
* @exception IOException error reading file
*/
private void fail(String msg) throws IOException
{
System.out.println("FAILED - " + msg + " in line " + tkn.lineno());
// skip the remainder of the test, looking for endtest or end of file
int val = tkn.nextToken();
while (val != StreamTokenizer.TT_EOF)
{
if (val == StreamTokenizer.TT_WORD && tkn.sval.toLowerCase(Locale.ENGLISH).equals("endtest"))
break;
val = tkn.nextToken();
}
failed = true;
// get ready for next test
reset();
// print out stack trace so we know where the failure occurred
Exception e = new Exception();
e.printStackTrace();
}
Token next() throws IOException {
    int type = tok.nextToken();
    switch (type) {
        case StreamTokenizer.TT_EOF:
        case StreamTokenizer.TT_EOL:
            return null;
        case StreamTokenizer.TT_NUMBER:
            return new NumToken(tok.nval);
        case StreamTokenizer.TT_WORD:
            return new StrToken(TType.IDENT, tok.sval);
        case '"':
            return new StrToken(TType.QUOT, tok.sval);
        case ',':
            return new Token(TType.COMMA);
        case '(':
            return new Token(TType.LPAREN);
        case ')':
            return new Token(TType.RPAREN);
        default:
            throw new IOException("Unexpected: " + type);
    }
}
public String toMessage() {
switch(ttype) {
case StreamTokenizer.TT_EOL:
return "\"EOL\"";
case StreamTokenizer.TT_EOF:
return "\"EOF\"";
case StreamTokenizer.TT_NUMBER:
return "NUMBER";
case StreamTokenizer.TT_WORD:
if (sval == null) {
return "IDENTIFIER";
} else {
return "IDENTIFIER " + sval;
}
default:
if (ttype == (int)'"') {
String msg = "QUOTED STRING";
if (sval != null)
msg = msg + " \"" + sval + "\"";
return msg;
} else {
return "CHARACTER \'" + (char)ttype + "\'";
}
}
}
String parseBenchName(StreamTokenizer tokens)
throws IOException, ConfigFormatException
{
String name;
switch (tokens.ttype) {
case StreamTokenizer.TT_WORD:
case '"':
name = tokens.sval;
tokens.nextToken();
return name;
default:
throw new ConfigFormatException("missing benchmark name on " +
"line " + tokens.lineno());
}
}
public String toString() {
StringBuilder sb = new StringBuilder();
switch(ttype) {
case StreamTokenizer.TT_EOL:
sb.append("ttype=TT_EOL");
break;
case StreamTokenizer.TT_EOF:
sb.append("ttype=TT_EOF");
break;
case StreamTokenizer.TT_NUMBER:
sb.append("ttype=TT_NUM,").append("nval="+nval);
break;
case StreamTokenizer.TT_WORD:
if (sval == null) {
sb.append("ttype=TT_WORD:IDENTIFIER");
} else {
sb.append("ttype=TT_WORD:").append("sval="+sval);
}
break;
default:
if (ttype == (int)'"') {
sb.append("ttype=TT_STRING:").append("sval="+sval);
} else {
sb.append("ttype=TT_CHAR:").append((char)ttype);
}
break;
}
return sb.toString();
}
/**
* Set the params (analyzerName only): a comma-separated list of Analyzer class names. If the Analyzer lives in
* org.apache.lucene.analysis, the name can be shortened by dropping the o.a.l.a part of the fully qualified class name.
* <p>
* Analyzer names may also refer to a previously defined AnalyzerFactory.
* <p>
* Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer, standard.StandardAnalyzer) >
* <p>
* Example AnalyzerFactory usage:
* <pre>
* -AnalyzerFactory(name:'whitespace tokenized',WhitespaceTokenizer)
* -NewAnalyzer('whitespace tokenized')
* </pre>
* @param params analyzerClassName, or empty for the StandardAnalyzer
*/
@Override
public void setParams(String params) {
super.setParams(params);
final StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
stok.quoteChar('"');
stok.quoteChar('\'');
stok.eolIsSignificant(false);
stok.ordinaryChar(',');
try {
while (stok.nextToken() != StreamTokenizer.TT_EOF) {
switch (stok.ttype) {
case ',': {
// Do nothing
break;
}
case '\'':
case '\"':
case StreamTokenizer.TT_WORD: {
analyzerNames.add(stok.sval);
break;
}
default: {
throw new RuntimeException("Unexpected token: " + stok.toString());
}
}
}
} catch (RuntimeException e) {
if (e.getMessage().startsWith("Line #")) {
throw e;
} else {
throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", e);
}
} catch (Throwable t) {
throw new RuntimeException("Line #" + (stok.lineno() + getAlgLineNum()) + ": ", t);
}
}
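The tokenizer configuration above (two quote characters plus ordinaryChar(',')) is what lets analyzer names arrive either as TT_WORD tokens or as quoted strings. A standalone sketch of the same idea, with a made-up params string and no Lucene dependencies:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ParamsSplitDemo {
    public static void main(String[] args) throws IOException {
        String params = "WhitespaceAnalyzer,'my custom analyzer',standard.StandardAnalyzer";
        StreamTokenizer stok = new StreamTokenizer(new StringReader(params));
        stok.quoteChar('"');
        stok.quoteChar('\'');
        stok.eolIsSignificant(false);
        stok.ordinaryChar(',');          // report ',' as a single-character token
        List<String> names = new ArrayList<>();
        while (stok.nextToken() != StreamTokenizer.TT_EOF) {
            switch (stok.ttype) {
                case ',':
                    break;                       // separator: nothing to record
                case '\'':
                case '"':
                case StreamTokenizer.TT_WORD:
                    names.add(stok.sval);        // bare words and quoted strings both carry sval
                    break;
                default:
                    throw new IllegalStateException("Unexpected token: " + stok);
            }
        }
        System.out.println(names);  // [WhitespaceAnalyzer, my custom analyzer, standard.StandardAnalyzer]
    }
}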
/**
* Read an expected <code>word</code> token or generate a parse error.
*/
protected void eatWord(final String word) throws IOException {
final int tok = st.nextToken();
if (tok != StreamTokenizer.TT_WORD) {
parseError("expecting `" + word + "', " + gotWhat(tok));
}
if (!st.sval.equals(word)) {
parseError("expecting `" + word + "' got `" + st.sval + "'");
}
}
/**
* Reads an integer
*/
private int getInt() throws IOException {
st.nextToken();
if (st.ttype == StreamTokenizer.TT_WORD)
return Double.valueOf(st.sval).intValue();
else if (st.ttype == StreamTokenizer.TT_EOF)
throw new EOFException("End-of-File encountered during parsing");
else
throw new IOException("Unknown token found during parsing");
}
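getInt() expects numbers to show up as TT_WORD tokens, which only happens when numeric parsing has been replaced by word characters for digits. A self-contained sketch of that kind of setup (the syntax calls here are an assumption for illustration, not the configuration used by the class above):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class NumbersAsWordsDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("42 3.14"));
        st.resetSyntax();
        st.whitespaceChars(0, ' ');       // control characters and space separate tokens
        st.wordChars('0', '9');           // digits are word characters, so no TT_NUMBER is produced
        st.wordChars('.', '.');
        st.wordChars('a', 'z');
        st.wordChars('A', 'Z');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_WORD) {
                // numbers come back as text; convert explicitly, as getInt() does
                System.out.println(Double.valueOf(st.sval).intValue());   // 42, then 3
            }
        }
    }
}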
/**
* Gets the next token, checking for a premature end of line.
*
* @param tokenizer
* the stream tokenizer
* @exception IOException
* if it finds a premature end of line
*/
protected void getNextToken(StreamTokenizer tokenizer) throws IOException {
if (tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
errms(tokenizer, "premature end of line");
}
if (tokenizer.ttype == StreamTokenizer.TT_EOF) {
errms(tokenizer, "premature end of file");
} else if ((tokenizer.ttype == '\'') || (tokenizer.ttype == '"')) {
tokenizer.ttype = StreamTokenizer.TT_WORD;
} else if ((tokenizer.ttype == StreamTokenizer.TT_WORD)
&& (tokenizer.sval.equals("?"))) {
tokenizer.ttype = '?';
}
}
@Override
public ComputedAttributeParserState parse(StreamTokenizer st) throws IOException, ParseException
{
ComputedAttributeParserState nextState = null;
while(nextState == null && st.ttype != StreamTokenizer.TT_EOF)
{
int nextToken = st.nextToken();
if (nextToken != StreamTokenizer.TT_EOL && nextToken != StreamTokenizer.TT_EOF)
{
ArrayList<Expression> stack = this.getParser().getStateStack();
switch(nextToken)
{
case StreamTokenizer.TT_WORD:
String functionName = st.sval;
Expression expression = stack.remove(stack.size() - 1);
if (functionName.equals("case"))
{
stack.add(new CaseExpression(((FunctionExpression) expression).getSourceExpression()));
nextState = new CaseExpressionParameterBeginExpressionState(this.getParser());
}
else
{
FunctionExpression function = new FunctionExpression(expression);
stack.add(function);
function.setFunctionName(functionName);
nextState = new FunctionParameterBeginExpressionState(this.getParser());
}
break;
case StreamTokenizer.TT_NUMBER:
throw new ParseException("unexpected number "+st.nval+" in expression "+this.getParser().getFormula()+" in "+this.getParser().getDiagnosticMessage());
default:
char ch = (char)st.ttype;
throw createUnexpectedCharacterException(ch, "<functionName>");
}
}
}
return nextState;
}
public static NodeMatcher parse( final String s ) throws IOException {
final StreamTokenizer tokenizer = new StreamTokenizer( new StringReader( s ) );
tokenizer.wordChars( '0', '9' );
tokenizer.ordinaryChar( '.' );
tokenizer.ordinaryChar( ',' );
tokenizer.ordinaryChars( 0, ' ' );
ElementMatcher elementMatcher = null;
NodeMatcher n = null;
Type selectorType = Type.Start;
int token;
while ( ( token = tokenizer.nextToken() ) != StreamTokenizer.TT_EOF ) {
if ( token == StreamTokenizer.TT_WORD || token == '*' ) {
NodeMatcher matcher = null;
switch ( selectorType ) {
case Start:
elementMatcher = createMatcher( tokenizer );
matcher = elementMatcher;
break;
case Child:
n = new ChildMatcher( n );
elementMatcher = createMatcher( tokenizer );
matcher = elementMatcher;
break;
case Descendant:
n = new DescendantMatcher( n );
elementMatcher = createMatcher( tokenizer );
matcher = elementMatcher;
break;
case Id:
if ( elementMatcher == null ) {
if ( n != null ) {
n = new DescendantMatcher( n );
}
elementMatcher = createMatcher( tokenizer );
matcher = elementMatcher;
}
elementMatcher.add( new AttributeMatcher( AttributeNames.Xml.NAMESPACE, AttributeNames.Xml.ID,
tokenizer.sval ) );
break;
case Class:
if ( elementMatcher == null ) {
if ( n != null ) {
n = new DescendantMatcher( n );
}
elementMatcher = createMatcher( tokenizer );
matcher = elementMatcher;
}
elementMatcher.add( new AttributeMatcher( AttributeNames.Core.NAMESPACE, AttributeNames.Core.STYLE_CLASS,
tokenizer.sval ) );
break;
default:
throw new IOException();
}
selectorType = Type.Element;
if ( matcher != null ) {
if ( n != null ) {
n = new AndMatcher( matcher, n );
} else {
n = matcher;
}
}
} else {
if ( token == '>' ) {
selectorType = Type.Child;
}
if ( token == '.' ) {
selectorType = Type.Class;
}
if ( token == '#' ) {
selectorType = Type.Id;
}
if ( Character.isWhitespace( token ) ) {
if ( selectorType == Type.Class || selectorType == Type.Id ) {
throw new IllegalStateException();
}
if ( selectorType != Type.Child ) {
selectorType = Type.Descendant;
}
}
}
}
return n;
}
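The selector parser above makes whitespace ordinary via ordinaryChars(0, ' ') so that descendant combinators are reported as tokens instead of being skipped. The matcher classes are project-specific, but the token stream itself can be inspected with a standalone sketch like this (the sample selector is made up):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class SelectorTokensDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer t = new StreamTokenizer(new StringReader("div.note > span#id1"));
        t.wordChars('0', '9');        // digits may appear in ids and class names
        t.ordinaryChar('.');          // '.' introduces a class selector
        t.ordinaryChar(',');          // ',' separates selector groups
        t.ordinaryChars(0, ' ');      // whitespace is reported, not skipped (descendant combinator)
        int tok;
        while ((tok = t.nextToken()) != StreamTokenizer.TT_EOF) {
            if (tok == StreamTokenizer.TT_WORD) {
                System.out.println("WORD  " + t.sval);
            } else {
                System.out.println("CHAR  '" + (char) tok + "'");
            }
        }
    }
}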
/**
* <p>Parse an incoming String of the form similar to an array initializer
* in the Java language into a {@code List} of individual Strings
* for each element, according to the following rules.</p>
* <ul>
* <li>The string is expected to be a comma-separated list of values.</li>
* <li>The string may optionally have matching '{' and '}' delimiters
* around the list.</li>
* <li>Whitespace before and after each element is stripped.</li>
* <li>Elements in the list may be delimited by single or double quotes.
* Within a quoted element, the normal Java escape sequences are valid.</li>
* </ul>
*
* @param type The type to convert the value to
* @param value String value to be parsed
* @return List of parsed elements.
*
* @throws ConversionException if the syntax of {@code value}
* is not syntactically valid
* @throws NullPointerException if {@code value}
* is {@code null}
*/
private List<String> parseElements(final Class<?> type, String value) {
if (log().isDebugEnabled()) {
log().debug("Parsing elements, delimiter=[" + delimiter + "], value=[" + value + "]");
}
// Trim any matching '{' and '}' delimiters
value = value.trim();
if (value.startsWith("{") && value.endsWith("}")) {
value = value.substring(1, value.length() - 1);
}
try {
// Set up a StreamTokenizer on the characters in this String
final StreamTokenizer st = new StreamTokenizer(new StringReader(value));
st.whitespaceChars(delimiter, delimiter); // Set the delimiters
st.ordinaryChars('0', '9'); // Needed to turn off numeric flag
st.wordChars('0', '9'); // Needed to make part of tokens
for (final char allowedChar : allowedChars) {
st.ordinaryChars(allowedChar, allowedChar);
st.wordChars(allowedChar, allowedChar);
}
// Split comma-delimited tokens into a List
List<String> list = null;
while (true) {
final int ttype = st.nextToken();
if (ttype == StreamTokenizer.TT_WORD || ttype > 0) {
if (st.sval != null) {
if (list == null) {
list = new ArrayList<>();
}
list.add(st.sval);
}
} else if (ttype == StreamTokenizer.TT_EOF) {
break;
} else {
throw new ConversionException("Encountered token of type "
+ ttype + " parsing elements to '" + toString(type) + ".");
}
}
if (list == null) {
list = Collections.emptyList();
}
if (log().isDebugEnabled()) {
log().debug(list.size() + " elements parsed");
}
// Return the completed list
return list;
} catch (final IOException e) {
throw new ConversionException("Error converting from String to '"
+ toString(type) + "': " + e.getMessage(), e);
}
}
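Outside the converter, the same recipe can be tried with a fixed ',' delimiter and no extra allowed characters (both are fields of the surrounding class and are hard-coded here for illustration):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ElementListDemo {
    public static void main(String[] args) throws IOException {
        String value = "{ red, 'light blue', 42 }".trim();
        if (value.startsWith("{") && value.endsWith("}")) {
            value = value.substring(1, value.length() - 1);   // strip the optional braces
        }
        StreamTokenizer st = new StreamTokenizer(new StringReader(value));
        st.whitespaceChars(',', ',');   // the delimiter is treated like whitespace
        st.ordinaryChars('0', '9');     // turn off numeric parsing for digits...
        st.wordChars('0', '9');         // ...and fold them back into word tokens
        List<String> list = new ArrayList<>();
        int ttype;
        while ((ttype = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if ((ttype == StreamTokenizer.TT_WORD || ttype > 0) && st.sval != null) {
                list.add(st.sval);      // words and quoted strings both end up in the list
            }
        }
        System.out.println(list);   // [red, light blue, 42]
    }
}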
/** Create a Chemical model by parsing an input stream */
XYZChemModel(InputStream is) throws Exception {
this();
StreamTokenizer st = new StreamTokenizer(
new BufferedReader(new InputStreamReader(is, "UTF-8")));
st.eolIsSignificant(true);
st.commentChar('#');
try {
scan:
while (true) {
switch (st.nextToken()) {
case StreamTokenizer.TT_EOF:
break scan;
default:
break;
case StreamTokenizer.TT_WORD:
String name = st.sval;
double x = 0,
y = 0,
z = 0;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
x = st.nval;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
y = st.nval;
if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
z = st.nval;
}
}
}
addVert(name, (float) x, (float) y, (float) z);
while (st.ttype != StreamTokenizer.TT_EOL
&& st.ttype != StreamTokenizer.TT_EOF) {
st.nextToken();
}
} // end Switch
} // end while
is.close();
} // end Try
catch (IOException e) {
// ignore read errors here; a premature stop is reported by the ttype check below
}
if (st.ttype != StreamTokenizer.TT_EOF) {
throw new Exception(st.toString());
}
}
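The model expects whitespace-separated records of the form name x y z, one per line. A trimmed-down, self-contained version of the same scan, with addVert and the surrounding class replaced by a println (the sample data is invented):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class XyzScanDemo {
    public static void main(String[] args) throws IOException {
        String data = "# element followed by x, y, z\n"
                + "C 0.0 0.0 0.0\n"
                + "O 1.2 0.0 0.0\n";
        StreamTokenizer st = new StreamTokenizer(new BufferedReader(new StringReader(data)));
        st.eolIsSignificant(true);   // TT_EOL marks the end of each record
        st.commentChar('#');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype != StreamTokenizer.TT_WORD) {
                continue;            // skip EOL and stray tokens until a name starts a record
            }
            String name = st.sval;
            double x = 0, y = 0, z = 0;
            if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                x = st.nval;
                if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                    y = st.nval;
                    if (st.nextToken() == StreamTokenizer.TT_NUMBER) {
                        z = st.nval;
                    }
                }
            }
            System.out.println(name + " -> (" + x + ", " + y + ", " + z + ")");
            // consume anything left on the current line
            while (st.ttype != StreamTokenizer.TT_EOL && st.ttype != StreamTokenizer.TT_EOF) {
                st.nextToken();
            }
        }
    }
}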
/**
* Create new benchmark harness with given configuration and reporter.
* Throws ConfigFormatException if there was an error parsing the config
* file.
* <p>
* <b>Config file syntax:</b>
* <p>
* '#' marks the beginning of a comment. Blank lines are ignored. All
* other lines should adhere to the following format:
* <pre>
* <weight> <name> <class> [<args>]
* </pre>
* <weight> is a floating-point value that is multiplied by the
* benchmark's execution time to determine its weighted score. The
* total score of the benchmark suite is the sum of all weighted scores
* of its benchmarks.
* <p>
* <name> is a name used to identify the benchmark on the benchmark
* report. If the name contains whitespace, the quote character '"' should
* be used as a delimiter.
* <p>
* <class> is the full name (including the package) of the class
* containing the benchmark implementation. This class must implement
* bench.Benchmark.
* <p>
* [<args>] is a variable-length list of runtime arguments to pass to
* the benchmark. Arguments containing whitespace should use the quote
* character '"' as a delimiter.
* <p>
* <b>Example:</b>
* <pre>
* 3.5 "My benchmark" bench.serial.Test first second "third arg"
* </pre>
*/
public Harness(InputStream in) throws IOException, ConfigFormatException {
Vector bvec = new Vector();
StreamTokenizer tokens = new StreamTokenizer(new InputStreamReader(in));
tokens.resetSyntax();
tokens.wordChars(0, 255);
tokens.whitespaceChars(0, ' ');
tokens.commentChar('#');
tokens.quoteChar('"');
tokens.eolIsSignificant(true);
tokens.nextToken();
while (tokens.ttype != StreamTokenizer.TT_EOF) {
switch (tokens.ttype) {
case StreamTokenizer.TT_WORD:
case '"': // parse line
bvec.add(parseBenchInfo(tokens));
break;
default: // ignore
tokens.nextToken();
break;
}
}
binfo = (BenchInfo[]) bvec.toArray(new BenchInfo[bvec.size()]);
}
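BenchInfo and parseBenchInfo belong to the harness itself, but the tokenizer setup can be exercised on its own. A sketch that runs the same configuration against an in-memory config line and simply prints each field:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StreamTokenizer;
import java.nio.charset.StandardCharsets;

public class ConfigLineDemo {
    public static void main(String[] args) throws IOException {
        String config = "3.5 \"My benchmark\" bench.serial.Test first second \"third arg\"\n";
        StreamTokenizer tokens = new StreamTokenizer(new InputStreamReader(
                new ByteArrayInputStream(config.getBytes(StandardCharsets.UTF_8)),
                StandardCharsets.UTF_8));
        tokens.resetSyntax();
        tokens.wordChars(0, 255);          // everything is a word character by default...
        tokens.whitespaceChars(0, ' ');    // ...except control characters and space
        tokens.commentChar('#');
        tokens.quoteChar('"');
        tokens.eolIsSignificant(true);
        while (tokens.nextToken() != StreamTokenizer.TT_EOF) {
            if (tokens.ttype == StreamTokenizer.TT_WORD || tokens.ttype == '"') {
                System.out.println("field: " + tokens.sval);   // weight, name, class, args in order
            }
        }
    }
}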
/**
* Returns true if the current token is a legal identifier.
*
* @return true if the current token is an identifier
*/
public boolean matchId() {
return tok.ttype == StreamTokenizer.TT_WORD
&& !keywords.contains(tok.sval);
}