java.io.StreamTokenizer#TT_EOF Source Code Examples

Listed below are example usages of java.io.StreamTokenizer#TT_EOF taken from real projects; follow each project link to view the full source on GitHub.
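Before the project samples, here is a minimal, self-contained sketch (not taken from any project below) of the basic pattern: call nextToken() in a loop until it returns StreamTokenizer.TT_EOF, reading words and quoted strings from sval and numbers from nval.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class TtEofDemo {
    public static void main(String[] args) throws IOException {
        // The default syntax table already recognizes words, numbers and '"'/'\'' quoted strings.
        StreamTokenizer st = new StreamTokenizer(new StringReader("alpha 42 \"quoted text\""));
        // nextToken() returns TT_EOF once the underlying reader is exhausted.
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                System.out.println("number: " + st.nval);
            } else {
                // TT_WORD and quoted-string tokens both carry their text in sval
                System.out.println("text: " + st.sval);
            }
        }
    }
}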

Example 1  Project: Bytecoder  File: KeyStoreUtil.java
/**
 * Parses an option line such as
 *    -genkeypair -dname "CN=Me"
 * and adds the results to a list.
 * @param list the list to fill
 * @param s the line to parse
 */
private static void parseArgsLine(List<String> list, String s)
        throws IOException, PropertyExpander.ExpandException {
    StreamTokenizer st = new StreamTokenizer(new StringReader(s));

    st.resetSyntax();
    st.whitespaceChars(0x00, 0x20);
    st.wordChars(0x21, 0xFF);
    // Everything is a word char except for quotation and apostrophe
    st.quoteChar('"');
    st.quoteChar('\'');

    while (true) {
        if (st.nextToken() == StreamTokenizer.TT_EOF) {
            break;
        }
        list.add(PropertyExpander.expand(st.sval));
    }
}
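As a quick illustration of what the method above produces, here is a standalone sketch of the same tokenizer configuration; the PropertyExpander.expand call is left out, so this is only an approximation of the JDK method, not the method itself.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ArgsLineDemo {
    public static void main(String[] args) throws IOException {
        String line = "-genkeypair -dname \"CN=Me\"";
        StreamTokenizer st = new StreamTokenizer(new StringReader(line));
        st.resetSyntax();
        st.whitespaceChars(0x00, 0x20);
        st.wordChars(0x21, 0xFF);
        st.quoteChar('"');
        st.quoteChar('\'');
        List<String> list = new ArrayList<>();
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            list.add(st.sval);    // words and quoted strings both end up in sval
        }
        System.out.println(list); // [-genkeypair, -dname, CN=Me]
    }
}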
 
Example 2  Project: openjdk-8  File: CommandLine.java
private static void loadCmdFile(String name, List<String> args)
    throws IOException
{
    Reader r = new BufferedReader(new FileReader(name));
    StreamTokenizer st = new StreamTokenizer(r);
    st.resetSyntax();
    st.wordChars(' ', 255);
    st.whitespaceChars(0, ' ');
    st.commentChar('#');
    st.quoteChar('"');
    st.quoteChar('\'');
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        args.add(st.sval);
    }
    r.close();
}
 
Example 3  Project: openjdk-jdk9  File: Harness.java
String[] parseBenchArgs(StreamTokenizer tokens)
    throws IOException, ConfigFormatException
{
    Vector vec = new Vector();
    for (;;) {
        switch (tokens.ttype) {
            case StreamTokenizer.TT_EOF:
            case StreamTokenizer.TT_EOL:
                return (String[]) vec.toArray(new String[vec.size()]);

            case StreamTokenizer.TT_WORD:
            case '"':
                vec.add(tokens.sval);
                tokens.nextToken();
                break;

            default:
                throw new ConfigFormatException("unrecognized arg token " +
                        "on line " + tokens.lineno());
        }
    }
}
 
Example 4  Project: jdk8u_jdk  File: Token.java
public String toMessage() {
    switch(ttype) {
    case StreamTokenizer.TT_EOL:
        return "\"EOL\"";
    case StreamTokenizer.TT_EOF:
        return "\"EOF\"";
    case StreamTokenizer.TT_NUMBER:
        return "NUMBER";
    case StreamTokenizer.TT_WORD:
        if (sval == null) {
            return "IDENTIFIER";
        } else {
            return "IDENTIFIER " + sval;
        }
    default:
        if (ttype == (int)'"') {
            String msg = "QUOTED STRING";
            if (sval != null)
                msg = msg + " \"" + sval + "\"";
            return msg;
        } else {
            return "CHARACTER \'" + (char)ttype + "\'";
        }
    }
}
 
Example 5  Project: gemfirexd-oss  File: TestProto.java
/**
 * Prints a failure message and skips to the next test.
 *
 * @exception IOException error reading the file
 */
private void fail(String msg) throws IOException
{
	System.out.println("FAILED - " + msg + " in line " + tkn.lineno());
	// skip the remainder of the test, looking for 'endtest' or the end of file
	int val = tkn.nextToken();
	while (val != StreamTokenizer.TT_EOF)
	{
		if (val == StreamTokenizer.TT_WORD && tkn.sval.toLowerCase(Locale.ENGLISH).equals("endtest"))
			break;

		val = tkn.nextToken();
	}
	failed = true;
	// get ready for next test
	reset();
	// print out stack trace so we know where the failure occurred
	Exception e = new Exception();
	e.printStackTrace();
}
 
Example 6  Project: KEEL  File: Algorithm.java
/** Positions the tokenizer at the first token of the next line.
 *
 * @param tokenizer The tokenizer to advance.
 *
 * @return True if the tokenizer was positioned at the next line; false if the end of file was reached.
 *
 * @throws Exception If the tokenizer cannot be read.
 */
protected boolean getNextToken( StreamTokenizer tokenizer ) throws Exception
{
  try
  {
    if ( tokenizer.nextToken() == StreamTokenizer.TT_EOF )
      return false;
    else
    {
      tokenizer.pushBack();
      while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL );
      while ( tokenizer.nextToken() == StreamTokenizer.TT_EOL );

      if ( tokenizer.sval == null )
        return false;
      else
        return true;
    }
  }
  catch( Exception e )
  {
    System.err.println( e.getMessage() );
    return false;
  }
}
 
Example 7  Project: jdk8u-jdk  File: Token.java
public String toMessage() {
    switch(ttype) {
    case StreamTokenizer.TT_EOL:
        return "\"EOL\"";
    case StreamTokenizer.TT_EOF:
        return "\"EOF\"";
    case StreamTokenizer.TT_NUMBER:
        return "NUMBER";
    case StreamTokenizer.TT_WORD:
        if (sval == null) {
            return "IDENTIFIER";
        } else {
            return "IDENTIFIER " + sval;
        }
    default:
        if (ttype == (int)'"') {
            String msg = "QUOTED STRING";
            if (sval != null)
                msg = msg + " \"" + sval + "\"";
            return msg;
        } else {
            return "CHARACTER \'" + (char)ttype + "\'";
        }
    }
}
 
Example 8  Project: snap-desktop  File: Launcher.java
private List<String> parseOptions(String defaultOptions) {
    LinkedList<String> defaultOptionList = new LinkedList<>();

    StreamTokenizer st = new StreamTokenizer(new StringReader(defaultOptions));
    st.resetSyntax();
    st.wordChars(' ' + 1, 255);
    st.whitespaceChars(0, ' ');
    st.quoteChar('"');
    st.quoteChar('\'');

    boolean firstArgQuoted;
    try {
        int tt = st.nextToken();
        firstArgQuoted = tt == '\'' || tt == '"';
        if (tt != StreamTokenizer.TT_EOF) {
            do {
                if (st.sval != null) {
                    defaultOptionList.add(st.sval);
                }
                tt = st.nextToken();
            } while (tt != StreamTokenizer.TT_EOF);
        }
    } catch (IOException e) {
        throw new IllegalStateException(e);
    }
    if (defaultOptionList.size() == 1 && firstArgQuoted) {
        return parseOptions(defaultOptionList.get(0));
    }

    return defaultOptionList;
}
 
Example 9  Project: consulo  File: InspectionTestUtil.java
static boolean compareDescriptions(Element reportedProblem, Element expectedProblem) throws Exception {
  String expectedDescription = expectedProblem.getChildText("description");
  String reportedDescription = reportedProblem.getChildText("description");
  if (expectedDescription.equals(reportedDescription)) return true;

  StreamTokenizer tokenizer = new StreamTokenizer(new CharArrayReader(expectedDescription.toCharArray()));
  tokenizer.quoteChar('\'');

  int idx = 0;
  while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
    String word;
    if (tokenizer.sval != null) {
      word = tokenizer.sval;
    } else if (tokenizer.ttype == StreamTokenizer.TT_NUMBER) {
      word = Double.toString(tokenizer.nval);
    }
    else {
      continue;
    }

    idx = reportedDescription.indexOf(word, idx);
    if (idx == -1) return false;
    idx += word.length();
  }

  return true;
}
 
Example 10  Project: dkpro-jwpl  File: SQLFileParser.java
/**
 * Skips the SQL statements for table creation and the prefix <br>
 * INSERT INTO TABLE .... VALUES of an insert statement.<br>
 * Reads tokens until the word 'VALUES' or the EOF is reached.
 *
 * @throws IOException
 *
 */
protected void skipStatements() throws IOException {
	while (true) {
		st.nextToken();
		if (null != st.sval && st.sval.equalsIgnoreCase("VALUES")) {
			// the next token is the begin of a value
			break;
		}
		if (st.ttype == StreamTokenizer.TT_EOF) {
			// the end of the file is reached
			EOF_reached = true;
			break;
		}
	}
}
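A standalone sketch of the same skipping loop over a single INSERT statement; the SQLFileParser's tokenizer configuration is not shown here, so a default-syntax tokenizer is assumed.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class SkipToValuesDemo {
    public static void main(String[] args) throws IOException {
        String sql = "INSERT INTO page VALUES (10, 'Main_Page')";
        StreamTokenizer st = new StreamTokenizer(new StringReader(sql));
        boolean eofReached = false;
        while (true) {
            st.nextToken();
            if (st.sval != null && st.sval.equalsIgnoreCase("VALUES")) {
                break; // the next token starts the first value tuple
            }
            if (st.ttype == StreamTokenizer.TT_EOF) {
                eofReached = true;
                break;
            }
        }
        System.out.println("stopped at VALUES? " + !eofReached);
    }
}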
 
Example 11  Project: openjdk-8-source  File: Token.java
public String toString() {
    StringBuilder sb = new StringBuilder();
    switch(ttype) {
    case StreamTokenizer.TT_EOL:
        sb.append("ttype=TT_EOL");
        break;
    case StreamTokenizer.TT_EOF:
        sb.append("ttype=TT_EOF");
        break;
    case StreamTokenizer.TT_NUMBER:
        sb.append("ttype=TT_NUM,").append("nval="+nval);
        break;
    case StreamTokenizer.TT_WORD:
        if (sval == null) {
            sb.append("ttype=TT_WORD:IDENTIFIER");
        } else {
            sb.append("ttype=TT_WORD:").append("sval="+sval);
        }
        break;
    default:
        if (ttype == (int)'"') {
            sb.append("ttype=TT_STRING:").append("sval="+sval);
        } else {
            sb.append("ttype=TT_CHAR:").append((char)ttype);
        }
        break;
    }
    return sb.toString();
}
 
Example 12  Project: jdk8u-dev-jdk  File: Token.java
public String toString() {
    StringBuilder sb = new StringBuilder();
    switch(ttype) {
    case StreamTokenizer.TT_EOL:
        sb.append("ttype=TT_EOL");
        break;
    case StreamTokenizer.TT_EOF:
        sb.append("ttype=TT_EOF");
        break;
    case StreamTokenizer.TT_NUMBER:
        sb.append("ttype=TT_NUM,").append("nval="+nval);
        break;
    case StreamTokenizer.TT_WORD:
        if (sval == null) {
            sb.append("ttype=TT_WORD:IDENTIFIER");
        } else {
            sb.append("ttype=TT_WORD:").append("sval="+sval);
        }
        break;
    default:
        if (ttype == (int)'"') {
            sb.append("ttype=TT_STRING:").append("sval="+sval);
        } else {
            sb.append("ttype=TT_CHAR:").append((char)ttype);
        }
        break;
    }
    return sb.toString();
}
 
Example 13  Project: openjdk-jdk8u  File: ScriptingFunctions.java
/**
 * Break a string into tokens, honoring quoted arguments and escaped spaces.
 *
 * @param str a {@link String} to tokenize.
 * @return a {@link List} of {@link String}s representing the tokens that
 * constitute the string.
 */
public static List<String> tokenizeString(final String str) {
    final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
    tokenizer.resetSyntax();
    tokenizer.wordChars(0, 255);
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.commentChar('#');
    tokenizer.quoteChar('"');
    tokenizer.quoteChar('\'');
    final List<String> tokenList = new ArrayList<>();
    final StringBuilder toAppend = new StringBuilder();
    while (nextToken(tokenizer) != StreamTokenizer.TT_EOF) {
        final String s = tokenizer.sval;
        // The tokenizer understands about honoring quoted strings and recognizes
        // them as one token that possibly contains multiple space-separated words.
        // It does not recognize quoted spaces, though, and will split after the
        // escaping \ character. This is handled here.
        if (s.endsWith("\\")) {
            // omit trailing \, append space instead
            toAppend.append(s.substring(0, s.length() - 1)).append(' ');
        } else {
            tokenList.add(toAppend.append(s).toString());
            toAppend.setLength(0);
        }
    }
    if (toAppend.length() != 0) {
        tokenList.add(toAppend.toString());
    }
    return tokenList;
}
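A standalone re-creation of the behaviour described in the comment above (this is not the Nashorn class itself; its private nextToken wrapper is replaced by a direct nextToken() call), showing that a quoted argument stays together and that a trailing backslash joins two tokens with a space.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class TokenizeStringDemo {
    public static void main(String[] args) throws IOException {
        String str = "ls -l \"My Documents\" first\\ second";
        StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(str));
        tokenizer.resetSyntax();
        tokenizer.wordChars(0, 255);
        tokenizer.whitespaceChars(0, ' ');
        tokenizer.commentChar('#');
        tokenizer.quoteChar('"');
        tokenizer.quoteChar('\'');
        List<String> tokenList = new ArrayList<>();
        StringBuilder toAppend = new StringBuilder();
        while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
            String s = tokenizer.sval;
            if (s.endsWith("\\")) {
                // a token ending in '\' is glued to the next one with a space
                toAppend.append(s, 0, s.length() - 1).append(' ');
            } else {
                tokenList.add(toAppend.append(s).toString());
                toAppend.setLength(0);
            }
        }
        if (toAppend.length() != 0) {
            tokenList.add(toAppend.toString());
        }
        System.out.println(tokenList); // [ls, -l, My Documents, first second]
    }
}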
 
Example 14  Project: LuckPerms  File: BooleanExpressionCompiler.java
@Override
protected Token computeNext() {
    if (this.end) {
        return endOfData();
    }
    try {
        int token = this.tokenizer.nextToken();
        switch (token) {
            case StreamTokenizer.TT_EOF:
                this.end = true;
                return ConstantToken.EOF;
            case StreamTokenizer.TT_WORD:
                return new VariableToken(this.tokenizer.sval);
            case '(':
                return ConstantToken.OPEN_BRACKET;
            case ')':
                return ConstantToken.CLOSE_BRACKET;
            case '&':
                return ConstantToken.AND;
            case '|':
                return ConstantToken.OR;
            case '!':
                return ConstantToken.NOT;
            default:
                throw new LexerException("Unknown token: " + ((char) token) + "(" + token + ")");
        }
    } catch (IOException e) {
        throw new LexerException(e);
    }
}
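The switch above relies on the operator characters coming back as single-character tokens. The LuckPerms tokenizer setup is not part of this excerpt, so the configuration below is only an assumption: with the default syntax table, '(', ')', '&', '|' and '!' are ordinary characters and are returned as their own ttype values.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class BooleanExprLexDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer tokenizer = new StreamTokenizer(new StringReader("(alpha & beta) | !gamma"));
        int token;
        while ((token = tokenizer.nextToken()) != StreamTokenizer.TT_EOF) {
            if (token == StreamTokenizer.TT_WORD) {
                System.out.println("VARIABLE " + tokenizer.sval);
            } else {
                // ordinary characters such as '(', ')', '&', '|' and '!' arrive as their own ttype
                System.out.println("OPERATOR " + (char) token);
            }
        }
    }
}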
 
Example 15  Project: matrix-toolkits-java  File: MatrixVectorReader.java
/**
 * Reads a long
 */
private long getLong() throws IOException {
    st.nextToken();
    if (st.ttype == StreamTokenizer.TT_WORD)
        return Long.parseLong(st.sval);
    else if (st.ttype == StreamTokenizer.TT_EOF)
        throw new EOFException("End-of-File encountered during parsing");
    else
        throw new IOException("Unknown token found during parsing");
}
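Checking TT_WORD rather than TT_NUMBER for a long suggests that the reader disables the built-in number parsing so that large integers are not routed through a double-valued nval. The sketch below illustrates that idea; it is an assumption, not the actual MatrixVectorReader configuration.

import java.io.EOFException;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class LongTokenDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("9007199254740993"));
        st.resetSyntax();                 // drop the default number parsing
        st.wordChars('0', '9');           // digits become word characters...
        st.wordChars('-', '-');           // ...as does a leading minus sign
        st.whitespaceChars(0, ' ');
        st.nextToken();
        if (st.ttype == StreamTokenizer.TT_WORD) {
            System.out.println(Long.parseLong(st.sval)); // exact: 9007199254740993
        } else if (st.ttype == StreamTokenizer.TT_EOF) {
            throw new EOFException("End-of-File encountered during parsing");
        }
    }
}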
 
Example 16  Project: reladomo  File: CaseSelectorMiddleParserState.java
@Override
public ComputedAttributeParserState parse(StreamTokenizer st) throws IOException, ParseException
{
    ComputedAttributeParserState nextState = null;
    while(nextState == null && st.ttype != StreamTokenizer.TT_EOF)
    {
        int nextToken = st.nextToken();
        if (nextToken != StreamTokenizer.TT_EOL && nextToken != StreamTokenizer.TT_EOF)
        {
            ArrayList<Expression> stack = this.getParser().getStateStack();
            switch(nextToken)
            {
                case ',':
                    nextState = new CaseSelectorBeginParserState(this.getParser());
                    break;
                case ':':
                    nextState = new ExpressionBeginState(this.getParser());
                    break;
                case StreamTokenizer.TT_NUMBER:
                    throw new ParseException("unexpected number "+st.nval+" in expression "+this.getParser().getFormula()+" in "+this.getParser().getDiagnosticMessage());
                case StreamTokenizer.TT_WORD:
                    throw new ParseException("unexpected word "+st.sval+" in expression "+this.getParser().getFormula()+" in "+this.getParser().getDiagnosticMessage());
                default:
                    char ch = (char)st.ttype;
                    throw createUnexpectedCharacterException(ch, ",:");
            }
        }
    }
    return nextState;
}
 
Example 17  Project: samoa  File: ArffLoader.java
public Instance readInstanceDense() {
    Instance instance = new DenseInstance(this.instanceInformation.numAttributes() + 1);
    //System.out.println(this.instanceInformation.numAttributes());
    int numAttribute = 0;
    try {
        while (numAttribute == 0 && streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
            //For each line
            while (streamTokenizer.ttype != StreamTokenizer.TT_EOL
                    && streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
                //For each item
                if (streamTokenizer.ttype == StreamTokenizer.TT_NUMBER) {
                    //System.out.println(streamTokenizer.nval + "Num ");
                    this.setValue(instance, numAttribute, streamTokenizer.nval, true);
                    numAttribute++;

                } else if (streamTokenizer.sval != null && (streamTokenizer.ttype == StreamTokenizer.TT_WORD
                        || streamTokenizer.ttype == 34)) { // ttype 34 is the '"' quote character
                    //System.out.println(streamTokenizer.sval + "Str");
                    boolean isNumeric = attributes.get(numAttribute).isNumeric();
                    double value;
                    if ("?".equals(streamTokenizer.sval)) {
                        value = Double.NaN; //Utils.missingValue();
                    } else if (isNumeric == true) {
                        value = Double.valueOf(streamTokenizer.sval).doubleValue();
                    } else {
                        value = this.instanceInformation.attribute(numAttribute).indexOfValue(streamTokenizer.sval);
                    }

                    this.setValue(instance, numAttribute, value, isNumeric);
                    numAttribute++;
                }
                streamTokenizer.nextToken();
            }
            streamTokenizer.nextToken();
            //System.out.println("EOL");
        }


    } catch (IOException ex) {
        Logger.getLogger(ArffLoader.class.getName()).log(Level.SEVERE, null, ex);
    }
    return (numAttribute > 0) ? instance : null;
}
 
Example 18  Project: incubator-samoa  File: ArffLoader.java
private InstanceInformation getHeader() {
  //commented JD
  //this.range.setUpper(10000); //TO DO: Create a new range object with isInRange that does not need the upper limit
  String relation = "file stream";
  //System.out.println("RELATION " + relation);
  //inputAttributes = new ArrayList<Attribute>();
  //outputAttributes = new ArrayList<Attribute>();
  //ArrayList<Attribute>
  auxAttributes = new ArrayList<Attribute>();//JD
  int numAttributes = 0;
  try {
    streamTokenizer.nextToken();
    while (streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
      //For each line
      //if (streamTokenizer.ttype == '@') {
      if (streamTokenizer.ttype == StreamTokenizer.TT_WORD && streamTokenizer.sval.startsWith("@") == true) {
        //streamTokenizer.nextToken();
        String token = streamTokenizer.sval.toUpperCase();
        if (token.startsWith("@RELATION")) {
          streamTokenizer.nextToken();
          relation = streamTokenizer.sval;
          //  System.out.println("RELATION " + relation);
        } else if (token.startsWith("@ATTRIBUTE")) {
          streamTokenizer.nextToken();
          String name = streamTokenizer.sval;
          //System.out.println("* " + name);
          if (name == null) {
            name = Double.toString(streamTokenizer.nval);
          }
          streamTokenizer.nextToken();
          String type = streamTokenizer.sval;
          // System.out.println("* " + name + ":" + type + " ");
          if (streamTokenizer.ttype == '{') {
            streamTokenizer.nextToken();
            List<String> attributeLabels = new ArrayList<String>();
            while (streamTokenizer.ttype != '}') {

              if (streamTokenizer.sval != null) {
                attributeLabels.add(streamTokenizer.sval);
                // System.out.print(streamTokenizer.sval + ",");
              } else {
                attributeLabels.add(Double.toString(streamTokenizer.nval));
                //System.out.print(streamTokenizer.nval + ",");
              }

              streamTokenizer.nextToken();
            }
            // System.out.println();
            //attributes.add(new Attribute(name, attributeLabels));
            //commented JD
            /* if (this.range.isInRange(numAttribute)) {
                           outputAttributes.add(new Attribute(name, attributeLabels));
                           } else {
                           inputAttributes.add(new Attribute(name, attributeLabels));
                           }*/
            auxAttributes.add(new Attribute(name, attributeLabels));
            ++numAttributes;
          } else {
            // Add attribute
            //commented JD
            /*if (this.range.isInRange(numAttribute)) {
                           outputAttributes.add(new Attribute(name));
                           } else {
                           inputAttributes.add(new Attribute(name));
                           }*/
            auxAttributes.add(new Attribute(name));
            ++numAttributes;
          }

        } else if (token.startsWith("@DATA")) {
          //System.out.print("END");
          streamTokenizer.nextToken();
          break;
        }
      }
      streamTokenizer.nextToken();
    }
    if (range != null) {
      this.range.setUpper(numAttributes);
    }
    /*if (range==null) //is single-target. All instances should go to inputAtrributes (see setClassIndex(int) from InstanceInformation )
           inputAttributes=auxAttributes;
           else//is multi-target
           {
           this.range.setUpper(numAttribute);
           for (int i=0; i<auxAttributes.size();i++)
           {
           //if (this.range.isInRange(i))
           //	outputAttributes.add(auxAttributes.get(i));
           //else
           inputAttributes.add(auxAttributes.get(i));

           }
           }*/

  } catch (IOException ex) {
    Logger.getLogger(ArffLoader.class.getName()).log(Level.SEVERE, null, ex);
  }
  // this.range.setUpper(inputAttributes.size()+outputAttributes.size());
  return new InstanceInformation(relation, auxAttributes);
}
 
Example 19  Project: moa  File: ArffLoader.java
/**
 * Reads a sparse instance and returns a dense one.
 *
 * @return the instance
 */
private Instance readDenseInstanceSparse() {
    //Returns a dense instance
    Instance instance = newDenseInstance(this.instanceInformation.numAttributes());
    //System.out.println(this.instanceInformation.numAttributes());
    int numAttribute;
    try {
        //while (streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
        streamTokenizer.nextToken(); // Remove the '{' char
        //For each line
        while (streamTokenizer.ttype != StreamTokenizer.TT_EOL
                && streamTokenizer.ttype != StreamTokenizer.TT_EOF) {
            while (streamTokenizer.ttype != '}') {
                //For each item
                //streamTokenizer.nextToken();
                //while (streamTokenizer.ttype != '}'){
                //System.out.print(streamTokenizer.nval+":");
                numAttribute = (int) streamTokenizer.nval;
                streamTokenizer.nextToken();

                if (streamTokenizer.ttype == StreamTokenizer.TT_NUMBER) {
                    //System.out.print(streamTokenizer.nval + " ");
                    this.setValue(instance, numAttribute, streamTokenizer.nval, true);
                    //numAttribute++;

                } else if (streamTokenizer.sval != null && (streamTokenizer.ttype == StreamTokenizer.TT_WORD
                        || streamTokenizer.ttype == 34)) { // ttype 34 is the '"' quote character
                    //System.out.print(streamTokenizer.sval + "/"+this.instanceInformation.attribute(numAttribute).indexOfValue(streamTokenizer.sval)+" ");
                    if (this.auxAttributes.get(numAttribute).isNumeric()) {
                        this.setValue(instance, numAttribute, Double.valueOf(streamTokenizer.sval).doubleValue(), true);
                    } else {
                        this.setValue(instance, numAttribute, this.instanceInformation.attribute(numAttribute).indexOfValue(streamTokenizer.sval), false);
                        //numAttribute++;
                    }
                }
                streamTokenizer.nextToken();
            }
            streamTokenizer.nextToken(); //Remove the '}' char
        }
        streamTokenizer.nextToken();
        //System.out.println("EOL");
        //}

    } catch (IOException ex) {
        Logger.getLogger(ArffLoader.class.getName()).log(Level.SEVERE, null, ex);
    }
    return instance;
}
 
Example 20  Project: gama  File: FileSourceBase.java
/**
 * Reads a word, a number, a string, EOL or EOF, or generates a parse error. If EOL is read, the string "EOL" is
 * returned; if EOF is read, "EOF" is returned. If a number is read, it is converted to a string as follows: an
 * integer is converted without a dot or comma and without leading zeros; a float keeps its fractional part, with a
 * dot as the separator.
 *
 * @return A string.
 */
protected String getWordOrNumberOrStringOrEolOrEof() throws IOException {
	final int tok = st.nextToken();

	if (tok == StreamTokenizer.TT_NUMBER) {
		if (st.nval - (int) st.nval != 0) { return Double.toString(st.nval); }

		return Integer.toString((int) st.nval);
	}

	if (tok == QUOTE_CHAR) { return st.sval; }

	if (tok == StreamTokenizer.TT_WORD) { return st.sval; }

	if (tok == StreamTokenizer.TT_EOF) { return "EOF"; }

	if (tok == StreamTokenizer.TT_EOL) { return "EOL"; }

	parseError("expecting a word, a number, a string, EOL or EOF, " + gotWhat(tok));
	return null; // never happens: parseError unconditionally throws an exception
}
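Note that TT_EOL is only returned when end-of-line reporting is switched on with eolIsSignificant(true); the FileSourceBase setup is not shown in this excerpt, so the sketch below only illustrates that prerequisite together with the EOL/EOF and integer/float conventions described in the comment above.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class EolEofDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("node 1\nedge 2.5\n"));
        st.eolIsSignificant(true); // without this, line ends are silently treated as whitespace
        int tok;
        while ((tok = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (tok == StreamTokenizer.TT_EOL) {
                System.out.println("EOL");
            } else if (tok == StreamTokenizer.TT_NUMBER) {
                // integers print without a fractional part, as in the method above
                double n = st.nval;
                System.out.println(n == (int) n ? Integer.toString((int) n) : Double.toString(n));
            } else {
                System.out.println(st.sval);
            }
        }
        System.out.println("EOF");
    }
}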