下面列出了怎么用org.eclipse.jface.text.rules.IToken的API类实例代码及写法,或者点击链接到github查看源代码。
/**
 * Word rule: succeeds only when a complete word (per {@code wordDetector})
 * is immediately followed by one of {@code endCharacters}.
 *
 * NOTE(review): on success the terminating end character has already been
 * consumed by the look-ahead read — confirm callers expect that.
 */
public IToken evaluate(ICharacterScanner scanner) {
int c = scanner.read();
if ((c != ICharacterScanner.EOF) && wordDetector.isWordStart((char)c)) {
// readLength counts every read() so the scanner can be fully rewound.
readLength = 1;
do {
c = scanner.read();
readLength++;
} while ((c != ICharacterScanner.EOF) && wordDetector.isWordPart((char)c));
// The character that ended the word must be one of the accepted terminators.
for (char ch: endCharacters) {
if (c == ch) {
return successToken;
}
}
// No terminator matched: rewind all but one read; the final unread
// below puts back the very first character as well.
while (readLength > 1) {
readLength--;
scanner.unread();
}
}
scanner.unread();
return Token.UNDEFINED;
}
/**
 * Returns the next token, draining any in-progress rich-text token before
 * advancing the underlying lexer-token iterator.
 */
@Override
public IToken nextToken() {
// First finish emitting the sub-tokens of the current rich-text token.
if (currentRichTextToken != null) {
if (currentRichTextToken.hasNext()) {
return currentRichTextToken.nextToken();
}
currentRichTextToken = null;
}
if (!getIterator().hasNext()) {
return Token.EOF;
}
ILexerTokenRegion region = getIterator().next();
int type = region.getLexerTokenType();
boolean richText = type >= 0 && allTokenTypesAsString[type] != null;
if (!richText) {
setCurrentToken(region);
return createToken(region);
}
currentRichTextToken = createRichTextToken(allTokenTypesAsString[type], region);
return currentRichTextToken.nextToken();
}
/**
 * Matches a single punctuation character and returns the success token.
 *
 * @param scanner the character scanner positioned at the candidate character
 * @return the success token on a match, otherwise {@link Token#UNDEFINED}
 */
@Override
public IToken evaluate(ICharacterScanner scanner) {
int read = scanner.read();
if (read == ICharacterScanner.EOF) {
// Fix: push the EOF read back so the scanner position is unchanged.
// RuleBasedScanner.read() advances the offset even at EOF, and JFace
// rules are expected to unread before returning UNDEFINED.
scanner.unread();
return Token.UNDEFINED;
}
switch (read) {
case ':':
case ';':
case '.':
case '(':
case ')':
case '[':
case ']':
case '{':
case '}':
return getSuccessToken();
default:
scanner.unread();
return Token.UNDEFINED;
}
}
/**
 * Resumable evaluation: when resuming, first skips forward to the ',' that
 * terminates the interrupted element (ignoring commas inside quoted
 * strings), then evaluates normally via {@code doEvaluate}.
 *
 * NOTE(review): the inString flag toggles on every '"'; escaped quotes are
 * not handled — confirm that is acceptable for this format.
 */
public IToken evaluate(ICharacterScanner scanner, boolean resume) {
if (resume) {
boolean inString = false;
do {
int c = scanner.read();
if (((char) c) == ',' && !inString)
break;
else if (((char) c) == '@') {
// A new annotation starts before the separator: back off and give up.
scanner.unread();
return Token.UNDEFINED;
} else if (((char) c) == '"' && !inString)
inString = true;
else if (((char) c) == '"' && inString)
inString = false;
else if (c == ICharacterScanner.EOF)
return Token.UNDEFINED;
} while (true);
}
return doEvaluate(scanner, 1);
}
/**
 * Creates the partitioner and sets up the appropriate rules.
 */
public SamplePartitionScanner() {
IToken string = new Token(LANG_STRING);
IToken rawString = new Token(LANG_RAW_STRING);
IToken character = new Token(LANG_CHARACTER);
IToken singleComment = new Token(LANG_SINGLE_COMMENT);
IToken multiComment = new Token(LANG_MULTI_COMMENT);
// Rule order matters: the first matching rule wins.
setPredicateRules(new IPredicateRule[] {
new MultiLineRule("`", "`", rawString, NO_ESCAPE, true),
new MultiLineRule("\"", "\"", string, '\\', true),
new SingleLineRule("'", "'", character, '\\', true),
new EndOfLineRule("//", singleComment, NO_ESCAPE),
new MultiLineRule("/*", "*/", multiComment, NO_ESCAPE, true),
});
}
/**
 * Applies syntax-coloring styles to the given text layout by running the
 * recipe token scanner over the layout's text.
 *
 * @param textLayout the layout whose text is scanned and styled in place
 */
public static void configure(TextLayout textLayout) {
String text = textLayout.getText();
Document doc = new Document(text);
ITokenScanner scanner = getRecipeScanner(doc);
scanner.setRange(doc, 0, doc.getLength());
IToken token;
// Fix: use isEOF() instead of identity comparison with Token.EOF —
// a scanner that returns its own EOF instance would loop forever.
while (!(token = scanner.nextToken()).isEOF()) {
int offset = scanner.getTokenOffset();
int length = scanner.getTokenLength();
Object data = token.getData();
if (data instanceof TextStyle) { // instanceof already rejects null
textLayout.setStyle((TextStyle) data, offset, offset + length - 1);
}
}
}
/**
 * Pattern-rule evaluation: when resuming, only the end sequence is searched
 * for; otherwise the start sequence must be detected first. Mirrors the
 * structure of JFace's {@code PatternRule#doEvaluate}.
 */
@Override
protected IToken doEvaluate(ICharacterScanner scanner, boolean resume) {
if (resume) {
if (endSequenceDetected(scanner))
return fToken;
} else {
int c= scanner.read();
if (c == fStartSequence[0]) {
if (sequenceDetected(scanner, fStartSequence, false)) {
if (endSequenceDetected(scanner))
return fToken;
}
}
}
// NOTE(review): in the resume branch this unreads a character this method
// did not read — same as JFace's PatternRule; callers must tolerate it.
scanner.unread();
return Token.UNDEFINED;
}
/**
 * Creates the partitioner and sets up the appropriate rules.
 */
public EditorConfigPartitionScanner() {
super();
IToken comment = new Token(COMMENT);
IToken sectionName = new Token(SECTION);
IToken propertyValue = new Token(PROPERTY_VALUE);
IToken key = new Token(IDocument.DEFAULT_CONTENT_TYPE);
// Rule order matters: the first matching rule wins.
setPredicateRules(new IPredicateRule[] {
// Leading whitespace keeps the default (key) content type.
new LeadingWhitespacePredicateRule(key, "\t"), //$NON-NLS-1$
new LeadingWhitespacePredicateRule(key, " "), //$NON-NLS-1$
// Comments ('!' comments were deliberately left disabled).
new EndOfLineRule("#", comment, (char) 0, true), //$NON-NLS-1$
// Sections, e.g. [*.py].
new SingleLineRule("[", "]", sectionName, '\\', true, true), //$NON-NLS-1$
// Property values after '=', ':' or a separating blank/tab.
new SingleLineRule("=", null, propertyValue, '\\', true, true), //$NON-NLS-1$
new SingleLineRule(":", null, propertyValue, '\\', true, true), //$NON-NLS-1$
new SingleLineRule(" ", null, propertyValue, '\\', true, true), //$NON-NLS-1$
new SingleLineRule("\t", null, propertyValue, '\\', true, true), //$NON-NLS-1$
// Special case: empty comment lines.
new EmptyCommentRule(comment),
});
}
/**
 * Creates a token carrying the highlighting attribute for either a template
 * delimiter or a template body, depending on the {@code delimiter} flag.
 */
protected IToken createToken() {
String attributeId;
if (delimiter) {
attributeId = HighlightingConfiguration.TEMPLATE_DELIMITER_ID;
} else {
attributeId = HighlightingConfiguration.TEMPLATE_ID;
}
return new Token(getAttribute(attributeId));
}
/**
 * Sets up partitioning rules for property values and comments.
 */
public HPartitionScanner() {
IToken comment = new Token(COMMENT);
IToken propertyValue = new Token(PROPERTY_VALUE);
IPredicateRule valueRule =
new SingleLineRule(AssistConstants.PARAM_DELIM_EQ, null, propertyValue, '\\', true, true);
IPredicateRule commentRule =
new SingleLineRule("#", null, comment, (char) 0, true, true);
setPredicateRules(new IPredicateRule[] { valueRule, commentRule });
}
/**
 * Consumes a maximal run of operator characters and returns the operator
 * token, or {@link Token#UNDEFINED} when the next character is no operator.
 */
public IToken evaluate(ICharacterScanner scanner) {
int first = scanner.read();
if (!isOperator((char) first)) {
scanner.unread();
return Token.UNDEFINED;
}
// Swallow the whole operator run, then push back the terminating character.
while (isOperator((char) scanner.read())) {
// keep consuming
}
scanner.unread();
return fToken;
}
/**
 * Looks up the single-character token for the next character; on a miss the
 * character is pushed back and {@link Token#UNDEFINED} is returned.
 */
public IToken evaluate(ICharacterScanner scanner, boolean resume)
{
char read = (char) scanner.read();
successToken = characterTokenMap.get(read);
if (successToken != null)
{
return successToken;
}
scanner.unread();
successToken = Token.UNDEFINED;
return successToken;
}
/**
 * Tracks the last token to decide which sub-partition scanner handles the
 * following content (e.g. switching into DTD mode inside a DOCTYPE).
 */
@Override
public void setLastToken(IToken token) {
super.setLastToken(token);
if (token == null) {
return;
}
if (!(token.getData() instanceof String)) {
current = TYPE_DEFAULT;
return;
}
String contentType = (String) token.getData();
if (XMLSourceConfiguration.DOCTYPE.equals(contentType)) {
// A '[' at the end of the DOCTYPE opens an internal DTD subset:
// switch to the DTD scanner and remember where to resume.
if (token instanceof ExtendedToken && ((ExtendedToken) token).getContents().endsWith("[")) { //$NON-NLS-1$
current = TYPE_DTD;
resumeToken = token;
super.setLastToken(null);
}
} else if (XMLSourceConfiguration.DEFAULT.equals(contentType) || IDocument.DEFAULT_CONTENT_TYPE.equals(contentType)) {
current = TYPE_DEFAULT;
} else {
// Delegate to the first sub-scanner that owns this content type.
for (int i = 0; i < subPartitionScanners.length; ++i) {
if (subPartitionScanners[i].hasContentType(contentType)) {
current = i;
break;
}
}
}
}
/**
 * Builds the syntax-highlighting rules: strings, annotations, numbers and
 * the given keywords; everything else falls back to the default token.
 *
 * @param keywords the words to highlight with the keyword attribute
 */
protected void initialize(String[] keywords) {
ColorRegistry registry = JFaceResources.getColorRegistry();
IToken keywordToken = new Token(new TextAttribute(registry.get(KEYWORD_COLOR), null, SWT.BOLD));
IToken stringToken = new Token(new TextAttribute(registry.get(STRING_COLOR)));
IToken numberToken = new Token(new TextAttribute(registry.get(NUMBER_COLOR)));
IToken annotationToken = new Token(new TextAttribute(registry.get(ANNOTATION_COLOR)));
IToken defaultToken = new Token(new TextAttribute(registry.get(DEFAULT_COLOR)));
// Keywords and normal (default) text share a single word rule.
WordRule wordRule = new WordRule(new WordDetector(), defaultToken);
for (String keyword : keywords) {
wordRule.addWord(keyword, keywordToken);
}
setRules(new IRule[] {
new SingleLineRule("\"", "\"", stringToken, '\\'), // strings
new MultiLineRule("[", "]", annotationToken), // annotations
new NumberRule(numberToken), // numbers
wordRule,
});
}
/**
 * Reads characters while {@code isOK} accepts them; returns UNDEFINED on
 * EOF or when only whitespace was consumed (rewinding in that case).
 *
 * NOTE(review): relies on the read/unread/rewind helpers to keep
 * {@code charsRead} in sync — inferred from their names, not visible here.
 */
public IToken evaluate( ICharacterScanner scanner){
reinit();
int c = 0;
// carry on reading until we find a bad char
// int chars = 0;
while (isOK(c = read(scanner), scanner)) {
// add character to buffer
if (c == ICharacterScanner.EOF) {
return Token.UNDEFINED;
}
whiteSpaceOnly = whiteSpaceOnly && (Character.isWhitespace((char) c));
}
unread(scanner);
// if we have only read whitespace characters, go back to where evaluation
// started and return undefined token
if (whiteSpaceOnly) {
rewind(scanner, charsRead);
return Token.UNDEFINED;
}
return token;
}
/**
 * Emits a single default-highlighting token for the whole range, then EOF
 * on every subsequent call.
 */
@Override
public IToken nextToken() {
if (tokenReturned) {
return Token.EOF;
}
tokenReturned = true;
return new Token(getAttribute(HighlightingStyles.DEFAULT_ID));
}
/**
 * Resolves the text attribute for a token, expanding String token data into
 * a full theme scope before delegating to the superclass. Whitespace tokens
 * reuse the previously computed attribute.
 */
@Override
protected TextAttribute getTokenTextAttribute(IToken token)
{
Object data = token.getData();
if (data instanceof String)
{
String tokenScope = (String) data;
storeScope(tokenScope);
// Combine the token's scope with the ambient scope.
String fullScope;
if (tokenScope.length() == 0)
{
fullScope = scope;
}
else if (scope.length() == 0)
{
fullScope = tokenScope;
}
else if (scope.endsWith(tokenScope))
{
fullScope = scope;
}
else
{
fullScope = scope + ' ' + tokenScope;
}
IToken themed = getThemeManager().getToken(fullScope);
lastAttribute = super.getTokenTextAttribute(themed);
return lastAttribute;
}
if (token.isWhitespace())
{
// Whitespace inherits the attribute of the surrounding scope.
return lastAttribute;
}
lastAttribute = super.getTokenTextAttribute(token);
return lastAttribute;
}
/**
 * Registers every word in {@code words} with {@code wordRule}, all mapped
 * to a single token created for {@code tokenType}.
 *
 * @param wordRule the rule to extend
 * @param words the words to register
 * @param tokenType the token type shared by all the words
 */
private void addWordsToRule(WordRule wordRule, String[] words, CSSTokenType tokenType)
{
IToken sharedToken = createToken(tokenType);
for (int i = 0; i < words.length; i++)
{
wordRule.addWord(words[i], sharedToken);
}
}
/**
 * Records the given token for the pending region; returns {@code false}
 * when the region is empty and nothing needs to be emitted.
 */
public boolean saveToken(IToken token) {
length = fTokenOffset - offset;
if (length != 0) {
this.token = token;
return true;
}
return false;
}
/**
 * Matches a single structural character (=, #, ',', {, }) or an escaped
 * double quote (\"), returning the rule token on success.
 */
public IToken evaluate(ICharacterScanner scanner) {
int first = scanner.read();
switch ((char) first) {
case '=':
case '#':
case ',':
case '{':
case '}':
return fToken;
case '\\': {
int second = scanner.read();
if (((char) second) == '"') {
return fToken;
}
scanner.unread(); // push back the character after the backslash
break;
}
default:
break;
}
scanner.unread();
return Token.UNDEFINED;
}
/**
 * Matches a maximal nonempty run of characters from {@code charSet}.
 */
public IToken evaluate(ICharacterScanner scanner) {
int ch = scanner.read();
if (!charSet.contains((char) ch)) {
scanner.unread();
return Token.UNDEFINED;
}
// Consume the run, then push back the first character that broke it.
do {
ch = scanner.read();
} while (ch != ICharacterScanner.EOF && charSet.contains((char) ch));
scanner.unread();
return successToken;
}
/**
 * Checks whether the text at a backslash starts a known LaTeX environment:
 * reads "\begin", optional whitespace, then "{name}"; on a known name it
 * continues scanning for the matching \end, otherwise it rewinds and
 * classifies the text as plain TeX.
 *
 * NOTE(review): the hard-coded 6 is the length of "\begin"; the "- 2"
 * rewind offsets appear to account for characters the caller already
 * consumed — confirm against checkForCommand/unReadScanner.
 */
private IToken checkForEnv() {
int o = checkForCommand(BEGIN, 1);
if (o == 0) {
// Not a \begin command at all: emit plain TeX.
fTokenLength += 2;
return fTokens[TEX];
}
int offsetEnd = fTokenOffset;
offsetEnd += 6;
int ch = fScanner.read();
offsetEnd++;
// Skip whitespace between \begin and '{'.
while (Character.isWhitespace(ch)) {
ch = fScanner.read();
offsetEnd++;
}
if (ch != '{'){
unReadScanner(offsetEnd - fTokenOffset - 2);
fTokenLength += 2;
return fTokens[TEX];
}
// Collect the environment name up to '}', stopping early on '{', '\' or EOF.
final StringBuilder b = new StringBuilder();
ch = fScanner.read();
offsetEnd++;
while (ch != '}' && ch != ICharacterScanner.EOF && ch != '{' && ch != '\\'){
b.append((char)ch);
ch = fScanner.read();
offsetEnd++;
}
String envName = b.toString();
if (getEnvIndex(envName) != TEX) {
return checkForEndEnv(envName, offsetEnd);
}
else {
// Unknown environment: rewind and treat as plain TeX.
unReadScanner(offsetEnd - fTokenOffset - 2);
fTokenLength += 2;
return fTokens[TEX];
}
}
/**
 * Scans a balanced bracket group, honoring nesting, '%' comments and
 * backslash escapes, and sets {@code fTokenLength} for the matched span.
 *
 * @param openChar bracket character that increases nesting
 * @param closeChar bracket character that closes the group
 * @param type index into {@code fTokens} for the token to return
 * @param currentOffset scanner offset at the point of the call
 */
private IToken scanBracket(int openChar, int closeChar, int type, int currentOffset) {
int ch;
int offsetEnd = currentOffset;
int stack = 0;
while (true) {
ch = fScanner.read();
offsetEnd++;
if (ch == closeChar) {
stack--;
if (stack < 0) {
// Matching close for the original opening bracket.
fTokenLength = offsetEnd - fTokenOffset;
return fTokens[type];
}
}
else if (ch == openChar) {
stack++;
}
else if (ch == '%') {
// Rest of the line is a comment; skip it entirely.
offsetEnd += ignoreComment();
}
else if (ch == '\\') {
// Escaped character: consume it so an escaped bracket is not counted.
ch = fScanner.read();
offsetEnd++;
}
else if (ch == ICharacterScanner.EOF) {
// Unbalanced group: return what we have, excluding the EOF read.
fTokenLength = offsetEnd - fTokenOffset - 1;
return fTokens[type];
}
}
}
/**
 * Matches an annotation only when the configured source version supports
 * annotations; otherwise the scanner is left untouched.
 */
public IToken evaluate(ICharacterScanner scanner) {
if (!fIsVersionMatch) {
return Token.UNDEFINED;
}
ResettableScanner resettable = new ResettableScanner(scanner);
if (resettable.read() != '@') {
resettable.reset();
return Token.UNDEFINED;
}
return readAnnotation(resettable);
}
/**
 * Delegates to the wrapped rule; when scanning through a
 * {@code SequenceCharacterScanner}, sequence detection is suspended for the
 * duration of the evaluation so the rule sees the raw characters.
 */
public IToken evaluate(ICharacterScanner scanner, boolean resume) {
if (!(scanner instanceof SequenceCharacterScanner)) {
return rule.evaluate(scanner, resume);
}
SequenceCharacterScanner sequenceScanner = (SequenceCharacterScanner) scanner;
sequenceScanner.setSequenceIgnored(true);
try {
return rule.evaluate(scanner, resume);
} finally {
// Always restore sequence detection, even if the rule throws.
sequenceScanner.setSequenceIgnored(false);
}
}
/**
 * Returns the number token when the sub-reader contains a number,
 * otherwise {@code null}.
 */
@Override
public IToken doEvaluateToken(ICharacterReader subReader) {
return numberRule.doEvaluate(subReader) ? numberToken : null;
}
/**
 * Creates a rule matching \begin{...}...\end{...} blocks for the given
 * environment names.
 *
 * @param envNames environment names this rule recognizes
 * @param star whether a starred (*) variant is also accepted
 * @param token token returned on a successful match
 */
public TexEnvironmentRule(String[] envNames, boolean star, IToken token) {
fStartSequence = "\\begin".toCharArray();
fEndSequence = "\\end".toCharArray();
fToken = token;
fStar = star;
fEnvName = new char[envNames.length][];
for (int i = 0; i < envNames.length; i++) {
fEnvName[i] = envNames[i].toCharArray();
}
}
/**
 * Builds the presentation for a region by merging adjacent tokens that
 * share the same text style into a single styled range.
 */
public void createPresentation(TextPresentation presentation, ITypedRegion region) {
if (fScanner == null) {
// will be removed if deprecated constructor will be removed
addRange(presentation, region.getOffset(), region.getLength(), fDefaultTextStyle);
return;
}
int lastStart = region.getOffset();
int length = 0;
boolean firstToken = true;
IToken lastToken = Token.UNDEFINED;
TextStyle lastTextStyle = getTokenTextStyle(lastToken);
fScanner.setRange(fDocument, lastStart, region.getLength());
while (true) {
IToken token = fScanner.nextToken();
if (token.isEOF())
break;
TextStyle textStyle = getTokenTextStyle(token);
if (lastTextStyle != null && lastTextStyle.equals(textStyle)) {
// Same style as the pending range: just extend it.
length += fScanner.getTokenLength();
firstToken = false;
} else {
// Style changed: flush the pending range (unless none was started yet).
if (!firstToken)
addRange(presentation, lastStart, length, lastTextStyle);
firstToken = false;
lastToken = token;
lastTextStyle = textStyle;
lastStart = fScanner.getTokenOffset();
length = fScanner.getTokenLength();
}
}
// Flush the final pending range.
addRange(presentation, lastStart, length, lastTextStyle);
}
/**
 * Optionally consumes the next character when it equals {@code altA};
 * either way the rule has already matched, so the success token is
 * returned. When the character differs it is pushed back.
 */
protected IToken currentOr(char altA, ICharacterScanner scanner) {
if (scanner.read() != altA) {
scanner.unread();
}
return getSuccessToken();
}
/**
 * Case-insensitive word lookup: uppercases the scanned word and matches it
 * against the uppercase word table, unless case sensitivity is enabled, in
 * which case the superclass handles the lookup.
 */
@Override
public synchronized IToken evaluate(ICharacterScanner scanner, CombinedWordRule.CharacterBuffer word) {
if (fCaseSensitive) {
return super.evaluate(scanner, word);
}
fBuffer.clear();
int length = word.length();
for (int i = 0; i < length; i++) {
fBuffer.append(Character.toUpperCase(word.charAt(i)));
}
IToken match = fUppercaseWords.get(fBuffer);
return match != null ? match : Token.UNDEFINED;
}