org.apache.hadoop.io.Text#equals() Source Code Examples

Listed below are example usages of org.apache.hadoop.io.Text#equals(), collected from open-source projects; each entry names the project and file so the full source can be viewed on GitHub.
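Before the examples, a note on semantics: Text#equals is inherited from BinaryComparable and compares the underlying UTF-8 byte content, so two distinct Text instances holding the same string are equal, while a Text is never equal to a java.lang.String. A minimal sketch (the class name is ours, for illustration only):

import org.apache.hadoop.io.Text;

public class TextEqualsDemo {
  public static void main(String[] args) {
    Text a = new Text("hdfs://nn1:8020");
    Text b = new Text("hdfs://nn1:8020");

    System.out.println(a.equals(b));                 // true: same byte content
    System.out.println(a == b);                      // false: distinct objects

    // A Text never equals a String, even with identical characters.
    System.out.println(a.equals("hdfs://nn1:8020")); // false

    // Text is mutable: set() changes the content that equals compares.
    b.set("hdfs://nn2:8020");
    System.out.println(a.equals(b));                 // false
  }
}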

Example 1 - Project: big-c, File: ClientHSTokenSelector.java
@SuppressWarnings("unchecked")
public Token<MRDelegationTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Token kind is " + token.getKind().toString()
          + " and the token's service name is " + token.getService());
    }
    if (MRDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<MRDelegationTokenIdentifier>) token;
    }
  }
  return null;
}
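Examples 1 to 4 (and 12, 13, 16, 17 below) all follow the same TokenSelector pattern: walk the credentials' tokens and return the first one whose kind and service Text both match via equals. A hedged usage sketch for the selector above; the history-server address is hypothetical:

// Sketch: pick the MR history-server delegation token for a service.
Text service = SecurityUtil.buildTokenService(
    NetUtils.createSocketAddr("historyserver.example.com:10020")); // hypothetical address
Collection<Token<? extends TokenIdentifier>> tokens =
    UserGroupInformation.getCurrentUser().getCredentials().getAllTokens();
Token<MRDelegationTokenIdentifier> token =
    new ClientHSTokenSelector().selectToken(service, tokens);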
 
Example 2 - Project: hbase, File: AuthenticationTokenSelector.java
@Override
public Token<AuthenticationTokenIdentifier> selectToken(Text serviceName,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (serviceName != null) {
    for (Token ident : tokens) {
      if (serviceName.equals(ident.getService()) &&
          AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Returning token "+ident);
        }
        return (Token<AuthenticationTokenIdentifier>)ident;
      }
    }
  }
  LOG.debug("No matching token found");
  return null;
}
 
Example 3 - Project: hadoop, File: NMTokenSelector.java
@SuppressWarnings("unchecked")
@Override
public Token<NMTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (LOG.isDebugEnabled()) {
      LOG.info("Looking for service: " + service + ". Current token is "
          + token);
    }
    if (NMTokenIdentifier.KIND.equals(token.getKind()) && 
        service.equals(token.getService())) {
      return (Token<NMTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 4 - Project: hadoop, File: ContainerTokenSelector.java
@SuppressWarnings("unchecked")
@Override
public Token<ContainerTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (LOG.isDebugEnabled()) {
      LOG.info("Looking for service: " + service + ". Current token is "
          + token);
    }
    if (ContainerTokenIdentifier.KIND.equals(token.getKind()) && 
        service.equals(token.getService())) {
      return (Token<ContainerTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 5 - Project: datawave, File: IndexStatsCombiningIterator.java
@Override
public void next() throws IOException {
    if (src.hasTop()) {
        Key srcTK = src.getTopKey();
        Text workingRow = srcTK.getRow();
        Text currentRow = srcTK.getRow();
        
        long sumUnique = 0;
        long sumCount = 0;
        
        while (workingRow.equals(currentRow)) {
            tuple.readFields(new DataInputStream(new ByteArrayInputStream(src.getTopValue().get())));
            sumUnique += tuple.getNumberOfUniqueWords().get();
            sumCount += tuple.getWordCount().get();
            
            src.next();
            if (src.hasTop()) {
                srcTK = src.getTopKey();
                srcTK.getRow(currentRow);
            } else {
                break;
            }
        }
        weight.set(((double) sumUnique) / ((double) sumCount));
        tk = new Key(workingRow);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        weight.write(new DataOutputStream(baos));
        tv = new Value(baos.toByteArray());
    } else {
        tk = null;
        tv = null;
    }
}
 
Example 6 - Project: datawave, File: IndexStatsSummingIterator.java
@Override
public void next() throws IOException {
    if (src.hasTop()) {
        Key srcTK = src.getTopKey();
        Text workingRow = srcTK.getRow();
        Text currentRow = srcTK.getRow();
        
        long sumUnique = 0;
        long sumCount = 0;
        
        while (workingRow.equals(currentRow)) {
            tuple.readFields(new DataInputStream(new ByteArrayInputStream(src.getTopValue().get())));
            sumUnique += tuple.getNumberOfUniqueWords().get();
            sumCount += tuple.getWordCount().get();
            
            src.next();
            if (src.hasTop()) {
                srcTK = src.getTopKey();
                srcTK.getRow(currentRow);
            } else {
                break;
            }
        }
        summedValues.setNumberOfUniqueWords(sumUnique);
        summedValues.setWordCount(sumCount);
        tk = new Key(workingRow);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        summedValues.write(new DataOutputStream(baos));
        tv = new Value(baos.toByteArray());
    } else {
        tk = null;
        tv = null;
    }
}
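Examples 5 and 6 share one idiom: workingRow keeps the first row of the group, while currentRow is refilled in place by Key.getRow(Text); because Text#equals compares the current byte content, the while loop stops exactly at the first key whose row differs. A minimal sketch of that idiom (the row values are made up):

Key first = new Key(new Text("row1"), new Text("cf"), new Text("cq"));
Key second = new Key(new Text("row2"), new Text("cf"), new Text("cq"));

Text workingRow = first.getRow();                  // fresh Text holding "row1"
Text currentRow = first.getRow();                  // a second, independent Text
System.out.println(workingRow.equals(currentRow)); // true: same bytes

second.getRow(currentRow);                         // refills currentRow with "row2"
System.out.println(workingRow.equals(currentRow)); // false: row boundary reached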
 
Example 7 - Project: datawave, File: IdentityAggregator.java
protected boolean samePointer(Text row, ByteSequence pointer, Key key) {
    if (row.equals(key.getRow())) {
        ByteSequence pointer2 = parsePointer(key.getColumnQualifierData());
        return (pointer.equals(pointer2));
    }
    return false;
}
 
Example 8 - Project: NNAnalytics, File: TokenExtractor.java
/**
 * Extract the last seen DelegationTokens from FSNamesystem.
 *
 * @return map of user names to the timestamp of the last token seen
 */
public Map<String, Long> getTokenLastLogins() {
  if (fsn == null || dtsm == null) {
    return new HashMap<String, Long>() {
      {
        put("hdfs", System.currentTimeMillis());
        put("n/a", -1L);
      }
    };
  }
  Map<String, Long> lastLogins = new HashMap<>();
  fsn.writeLock();
  try {
    Set<Map.Entry<DelegationTokenIdentifier, DelegationTokenInformation>> entries =
        dtsm.currentTokens.entrySet();
    for (Map.Entry<DelegationTokenIdentifier, DelegationTokenInformation> entry : entries) {
      Text owner = entry.getKey().getOwner();
      Text realUser = entry.getKey().getRealUser();
      String ownerStr = new KerberosName(owner.toString()).getServiceName();
      long time = entry.getKey().getIssueDate();
      lastLogins.put(ownerStr, time);
      if ((realUser != null) && (!realUser.toString().isEmpty()) && !realUser.equals(owner)) {
        String realUserStr = new KerberosName(realUser.toString()).getServiceName();
        lastLogins.put(realUserStr, time);
      }
    }
    return lastLogins;
  } finally {
    fsn.writeUnlock();
  }
}
 
Example 9 - Project: circus-train, File: CopyListing.java
/**
 * Validate the final resulting path listing. Checks if there are duplicate entries. If preserving ACLs, checks that
 * the file system can support ACLs. If preserving XAttrs, checks that the file system can support XAttrs.
 *
 * @param pathToListFile path listing built by doBuildListing
 * @param options Input options to S3MapReduceCp
 * @throws IOException if any issue arises while reading the listing or checking for duplicates
 * @throws DuplicateFileException if there are duplicates
 */
private void validateFinalListing(Path pathToListFile, S3MapReduceCpOptions options)
  throws DuplicateFileException, IOException {

  Configuration config = getConf();
  FileSystem fs = pathToListFile.getFileSystem(config);

  Path sortedList = sortListing(fs, config, pathToListFile);

  SequenceFile.Reader reader = new SequenceFile.Reader(config, SequenceFile.Reader.file(sortedList));
  try {
    Text lastKey = new Text("*"); // source relative path can never hold *
    CopyListingFileStatus lastFileStatus = new CopyListingFileStatus();

    Text currentKey = new Text();
    while (reader.next(currentKey)) {
      if (currentKey.equals(lastKey)) {
        CopyListingFileStatus currentFileStatus = new CopyListingFileStatus();
        reader.getCurrentValue(currentFileStatus);
        throw new DuplicateFileException("File "
            + lastFileStatus.getPath()
            + " and "
            + currentFileStatus.getPath()
            + " would cause duplicates. Aborting");
      }
      reader.getCurrentValue(lastFileStatus);
      lastKey.set(currentKey);
    }
  } finally {
    IOUtils.closeStream(reader);
  }
}
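The duplicate check works because the listing is sorted, so equal keys are adjacent; note also how a single lastKey buffer is reused with set() instead of allocating a new Text per record. The same pattern over a plain list (the paths are made up):

List<Text> sortedKeys = Arrays.asList(
    new Text("/a"), new Text("/b"), new Text("/b"), new Text("/c"));

Text lastKey = new Text("*"); // sentinel: a relative path never holds '*'
for (Text currentKey : sortedKeys) {
  if (currentKey.equals(lastKey)) {
    throw new IllegalStateException("Duplicate entry: " + currentKey);
  }
  lastKey.set(currentKey); // copy bytes into the reused buffer
}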
 
Example 10 - Project: big-c, File: LoadGeneratorMR.java
@Override
public void reduce(Text key, Iterator<IntWritable> values,
    OutputCollector<Text, IntWritable> output, Reporter reporter)
    throws IOException {
  int sum = 0;
  while (values.hasNext()) {
    sum += values.next().get();
  }
  if (key.equals(OPEN_EXECTIME)){
    executionTime[OPEN] = sum;
  } else if (key.equals(NUMOPS_OPEN)){
    numOfOps[OPEN] = sum;
  } else if (key.equals(LIST_EXECTIME)){
    executionTime[LIST] = sum;
  } else if (key.equals(NUMOPS_LIST)){
    numOfOps[LIST] = sum;
  } else if (key.equals(DELETE_EXECTIME)){
    executionTime[DELETE] = sum;
  } else if (key.equals(NUMOPS_DELETE)){
    numOfOps[DELETE] = sum;
  } else if (key.equals(CREATE_EXECTIME)){
    executionTime[CREATE] = sum;
  } else if (key.equals(NUMOPS_CREATE)){
    numOfOps[CREATE] = sum;
  } else if (key.equals(WRITE_CLOSE_EXECTIME)){
    System.out.println(WRITE_CLOSE_EXECTIME + " = " + sum);
    executionTime[WRITE_CLOSE]= sum;
  } else if (key.equals(NUMOPS_WRITE_CLOSE)){
    numOfOps[WRITE_CLOSE] = sum;
  } else if (key.equals(TOTALOPS)){
    totalOps = sum;
  } else if (key.equals(ELAPSED_TIME)){
    totalTime = sum;
  }
  result.set(sum);
  output.collect(key, result);
  // System.out.println("Key = " + key + " Sum is =" + sum);
  // printResults(System.out);
}
 
Example 11 - Project: hadoop, File: LoadGeneratorMR.java
@Override
public void reduce(Text key, Iterator<IntWritable> values,
    OutputCollector<Text, IntWritable> output, Reporter reporter)
    throws IOException {
  int sum = 0;
  while (values.hasNext()) {
    sum += values.next().get();
  }
  if (key.equals(OPEN_EXECTIME)){
    executionTime[OPEN] = sum;
  } else if (key.equals(NUMOPS_OPEN)){
    numOfOps[OPEN] = sum;
  } else if (key.equals(LIST_EXECTIME)){
    executionTime[LIST] = sum;
  } else if (key.equals(NUMOPS_LIST)){
    numOfOps[LIST] = sum;
  } else if (key.equals(DELETE_EXECTIME)){
    executionTime[DELETE] = sum;
  } else if (key.equals(NUMOPS_DELETE)){
    numOfOps[DELETE] = sum;
  } else if (key.equals(CREATE_EXECTIME)){
    executionTime[CREATE] = sum;
  } else if (key.equals(NUMOPS_CREATE)){
    numOfOps[CREATE] = sum;
  } else if (key.equals(WRITE_CLOSE_EXECTIME)){
    System.out.println(WRITE_CLOSE_EXECTIME + " = " + sum);
    executionTime[WRITE_CLOSE]= sum;
  } else if (key.equals(NUMOPS_WRITE_CLOSE)){
    numOfOps[WRITE_CLOSE] = sum;
  } else if (key.equals(TOTALOPS)){
    totalOps = sum;
  } else if (key.equals(ELAPSED_TIME)){
    totalTime = sum;
  }
  result.set(sum);
  output.collect(key, result);
  // System.out.println("Key = " + key + " Sum is =" + sum);
  // printResults(System.out);
}
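Since Text's hashCode derives from the same byte content as equals, the long equals chain in examples 10 and 11 could also be written as a map dispatch. A hedged sketch; the constants stand in for those defined in LoadGeneratorMR:

Map<Text, IntConsumer> handlers = new HashMap<>();
handlers.put(OPEN_EXECTIME, sum -> executionTime[OPEN] = sum);
handlers.put(NUMOPS_OPEN, sum -> numOfOps[OPEN] = sum);
// ... one entry per counter key ...

IntConsumer handler = handlers.get(key); // lookup works: hashCode matches equals
if (handler != null) {
  handler.accept(sum);
}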
 
Example 12

@Override
public Token<StramDelegationTokenIdentifier> selectToken(Text text, Collection<Token<? extends TokenIdentifier>> clctn)
{
  Token<StramDelegationTokenIdentifier> token = null;
  if (text != null) {
    for (Token<? extends TokenIdentifier> ctoken : clctn) {
      if (StramDelegationTokenIdentifier.IDENTIFIER_KIND.equals(ctoken.getKind()) && text.equals(ctoken.getService())) {
        token = (Token<StramDelegationTokenIdentifier>)ctoken;
      }
    }
  }
  return token;
}
 
Example 13 - Project: hadoop, File: JobTokenSelector.java
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<JobTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 14 - Project: hadoop, File: TokenAspect.java
@Override
public boolean handleKind(Text kind) {
  return kind.equals(HftpFileSystem.TOKEN_KIND)
      || kind.equals(HsftpFileSystem.TOKEN_KIND)
      || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
      || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
}
 
Example 15 - Project: hadoop, File: TokenAspect.java
private static String getSchemeByKind(Text kind) {
  if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
    return HftpFileSystem.SCHEME;
  } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
    return HsftpFileSystem.SCHEME;
  } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
    return WebHdfsFileSystem.SCHEME;
  } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
    return SWebHdfsFileSystem.SCHEME;
  } else {
    throw new IllegalArgumentException("Unsupported scheme");
  }
}
 
Example 16 - Project: tez, File: JobTokenSelector.java
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<JobTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 17 - Project: incubator-tez, File: JobTokenSelector.java
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
        && service.equals(token.getService())) {
      return (Token<JobTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 18 - Project: jumbune, File: JsonDataValidationReducer.java
public void reduce(Text key, Iterable<FileKeyViolationBean> values, Context context)
    throws IOException, InterruptedException {

  if (!key.equals(ok)) {
    createDirectory(key);
  }
  ArrayList<ReducerViolationBean> list = new ArrayList<ReducerViolationBean>();
  Integer totalKey = 0, totalLine = 0;

  for (FileKeyViolationBean value : values) {
    Integer size = value.getViolationList().size();
    Long splitEndOffset = value.getSplitEndOffset().get();
    Long totalRecEmiByMap = value.getTotalRecordsEmittByMap().get();
    String fileName = value.getFileName().toString();
    FileOffsetKey fileOffsetKey = new FileOffsetKey(fileName, splitEndOffset);

    offsetLinesMap.put(fileOffsetKey, totalRecEmiByMap);
    ArrayListWritable<JsonLineViolationBean> alw = value.getViolationList();
    Integer keyViolationSize = 0;
    for (JsonLineViolationBean jslb : alw) {
      Integer violationSize = jslb.getJsonKeyViolationList().size();
      List<JsonKeyViolationBean> jsonKVB = jslb.getJsonKeyViolationList();
      for (JsonKeyViolationBean jskvb : jsonKVB) {
        if (!key.equals(ok)) {
          ViolationPersistenceBean bean = null;
          if (key.equals(regexKey)) {
            bean = new ViolationPersistenceBean(Integer.parseInt(jskvb.getLineNumber().toString()),
                jskvb.getJsonNode().toString(), jskvb.getExpectedValue().toString(),
                jskvb.getActualValue().toString(), regexKey.toString(),
                value.getFileName().toString(), splitEndOffset);
            regexArray.add(bean);
          } else if (key.equals(missingKey)) {
            bean = new ViolationPersistenceBean(Integer.parseInt(jskvb.getLineNumber().toString()),
                jskvb.getJsonNode().toString(), jskvb.getExpectedValue().toString(),
                jskvb.getActualValue().toString(), missingKey.toString(),
                value.getFileName().toString(), splitEndOffset);
            missingArray.add(bean);
          } else if (key.equals(dataKey)) {
            bean = new ViolationPersistenceBean(Integer.parseInt(jskvb.getLineNumber().toString()),
                jskvb.getJsonNode().toString(), jskvb.getExpectedValue().toString(),
                jskvb.getActualValue().toString(), dataKey.toString(),
                value.getFileName().toString(), splitEndOffset);
            dataArray.add(bean);
          } else if (key.equals(jsonSchemaKey)) {
            bean = new ViolationPersistenceBean(Integer.parseInt(jskvb.getLineNumber().toString()),
                jskvb.getJsonNode().toString(), jskvb.getExpectedValue().toString(),
                jskvb.getActualValue().toString(), jsonSchemaKey.toString(),
                value.getFileName().toString(), splitEndOffset);
            schemaArray.add(bean);
          } else if (key.equals(nullKey)) {
            bean = new ViolationPersistenceBean(Integer.parseInt(jskvb.getLineNumber().toString()),
                jskvb.getJsonNode().toString(), jskvb.getExpectedValue().toString(),
                jskvb.getActualValue().toString(), nullKey.toString(),
                value.getFileName().toString(), splitEndOffset);
            nullTypeArray.add(bean);
          }
        }
      }
      keyViolationSize = keyViolationSize + violationSize;
    }

    totalKey = totalKey + keyViolationSize;
    totalLine = totalLine + size;

    Text text = new Text(value.getFileName());
    ReducerViolationBean reducerViolationBean = new ReducerViolationBean();
    reducerViolationBean.setFileName(text);
    reducerViolationBean.setSize(new IntWritable(keyViolationSize));
    list.add(reducerViolationBean);
  }
  ArrayListWritable<ReducerViolationBean> awb = new ArrayListWritable<ReducerViolationBean>(list);
  TotalReducerViolationBean totalReducerViolationBean = new TotalReducerViolationBean();
  totalReducerViolationBean.setReducerViolationBeanList(awb);
  totalReducerViolationBean.setTotalLineViolation(new IntWritable(totalLine));
  totalReducerViolationBean.setTotalKeyViolation(new IntWritable(totalKey));

  context.write(key, totalReducerViolationBean);
}
 
Example 19 - Project: accumulo-examples, File: ContinuousQuery.java
private static ArrayList<Text[]> findRandomTerms(Scanner scanner, int numTerms) {

  Text currentRow = null;

  ArrayList<Text> words = new ArrayList<>();
  ArrayList<Text[]> ret = new ArrayList<>();

  Random rand = new Random();

  for (Entry<Key,Value> entry : scanner) {
    Key key = entry.getKey();

    if (currentRow == null)
      currentRow = key.getRow();

    if (!currentRow.equals(key.getRow())) {
      selectRandomWords(words, ret, rand, numTerms);
      words.clear();
      currentRow = key.getRow();
    }

    words.add(key.getColumnFamily());
  }

  selectRandomWords(words, ret, rand, numTerms);

  return ret;
}
 
Example 20 - Project: accumulo-examples, File: QueryUtil.java
/**
 * Returns either the {@link #DIR_COLF} or a decoded string version of the colf.
 *
 * @param colf
 *          the column family
 */
public static String getType(Text colf) {
  if (colf.equals(DIR_COLF))
    return colf.toString() + ":";
  return Long.toString(Ingest.encoder.decode(colf.getBytes())) + ":";
}