org.apache.hadoop.fs.FSDataInputStream#readInt() Code Examples

The examples below show how org.apache.hadoop.fs.FSDataInputStream#readInt() is used in open source projects; follow each project link to view the full source on GitHub.
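To ground the examples, here is a minimal standalone sketch of the call itself. FSDataInputStream implements java.io.DataInput, so readInt() consumes the next four bytes at the current stream position and returns them as a big-endian int, throwing EOFException if fewer than four bytes remain. The file path below is a placeholder, not taken from any of the listed projects.

import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadIntDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/demo.bin"); // placeholder path
    FileSystem fs = path.getFileSystem(conf);
    // try-with-resources closes the stream even if readInt() throws
    try (FSDataInputStream in = fs.open(path)) {
      in.seek(0);               // readInt() reads from the current position
      int value = in.readInt(); // next 4 bytes, interpreted big-endian
      System.out.println("first int = " + value);
    } catch (EOFException e) {
      System.err.println("file shorter than 4 bytes");
    }
  }
}

Note the recurring pattern in the examples below: a 4-byte count or length is read with readInt(), and the value then sizes the reads (often readFully()) that follow.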

Example 1  Project: RDFS  File: CompletedJobStatusStore.java
private TaskCompletionEvent[] readEvents(FSDataInputStream dataIn,
                                         int offset, int len)
        throws IOException {
  // the first int in the stream is the total number of stored events
  int size = dataIn.readInt();
  if (offset > size) {
    return TaskCompletionEvent.EMPTY_ARRAY;
  }
  if (offset + len > size) {
    len = size - offset;
  }
  TaskCompletionEvent[] events = new TaskCompletionEvent[len];
  // events are stored sequentially, so read and discard the first
  // 'offset' events to reach the requested window
  for (int i = 0; i < (offset + len); i++) {
    TaskCompletionEvent event = new TaskCompletionEvent();
    event.readFields(dataIn);
    if (i >= offset) {
      events[i - offset] = event;
    }
  }
  return events;
}
 
Example 2  Project: spliceengine  File: SIObserver.java
private byte[] getToken(String path) throws IOException{
    byte[] token = null;
    FSDataInputStream in = null;
    try {
        FileSystem fs = FileSystem.get(new URI(path), HConfiguration.unwrapDelegate());
        Path p = new Path(path, "_token");
        if (fs.exists(p)) {
            in = fs.open(p);
            // the token is length-prefixed: a 4-byte length, then the raw bytes
            int len = in.readInt();
            token = new byte[len];
            in.readFully(token);
        }
        return token;
    } catch (Exception e) {
        throw new IOException(e);
    }
    finally {
        if (in != null) {
            in.close();
        }
    }
}
 
Example 3  Project: hadoop-gpu  File: CompletedJobStatusStore.java
private TaskCompletionEvent[] readEvents(FSDataInputStream dataIn,
                                         int offset, int len)
        throws IOException {
  int size = dataIn.readInt();
  if (offset > size) {
    return TaskCompletionEvent.EMPTY_ARRAY;
  }
  if (offset + len > size) {
    len = size - offset;
  }
  TaskCompletionEvent[] events = new TaskCompletionEvent[len];
  for (int i = 0; i < (offset + len); i++) {
    TaskCompletionEvent event = new TaskCompletionEvent();
    event.readFields(dataIn);
    if (i >= offset) {
      events[i - offset] = event;
    }
  }
  return events;
}
 
Example 4  Project: kylin  File: FSInputGeneralColumnDataReader.java
public FSInputGeneralColumnDataReader(FSDataInputStream fsInputStream, int dataStartOffset, int dataLength)
        throws IOException {
    this.fsInputStream = fsInputStream;
    // the last 4 bytes of the data section form a footer holding the value count
    fsInputStream.seek(dataStartOffset + dataLength - 4L);
    this.numOfVals = fsInputStream.readInt();
    // rewind to the start of the data section for subsequent reads
    fsInputStream.seek(dataStartOffset);
}
 
Example 5  Project: kylin  File: FSInputLZ4CompressedColumnReader.java
public FSInputLZ4CompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
        int columnDataLength, int rowCount) throws IOException {
    this.rowCount = rowCount;
    this.fsInputStream = fsInputStream;
    // 8-byte footer: one int for values-per-block, one int for the value length
    int footStartOffset = columnDataStartOffset + columnDataLength - 8;
    fsInputStream.seek(footStartOffset);
    this.numValInBlock = fsInputStream.readInt();
    this.valLen = fsInputStream.readInt();

    fsInputStream.seek(columnDataStartOffset);
    this.currBlockNum = -1;

    this.deCompressor = LZ4Factory.fastestInstance().safeDecompressor();
    this.maxDecompressedLength = numValInBlock * valLen;
}
 
Example 6  Project: kylin  File: FSInputRLECompressedColumnReader.java
public FSInputRLECompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
        int columnDataLength, int rowCount) throws IOException {
    this.rowCount = rowCount;
    this.fsInputStream = fsInputStream;
    int footStartOffset = columnDataStartOffset + columnDataLength - 8;
    fsInputStream.seek(footStartOffset);
    this.numValInBlock = fsInputStream.readInt();
    this.valLen = fsInputStream.readInt();
    this.fsInputStream.seek(columnDataStartOffset);
    this.currBlockNum = -1;
}
 
Example 7  Project: succinct  File: SuccinctIndexedFileStream.java
/**
 * Constructor to map a file containing Succinct data structures via stream.
 *
 * @param filePath Path of the file.
 * @param conf     Configuration for the filesystem.
 * @throws IOException
 */
public SuccinctIndexedFileStream(Path filePath, Configuration conf) throws IOException {
  super(filePath, conf);
  FSDataInputStream is = getStream(filePath);
  // the offset index sits after the core Succinct structures:
  // a 4-byte count followed by that many 4-byte offsets
  is.seek(endOfFileStream);
  int len = is.readInt();
  offsets = new int[len];
  for (int i = 0; i < len; i++) {
    offsets[i] = is.readInt();
  }
  endOfIndexedFileStream = is.getPos();
  is.close();
}
 
Example 8  Project: incubator-tajo  File: BSTIndex.java
private void fillRootIndex(int entryNum, FSDataInputStream in)
    throws IOException {
  this.dataIndex = new Tuple[entryNum];
  this.offsetIndex = new long[entryNum];
  Tuple keyTuple;
  byte[] buf;
  for (int i = 0; i < entryNum; i++) {
    // each entry is a length-prefixed serialized key followed by a long offset
    buf = new byte[in.readInt()];
    Bytes.readFully(in, buf, 0, buf.length);
    keyTuple = RowStoreUtil.RowStoreDecoder.toTuple(keySchema, buf);
    dataIndex[i] = keyTuple;
    this.offsetIndex[i] = in.readLong();
  }
}
 
Example 9  Project: incubator-retired-blur  File: HdfsDirectory.java
private void loadCacheFromManifest(Path manifest) throws IOException {
  FSDataInputStream inputStream = _fileSystem.open(manifest);
  int count = inputStream.readInt();
  for (int i = 0; i < count; i++) {
    String name = readString(inputStream);
    long lastMod = inputStream.readLong();
    long length = inputStream.readLong();
    FStat fstat = new FStat(lastMod, length);
    _cache.put(name, fstat);
  }
  inputStream.close();
}
 
Example 10  Project: incubator-retired-blur  File: HdfsKeyValueStore.java
private void loadIndex(Path path) throws IOException {
  FSDataInputStream inputStream = _fileSystem.open(path);
  byte[] buf = new byte[MAGIC.length];
  inputStream.readFully(buf);
  if (!Arrays.equals(MAGIC, buf)) {
    throw new IOException("File [" + path + "] not a " + BLUR_KEY_VALUE + " file.");
  }
  int version = inputStream.readInt();
  if (version == 1) {
    long fileLength = HdfsUtils.getFileLength(_fileSystem, path, inputStream);
    Operation operation = new Operation();
    try {
      while (inputStream.getPos() < fileLength) {
        try {
          operation.readFields(inputStream);
        } catch (IOException e) {
          // End of sync point found
          return;
        }
        loadIndex(path, operation);
      }
    } finally {
      inputStream.close();
    }
  } else {
    throw new IOException("Unknown version [" + version + "]");
  }
}
 
Example 11  Project: spliceengine  File: BackupUtils.java
/**
 *
 * @param fs HBase file system
 * @param rootPath HBase root directory
 * @return True if there exists a successful database backup
 * @throws IOException
 */
public static boolean existsDatabaseBackup(FileSystem fs, Path rootPath) throws IOException {
    boolean ret = false;
    FSDataInputStream in = null;
    try {
        // Open backup record file from file system
        Path backupPath = new Path(rootPath, BackupRestoreConstants.BACKUP_DIR);
        Path p = new Path(backupPath, BackupRestoreConstants.BACKUP_RECORD_FILE_NAME);
        if (fs.exists(p)) {
            in = fs.open(p);
            int n = in.readInt();  // number of records
            BackupDescriptor bd = new BackupDescriptor();
            while (n-- > 0) {
                bd.readExternal(in);
                if (bd.getScope().compareToIgnoreCase("DATABASE") == 0) {
                    ret = true;
                    break;
                }
            }
        }
        return ret;
    }
    finally {
        if (in != null)
            in.close();
    }
}
 
Example 12  Project: Cubert  File: RubixFile.java
@SuppressWarnings("unchecked")
public List<KeyData<K>> getKeyData() throws IOException,
        ClassNotFoundException
{
    if (keyData != null)
        return keyData;

    final FileSystem fs = FileSystem.get(conf);
    keyData = new ArrayList<KeyData<K>>();

    final long filesize = fs.getFileStatus(path).getLen();
    FSDataInputStream in = fs.open(path);

    /* The last long in the file is the start position of the trailer section */
    in.seek(filesize - 8);
    long metaDataStartPos = in.readLong();

    in.seek(metaDataStartPos);

    ObjectMapper mapper = new ObjectMapper();
    metadataJson = mapper.readValue(in.readUTF(), JsonNode.class);

    int keySectionSize = in.readInt();

    // load the key section
    byte[] keySection = new byte[keySectionSize];

    in.seek(filesize - keySectionSize - 8);
    in.readFully(keySection, 0, keySectionSize); // a bare read() may leave the buffer partially filled
    in.close();

    ByteArrayInputStream bis = new ByteArrayInputStream(keySection);
    DataInput dataInput = new DataInputStream(bis);

    int numberOfBlocks = metadataJson.get("numberOfBlocks").getIntValue();

    // resolve the key and value classes recorded in the metadata
    keyClass = (Class<K>) ClassCache.forName(JsonUtils.getText(metadataJson, "keyClass"));
    valueClass =
            (Class<V>) ClassCache.forName(JsonUtils.getText(metadataJson, "valueClass"));

    SerializationFactory serializationFactory = new SerializationFactory(conf);
    Deserializer<K> deserializer = serializationFactory.getDeserializer(keyClass);

    deserializer.open(bis);

    while (bis.available() > 0 && numberOfBlocks > 0)
    {
        K key = deserializer.deserialize(null);

        long offset = dataInput.readLong();
        long blockId = dataInput.readLong();
        long numRecords = dataInput.readLong();

        keyData.add(new KeyData<K>(key, offset, 0, numRecords, blockId));
        numberOfBlocks--;
    }

    // Assign length to each keydata entry
    int numEntries = keyData.size();
    for (int i = 1; i < numEntries; i++)
    {
        KeyData<K> prev = keyData.get(i - 1);
        KeyData<K> current = keyData.get(i);

        prev.setLength(current.getOffset() - prev.getOffset());
    }

    if (numEntries > 0)
    {
        KeyData<K> last = keyData.get(numEntries - 1);
        last.setLength(metaDataStartPos - last.offset);
    }

    return keyData;
}
 
Example 13  Project: tajo  File: HistoryReader.java
public org.apache.tajo.worker.TaskHistory getTaskHistory(String taskAttemptId, long startTime) throws IOException {
  FileSystem fs = HistoryWriter.getNonCrcFileSystem(taskHistoryParentPath, tajoConf);

  SimpleDateFormat df = new SimpleDateFormat("yyyyMMddHH");

  Calendar cal = Calendar.getInstance();
  cal.setTime(new Date(startTime));

  //current, current-1, current+1 hour
  String[] targetHistoryFileDates = new String[3];
  targetHistoryFileDates[0] = df.format(cal.getTime());

  cal.add(Calendar.HOUR_OF_DAY, -1);
  targetHistoryFileDates[1] = df.format(cal.getTime());

  cal.setTime(new Date(startTime));
  cal.add(Calendar.HOUR_OF_DAY, 1);
  targetHistoryFileDates[2] = df.format(cal.getTime());

  for (String historyFileDate : targetHistoryFileDates) {
    Path fileParent = new Path(taskHistoryParentPath, historyFileDate.substring(0, 8) + "/tasks/" + processName);
    String hour = historyFileDate.substring(8, 10);

    if (!fs.exists(fileParent)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Task history parent not exists:" + fileParent);
      }
      continue;
    }

    FileStatus[] files = fs.listStatus(fileParent);
    if (files == null || files.length == 0) {
      return null;
    }

    String filePrefix = processName + "_" + hour + "_";

    for (FileStatus eachFile : files) {
      if (eachFile.getPath().getName().indexOf(filePrefix) != 0) {
        continue;
      }

      FSDataInputStream in = null;
      TaskHistoryProto.Builder builder = TaskHistoryProto.newBuilder();
      try {
        FileStatus status = fs.getFileStatus(eachFile.getPath());
        LOG.info("Finding TaskHistory from " + status.getLen() + "," + eachFile.getPath());

        in = fs.open(eachFile.getPath());
        while (true) {
          // each record is length-prefixed: a 4-byte length, then a serialized TaskHistoryProto
          int len = in.readInt();
          byte[] buf = new byte[len];
          in.readFully(buf, 0, len);

          builder.clear();
          TaskHistoryProto taskHistoryProto = builder.mergeFrom(buf).build();
          TaskAttemptId attemptId = new TaskAttemptId(taskHistoryProto.getTaskAttemptId());
          if (attemptId.toString().equals(taskAttemptId)) {
            return new org.apache.tajo.worker.TaskHistory(taskHistoryProto);
          }
        }
      } catch (EOFException e) {
        // hit the end of this history file without a match; fall through to the next file
      } finally {
        if (in != null) {
          in.close();
        }
      }
    }
  }
  return null;
}
 
Example 14  Project: incubator-retired-blur  File: HdfsDirectory.java
private String readString(FSDataInputStream inputStream) throws IOException {
  int length = inputStream.readInt(); // strings are stored as length-prefixed UTF-8 bytes
  byte[] buf = new byte[length];
  inputStream.readFully(buf);
  return new String(buf, UTF_8);
}