org.apache.hadoop.fs.FileUtil#replaceFile() Source Code Examples

The following are example usages of org.apache.hadoop.fs.FileUtil#replaceFile(), collected from several open-source projects; you can also follow each project's link to view the full source on GitHub.
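
Before the project examples, here is a minimal usage sketch. The paths and file contents below are hypothetical and only for illustration; the only library call taken from Hadoop is FileUtil.replaceFile(src, target), which moves src to target, replacing target if it already exists, and throws IOException if the rename cannot be completed.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

import org.apache.hadoop.fs.FileUtil;

public class ReplaceFileDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical temporary and destination paths, for illustration only.
    File tmp = new File("/tmp/edits.sync.tmp");
    File dst = new File("/tmp/edits_inprogress");

    // Write some content into the temporary file first.
    Files.write(tmp.toPath(), "new segment contents".getBytes(StandardCharsets.UTF_8));

    // Move tmp into place, replacing dst if it already exists;
    // an IOException is thrown if the replacement cannot be performed.
    FileUtil.replaceFile(tmp, dst);
  }
}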

Example 1  Project: hadoop   File: Journal.java
/**
 * In the case the node crashes in between downloading a log segment
 * and persisting the associated paxos recovery data, the log segment
 * will be left in its temporary location on disk. Given the paxos data,
 * we can check if this was indeed the case, and "roll forward"
 * the atomic operation.
 * 
 * See the inline comments in
 * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
 * details.
 *
 * @throws IOException if the temporary file is unable to be renamed into
 * place
 */
private void completeHalfDoneAcceptRecovery(
    PersistedRecoveryPaxosData paxosData) throws IOException {
  if (paxosData == null) {
    return;
  }

  long segmentId = paxosData.getSegmentState().getStartTxId();
  long epoch = paxosData.getAcceptedInEpoch();
  
  File tmp = storage.getSyncLogTemporaryFile(segmentId, epoch);
  
  if (tmp.exists()) {
    File dst = storage.getInProgressEditLog(segmentId);
    LOG.info("Rolling forward previously half-completed synchronization: " +
        tmp + " -> " + dst);
    FileUtil.replaceFile(tmp, dst);
  }
}
 
Example 2  Project: big-c   File: Journal.java
/**
 * In the case the node crashes in between downloading a log segment
 * and persisting the associated paxos recovery data, the log segment
 * will be left in its temporary location on disk. Given the paxos data,
 * we can check if this was indeed the case, and "roll forward"
 * the atomic operation.
 * 
 * See the inline comments in
 * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more
 * details.
 *
 * @throws IOException if the temporary file is unable to be renamed into
 * place
 */
private void completeHalfDoneAcceptRecovery(
    PersistedRecoveryPaxosData paxosData) throws IOException {
  if (paxosData == null) {
    return;
  }

  long segmentId = paxosData.getSegmentState().getStartTxId();
  long epoch = paxosData.getAcceptedInEpoch();
  
  File tmp = storage.getSyncLogTemporaryFile(segmentId, epoch);
  
  if (tmp.exists()) {
    File dst = storage.getInProgressEditLog(segmentId);
    LOG.info("Rolling forward previously half-completed synchronization: " +
        tmp + " -> " + dst);
    FileUtil.replaceFile(tmp, dst);
  }
}
 
Example 3  Project: RDFS   File: DatanodeBlockInfo.java
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the detachDir. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void detachFile(int namespaceId, File file, Block b) throws IOException {
  File tmpFile = volume.createDetachFile(namespaceId, b, file.getName());
  try {
    IOUtils.copyBytes(new FileInputStream(file),
                      new FileOutputStream(tmpFile),
                      16*1024, true);
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()+
                            " into file " + tmpFile +
                            " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
                        tmpFile);
    }
    throw e;
  }
}
 
Example 4  Project: hadoop-gpu   File: DatanodeBlockInfo.java
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the detachDir. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void detachFile(File file, Block b) throws IOException {
  File tmpFile = volume.createDetachFile(b, file.getName());
  try {
    IOUtils.copyBytes(new FileInputStream(file),
                      new FileOutputStream(tmpFile),
                      16*1024, true);
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()+
                            " into file " + tmpFile +
                            " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
                        tmpFile);
    }
    throw e;
  }
}
 
Example 5  Project: hadoop   File: ReplicaInfo.java
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the same directory. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void unlinkFile(File file, Block b) throws IOException {
  File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
  try {
    FileInputStream in = new FileInputStream(file);
    try {
      FileOutputStream out = new FileOutputStream(tmpFile);
      try {
        IOUtils.copyBytes(in, out, 16*1024);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()+
                            " into file " + tmpFile +
                            " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
                        tmpFile);
    }
    throw e;
  }
}
 
Example 6  Project: big-c   File: ReplicaInfo.java
/**
 * Copy specified file into a temporary file. Then rename the
 * temporary file to the original name. This will cause any
 * hardlinks to the original file to be removed. The temporary
 * files are created in the same directory. The temporary files will
 * be recovered (especially on Windows) on datanode restart.
 */
private void unlinkFile(File file, Block b) throws IOException {
  File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
  try {
    FileInputStream in = new FileInputStream(file);
    try {
      FileOutputStream out = new FileOutputStream(tmpFile);
      try {
        IOUtils.copyBytes(in, out, 16*1024);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()+
                            " into file " + tmpFile +
                            " resulted in a size of " + tmpFile.length());
    }
    FileUtil.replaceFile(tmpFile, file);
  } catch (IOException e) {
    boolean done = tmpFile.delete();
    if (!done) {
      DataNode.LOG.info("detachFile failed to delete temporary file " +
                        tmpFile);
    }
    throw e;
  }
}
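
All of the detachFile/unlinkFile examples above follow the same copy-then-swap pattern: copy the block file into a temporary file, verify the copy's length, then use FileUtil.replaceFile to move the copy over the original so that any other hard links keep pointing at the old data and are no longer shared with this path. As a standalone sketch of that pattern using only the JDK (the path names and helper name here are hypothetical, not part of the Hadoop code above):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class CopyThenSwap {
  /** Replace the given file with a fresh copy of itself, breaking any hard links to it. */
  public static void breakHardLinks(File file) throws IOException {
    // Create the temporary copy next to the original, as the datanode examples do.
    File tmp = new File(file.getParentFile(), file.getName() + ".unlink.tmp");
    Files.copy(file.toPath(), tmp.toPath(), StandardCopyOption.REPLACE_EXISTING);

    // Sanity-check the copy before swapping it in, mirroring the length check above.
    if (file.length() != tmp.length()) {
      throw new IOException("Copy of " + file + " has unexpected size " + tmp.length());
    }

    // Move the copy over the original; other hard links to the old inode are unaffected.
    Files.move(tmp.toPath(), file.toPath(),
        StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
  }
}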