Source Code Examples for the Class org.apache.hadoop.fs.UnresolvedLinkException

The examples below show how the org.apache.hadoop.fs.UnresolvedLinkException API is used in practice. Each snippet is taken from the named open-source project and file, where the full source can be viewed on GitHub.
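As background, UnresolvedLinkException is raised when a component of a path is a symbolic link that still needs resolving. FileContext normally resolves links internally (via FSLinkResolver), so application code mostly encounters the exception when calling AbstractFileSystem-level APIs directly. Below is a minimal, self-contained sketch of inspecting a symlink through the public FileContext API; the path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;

public class UnresolvedLinkDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path link = new Path("/user/demo/link");  // hypothetical path

    try {
      // getFileLinkStatus does not follow the final path component,
      // so a symlink is reported as such instead of being resolved.
      FileStatus stat = fc.getFileLinkStatus(link);
      if (stat.isSymlink()) {
        System.out.println("symlink -> " + fc.getLinkTarget(link));
      }
    } catch (UnresolvedLinkException e) {
      // Thrown when a symlink in the path still needs resolution;
      // FileContext usually retries internally before user code sees it.
      System.err.println("unresolved symlink in " + link + ": " + e);
    }
  }
}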

Example 1  Project: hadoop  File: TestClientReportBadBlock.java
/**
 * DFS client reads bytes starting from the specified position.
 */
private void dfsClientReadFileFromPosition(Path corruptedFile)
    throws UnresolvedLinkException, IOException {
  DFSInputStream in = dfs.dfs.open(corruptedFile.toUri().getPath());
  byte[] buf = new byte[buffersize];
  int startPosition = 2;
  int nRead = 0; // number of bytes returned by the most recent read
  try {
    do {
      nRead = in.read(startPosition, buf, 0, buf.length);
      startPosition += buf.length;
    } while (nRead > 0);
  } catch (BlockMissingException bme) {
    LOG.debug("DfsClientReadFile caught BlockMissingException.");
  }
}
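The same positional-read pattern is available through the public API: FSDataInputStream implements PositionedReadable, and its read(position, buffer, offset, length) does not move the stream's current offset. A minimal sketch, assuming an initialized FileSystem fs and a hypothetical file:

try (FSDataInputStream in = fs.open(new Path("/data/sample.bin"))) {
  byte[] buf = new byte[4096];
  long pos = 2;   // start two bytes into the file
  int n;
  while ((n = in.read(pos, buf, 0, buf.length)) > 0) {
    pos += n;     // advance by the number of bytes actually read
  }
}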
 
Example 2  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public void rename2(String src, String dst, Rename... options)
    throws AccessControlException, DSQuotaExceededException,
    FileAlreadyExistsException, FileNotFoundException,
    NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
    UnresolvedLinkException, IOException {
  boolean overwrite = false;
  if (options != null) {
    for (Rename option : options) {
      if (option == Rename.OVERWRITE) {
        overwrite = true;
      }
    }
  }
  Rename2RequestProto req = Rename2RequestProto.newBuilder().
      setSrc(src).
      setDst(dst).setOverwriteDest(overwrite).
      build();
  try {
    rpcProxy.rename2(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

}
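This translator method backs the overwrite-capable rename exposed to clients; with the public API, Options.Rename.OVERWRITE selects the same behavior. A minimal sketch, assuming an initialized FileContext fc and hypothetical paths:

fc.rename(new Path("/tmp/staging/part-0"),
          new Path("/data/part-0"),
          Options.Rename.OVERWRITE);  // org.apache.hadoop.fs.Options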
 
Example 3  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  GetListingRequestProto req = GetListingRequestProto.newBuilder()
      .setSrc(src)
      .setStartAfter(ByteString.copyFrom(startAfter))
      .setNeedLocation(needLocation).build();
  try {
    GetListingResponseProto result = rpcProxy.getListing(null, req);
    
    if (result.hasDirList()) {
      return PBHelper.convert(result.getDirList());
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 4  Project: hadoop  File: DFSClient.java
/**
 * Get block location info about a file.
 * 
 * getBlockLocations() returns a list of hostnames that store 
 * data for a specific file region.  It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data-placement when performing operations.  For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data-block the task processes. 
 */
public BlockLocation[] getBlockLocations(String src, long start, 
      long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  } finally {
    scope.close();
  }
}
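Through the public FileSystem API the same information is exposed as getFileBlockLocations. A self-contained sketch that prints each block's byte range and its hosts; the path is hypothetical:

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/data/input.txt");  // hypothetical path
    FileStatus st = fs.getFileStatus(file);
    for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
      // each BlockLocation covers one block and its replica hosts
      System.out.println(loc.getOffset() + ".." +
          (loc.getOffset() + loc.getLength() - 1) +
          " on " + Arrays.toString(loc.getHosts()));
    }
  }
}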
 
Example 5  Project: hadoop  File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 *  Progressable, int, ChecksumOpt)} except that the permission
 *  is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 6  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public boolean mkdirs(String src, FsPermission masked, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  MkdirsRequestProto req = MkdirsRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setCreateParent(createParent).build();

  try {
    return rpcProxy.mkdirs(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 7  Project: hadoop  File: ViewFs.java
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);

  // FileStatus#getPath is a fully qualified path relative to the root of the
  // target file system. We need to change it to a viewfs URI, relative to the
  // root of the mount table.

  // The implementors of RawLocalFileSystem were trying to be very smart:
  // they implement FileStatus#getOwner lazily -- the object returned is
  // really a RawLocalFileStatus that expects FileStatus#getPath to remain
  // unchanged so that it can fetch the owner when needed.
  // Hence we need to interpose a ViewFsFileStatus that works around this.

  FileStatus status =  res.targetFileSystem.getFileStatus(res.remainingPath);
  return new ViewFsFileStatus(status, this.makeQualified(f));
}
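The path rewriting matters because viewfs paths are relative to the mount table, not to any target file system. A minimal sketch of wiring up one mount point programmatically with ConfigUtil, assuming a hypothetical HDFS target:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsMountDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // mount /data onto a (hypothetical) HDFS directory
    ConfigUtil.addLink(conf, "/data", new URI("hdfs://nn:8020/data"));
    FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
    // the returned status carries a viewfs:// path, not the hdfs:// target
    System.out.println(fc.getFileStatus(new Path("/data")).getPath());
  }
}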
 
Example 8  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 9  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public void fsync(String src, long fileId, String client,
                  long lastBlockLength)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  FsyncRequestProto req = FsyncRequestProto.newBuilder().setSrc(src)
      .setClient(client).setLastBlockLength(lastBlockLength)
          .setFileId(fileId).build();
  try {
    rpcProxy.fsync(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 10  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public void setPermission(String src, FsPermission permission)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  SetPermissionRequestProto req = SetPermissionRequestProto.newBuilder()
      .setSrc(src)
      .setPermission(PBHelper.convert(permission))
      .build();
  try {
    rpcProxy.setPermission(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 11  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public boolean delete(String src, boolean recursive)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  DeleteRequestProto req = DeleteRequestProto.newBuilder().setSrc(src).setRecursive(recursive).build();
  try {
    return rpcProxy.delete(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 12  Project: hadoop  File: ViewFs.java
@Override
public boolean truncate(final Path f, final long newLength)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);
  return res.targetFileSystem.truncate(res.remainingPath, newLength);
}
 
Example 13  Project: hadoop  File: ViewFs.java
@Override
public BlockLocation[] getFileBlockLocations(final Path f, final long start,
    final long len) throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  return
    res.targetFileSystem.getFileBlockLocations(res.remainingPath, start, len);
}
 
Example 14  Project: hadoop  File: ViewFs.java
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
    fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.access(res.remainingPath, mode);
}
 
Example 15  Project: hadoop  File: TestFsck.java
public void removeBlocks(MiniDFSCluster cluster)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  for (int corruptIdx : blocksToCorrupt) {
    // Corrupt a block by deleting it
    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
        name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock();
    for (int i = 0; i < numDataNodes; i++) {
      File blockFile = cluster.getBlockFile(i, block);
      if(blockFile != null && blockFile.exists()) {
        assertTrue(blockFile.delete());
      }
    }
  }
}
 
Example 16  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public boolean isFileClosed(String src) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  IsFileClosedRequestProto req = IsFileClosedRequestProto.newBuilder()
      .setSrc(src).build();
  try {
    return rpcProxy.isFileClosed(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 17  Project: hadoop  File: ViewFs.java
@Override
public Path resolvePath(final Path f) throws FileNotFoundException,
        AccessControlException, UnresolvedLinkException, IOException {
  final InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);
  if (res.isInternalDir()) {
    return f;
  }
  return res.targetFileSystem.resolvePath(res.remainingPath);
}
 
Example 18  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public HdfsFileStatus getFileLinkInfo(String src)
    throws AccessControlException, UnresolvedLinkException, IOException {
  GetFileLinkInfoRequestProto req = GetFileLinkInfoRequestProto.newBuilder()
      .setSrc(src).build();
  try {
    GetFileLinkInfoResponseProto result = rpcProxy.getFileLinkInfo(null, req);
    return result.hasFs() ?
        PBHelper.convert(result.getFs()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 19  Project: hadoop  File: ViewFs.java
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  res.targetFileSystem.setPermission(res.remainingPath, permission);
}
 
Example 20  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public void concat(String trg, String[] srcs) throws IOException,
    UnresolvedLinkException {
  ConcatRequestProto req = ConcatRequestProto.newBuilder().
      setTrg(trg).
      addAllSrcs(Arrays.asList(srcs)).build();
  try {
    rpcProxy.concat(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 21  Project: hadoop  File: ViewFs.java
@Override
public boolean delete(final Path f, final boolean recursive)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  // If internal dir or target is a mount link (i.e. remainingPath is Slash)
  if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) {
    throw new AccessControlException(
        "Cannot delete internal mount table directory: " + f);
  }
  return res.targetFileSystem.delete(res.remainingPath, recursive);
}
 
Example 22  Project: hadoop  File: FSDirStatAndListingOp.java
/**
 * Currently we only support "ls /xxx/.snapshot", which returns all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private static HdfsFileStatus getFileInfo4DotSnapshot(
    FSDirectory fsd, String src)
    throws UnresolvedLinkException {
  if (fsd.getINode4DotSnapshot(src) != null) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
        BlockStoragePolicySuite.ID_UNSPECIFIED);
  }
  return null;
}
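From a client, the ".snapshot" pseudo-directory described above is listed like any other path once snapshots exist. A minimal sketch, assuming an initialized DistributedFileSystem dfs, sufficient privileges for allowSnapshot, and hypothetical names:

Path dir = new Path("/data");
dfs.allowSnapshot(dir);            // admin operation; dir becomes snapshottable
dfs.createSnapshot(dir, "s1");
for (FileStatus s : dfs.listStatus(new Path(dir, ".snapshot"))) {
  System.out.println(s.getPath()); // e.g. .../data/.snapshot/s1
}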
 
Example 23  Project: hadoop  File: NamenodeFsck.java
boolean hdfsPathExists(String path)
    throws AccessControlException, UnresolvedLinkException, IOException {
  try {
    HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path);
    return (hfs != null);
  } catch (FileNotFoundException e) {
    return false;
  }
}
 
Example 24  Project: hadoop  File: FSDirSymlinkOp.java
static INodeSymlink unprotectedAddSymlink(FSDirectory fsd, INodesInPath iip,
    byte[] localName, long id, String target, long mtime, long atime,
    PermissionStatus perm)
    throws UnresolvedLinkException, QuotaExceededException {
  assert fsd.hasWriteLock();
  final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
      target);
  symlink.setLocalName(localName);
  return fsd.addINode(iip, symlink) != null ? symlink : null;
}
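The symlinks this helper adds to the namespace are created by clients through FileContext.createSymlink. A minimal sketch with hypothetical paths; note that symlink support is disabled by default in current Hadoop releases, so this assumes a cluster with symlinks enabled:

Path target = new Path("/data/real-file");   // hypothetical
Path link = new Path("/data/link-to-file");  // hypothetical
fc.createSymlink(target, link, false /* createParent */);  // fc: a FileContext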
 
Example 25  Project: hadoop  File: FSDirectory.java
/**
 * Add the given filename to the fs.
 * @return the new INodesInPath instance that contains the new INode
 */
INodesInPath addFile(INodesInPath existing, String localName, PermissionStatus
    permissions, short replication, long preferredBlockSize,
    String clientName, String clientMachine)
  throws FileAlreadyExistsException, QuotaExceededException,
    UnresolvedLinkException, SnapshotAccessControlException, AclException {

  long modTime = now();
  INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
      modTime, replication, preferredBlockSize);
  newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
  newNode.toUnderConstruction(clientName, clientMachine);

  INodesInPath newiip;
  writeLock();
  try {
    newiip = addINode(existing, newNode);
  } finally {
    writeUnlock();
  }
  if (newiip == null) {
    NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
        existing.getPath() + "/" + localName);
    return null;
  }

  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
  }
  return newiip;
}
 
Example 26  Project: hadoop  File: FSDirectory.java
/**
 * Check whether the path specifies a directory
 */
boolean isDir(String src) throws UnresolvedLinkException {
  src = normalizePath(src);
  readLock();
  try {
    INode node = getINode(src, false);
    return node != null && node.isDirectory();
  } finally {
    readUnlock();
  }
}
 
Example 27  Project: hadoop  File: FSDirectory.java
/**
 * Update the namespace, storage space and type space consumed for all
 * directories up to the parent directory of the file represented by the path.
 *
 * @param iip the INodesInPath instance containing all the INodes for
 *            updating quota usage
 * @param nsDelta the delta change of the namespace count
 * @param ssDelta the delta change of storage space consumed, without replication
 * @param replication the replication factor of the block consumption change
 * @throws QuotaExceededException if the new count violates any quota limit
 * @throws FileNotFoundException if the path does not exist
 */
void updateSpaceConsumed(INodesInPath iip, long nsDelta, long ssDelta, short replication)
  throws QuotaExceededException, FileNotFoundException,
  UnresolvedLinkException, SnapshotAccessControlException {
  writeLock();
  try {
    if (iip.getLastINode() == null) {
      throw new FileNotFoundException("Path not found: " + iip.getPath());
    }
    updateCount(iip, nsDelta, ssDelta, replication, true);
  } finally {
    writeUnlock();
  }
}
 
Example 28  Project: hadoop  File: FSDirectory.java
/**
 * Add the given child to the namespace.
 * @param existing the INodesInPath containing all the ancestral INodes
 * @param child the new INode to add
 * @return a new INodesInPath instance containing the new child INode. Null
 * if the adding fails.
 * @throws QuotaExceededException is thrown if it violates quota limit
 */
INodesInPath addINode(INodesInPath existing, INode child)
    throws QuotaExceededException, UnresolvedLinkException {
  cacheName(child);
  writeLock();
  try {
    return addLastINode(existing, child, true);
  } finally {
    writeUnlock();
  }
}
 
Example 29  Project: hadoop  File: FSDirectory.java
/**
 * FSEditLogLoader implementation.
 * Unlike FSNamesystem.truncate, this will not schedule block recovery.
 */
void unprotectedTruncate(String src, String clientName, String clientMachine,
                         long newLength, long mtime, Block truncateBlock)
    throws UnresolvedLinkException, QuotaExceededException,
    SnapshotAccessControlException, IOException {
  INodesInPath iip = getINodesInPath(src, true);
  INodeFile file = iip.getLastINode().asFile();
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  boolean onBlockBoundary =
      unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);

  if(! onBlockBoundary) {
    BlockInfoContiguous oldBlock = file.getLastBlock();
    Block tBlk = getFSNamesystem().prepareFileForTruncate(iip,
        clientName, clientMachine, file.computeFileSize() - newLength,
        truncateBlock);
    assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) &&
        tBlk.getNumBytes() == truncateBlock.getNumBytes() :
        "Should be the same block.";
    if(oldBlock.getBlockId() != tBlk.getBlockId() &&
       !file.isBlockInLatestSnapshot(oldBlock)) {
      getBlockManager().removeBlockFromMap(oldBlock);
    }
  }
  assert onBlockBoundary == (truncateBlock == null) :
    "truncateBlock is null iff on block boundary: " + truncateBlock;
  getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
 
Example 30  Project: hadoop  File: FSDirectory.java
boolean isInAnEZ(INodesInPath iip)
    throws UnresolvedLinkException, SnapshotAccessControlException {
  readLock();
  try {
    return ezManager.isInAnEZ(iip);
  } finally {
    readUnlock();
  }
}
 