Java source code examples: org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException (used together with org.apache.hadoop.fs.VolumeId)

The following examples show how org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException is used in open-source projects. The full source for each snippet can be found in the corresponding project on GitHub.

Example 1  Project: hadoop   File: DFSInputStream.java
/**
 * Should the block access token be refetched on an exception
 * 
 * @param ex Exception received
 * @param targetAddr Target datanode address from where exception was received
 * @return true if the block access token has expired or is invalid and it
 *         should be refetched
 */
private static boolean tokenRefetchNeeded(IOException ex,
    InetSocketAddress targetAddr) {
  /*
   * Get a new access token and retry. Retry is needed in 2 cases. 1)
   * When both NN and DN re-started while DFSClient holding a cached
   * access token. 2) In the case that NN fails to update its
   * access key at pre-set interval (by a wide margin) and
   * subsequently restarts. In this case, DN re-registers itself with
   * NN and receives a new access key, but DN will delete the old
   * access key from its memory since it's considered expired based on
   * the estimated expiration date.
   */
  if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
    DFSClient.LOG.info("Access token was invalid when connecting to "
        + targetAddr + " : " + ex);
    return true;
  }
  return false;
}
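As a rough illustration of how a caller might use this check, the sketch below retries a read after refetching block locations (and with them fresh block tokens) whenever the helper returns true. It is a hypothetical sketch, not the actual DFSInputStream retry logic; readFrom() and refetchLocations() are illustrative placeholders.

// Hypothetical retry loop; readFrom() and refetchLocations() are
// placeholders, not real DFSClient/DFSInputStream methods.
private void readWithTokenRetry(InetSocketAddress dnAddr) throws IOException {
  boolean refetchedToken = false;
  while (true) {
    try {
      readFrom(dnAddr);          // attempt the block read
      return;
    } catch (IOException ex) {
      if (!refetchedToken && tokenRefetchNeeded(ex, dnAddr)) {
        refetchLocations();      // ask the NameNode for fresh LocatedBlocks and tokens
        refetchedToken = true;   // retry only once for an invalid token
        continue;
      }
      throw ex;                  // not a token problem, or already retried
    }
  }
}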
 
Example 2  Project: hadoop   File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
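A minimal sketch of how this helper is typically invoked after a client issues a data-transfer op to a datanode and parses the reply. The stream handling and the framing call (vintPrefixed) mirror the usual HDFS wire pattern but are assumptions of this sketch, not code taken from DataTransferProtoUtil.

// Sketch: parse the datanode's reply and let checkBlockOpStatus turn a
// non-SUCCESS status into an exception (InvalidBlockTokenException for
// ERROR_ACCESS_TOKEN, plain IOException otherwise).
DataInputStream in = new DataInputStream(peer.getInputStream());
BlockOpResponseProto response =
    BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));  // assumed framing
String logInfo = "when reading " + block + " from " + datanodeId;
DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);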
 
Example 3  Project: big-c   File: DFSInputStream.java
/**
 * Should the block access token be refetched on an exception
 * 
 * @param ex Exception received
 * @param targetAddr Target datanode address from where exception was received
 * @return true if the block access token has expired or is invalid and it
 *         should be refetched
 */
private static boolean tokenRefetchNeeded(IOException ex,
    InetSocketAddress targetAddr) {
  /*
   * Get a new access token and retry. Retry is needed in 2 cases. 1)
   * When both NN and DN re-started while DFSClient holding a cached
   * access token. 2) In the case that NN fails to update its
   * access key at pre-set interval (by a wide margin) and
   * subsequently restarts. In this case, DN re-registers itself with
   * NN and receives a new access key, but DN will delete the old
   * access key from its memory since it's considered expired based on
   * the estimated expiration date.
   */
  if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
    DFSClient.LOG.info("Access token was invalid when connecting to "
        + targetAddr + " : " + ex);
    return true;
  }
  return false;
}
 
Example 4  Project: big-c   File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
 
Example 5  Project: hadoop   File: DFSClient.java
/**
 * Get block location information about a list of {@link HdfsBlockLocation}.
 * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
 * get {@link BlockStorageLocation}s for blocks returned by
 * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
 * .
 * 
 * This is done by making a round of RPCs to the associated datanodes, asking
 * for the volume of each block replica. The returned array of
 * {@link BlockStorageLocation} exposes this information as a
 * {@link VolumeId}.
 * 
 * @param blockLocations
 *          target blocks on which to query volume location information
 * @return volumeBlockLocations original block array augmented with additional
 *         volume location information for each replica.
 */
public BlockStorageLocation[] getBlockStorageLocations(
    List<BlockLocation> blockLocations) throws IOException,
    UnsupportedOperationException, InvalidBlockTokenException {
  if (!getConf().getHdfsBlocksMetadataEnabled) {
    throw new UnsupportedOperationException("Datanode-side support for " +
        "getVolumeBlockLocations() must also be enabled in the client " +
        "configuration.");
  }
  // Downcast blockLocations and fetch out required LocatedBlock(s)
  List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
  for (BlockLocation loc : blockLocations) {
    if (!(loc instanceof HdfsBlockLocation)) {
      throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
          "expected to be passed HdfsBlockLocations");
    }
    HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
    blocks.add(hdfsLoc.getLocatedBlock());
  }
  
  // Re-group the LocatedBlocks to be grouped by datanodes, with the values
  // a list of the LocatedBlocks on the datanode.
  Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks = 
      new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
  for (LocatedBlock b : blocks) {
    for (DatanodeInfo info : b.getLocations()) {
      if (!datanodeBlocks.containsKey(info)) {
        datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
      }
      List<LocatedBlock> l = datanodeBlocks.get(info);
      l.add(b);
    }
  }
      
  // Make RPCs to the datanodes to get volume locations for its replicas
  TraceScope scope =
    Trace.startSpan("getBlockStorageLocations", traceSampler);
  Map<DatanodeInfo, HdfsBlocksMetadata> metadatas;
  try {
    metadatas = BlockStorageLocationUtil.
        queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
            getConf().getFileBlockStorageLocationsNumThreads,
            getConf().getFileBlockStorageLocationsTimeoutMs,
            getConf().connectToDnViaHostname);
    if (LOG.isTraceEnabled()) {
      LOG.trace("metadata returned: "
          + Joiner.on("\n").withKeyValueSeparator("=").join(metadatas));
    }
  } finally {
    scope.close();
  }
  
  // Regroup the returned VolumeId metadata to again be grouped by
  // LocatedBlock rather than by datanode
  Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
      .associateVolumeIdsWithBlocks(blocks, metadatas);
  
  // Combine original BlockLocations with new VolumeId information
  BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
      .convertToVolumeBlockLocations(blocks, blockVolumeIds);

  return volumeBlockLocations;
}
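For context, a caller that receives the returned array might inspect the per-replica volume information like this (a usage sketch; the surrounding setup that produced dfsClient and blockLocations is assumed):

// Sketch: report which datanode volume each replica of each block lives on.
// A replica's VolumeId may be invalid or null when the datanode could not be
// reached or did not know about the block.
BlockStorageLocation[] locs = dfsClient.getBlockStorageLocations(blockLocations);
for (BlockStorageLocation loc : locs) {
  System.out.println("block at offset " + loc.getOffset()
      + ", length " + loc.getLength());
  for (VolumeId volumeId : loc.getVolumeIds()) {
    System.out.println("  replica volume: " + volumeId);
  }
}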
 
Example 6  Project: hadoop   File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
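A hedged sketch of how such a helper might be driven from a test case: read once while the token is valid, then again after it has become unusable. The helpers getFirstLocatedBlock() and invalidateToken() are placeholders for whatever the surrounding test actually uses to obtain a LocatedBlock and to make its cached token stale.

// Sketch only; the real test manipulates token lifetimes via its own utilities.
LocatedBlock lblock = getFirstLocatedBlock(fs, path);   // hypothetical helper
tryRead(conf, lblock, true);    // valid token: a BlockReader must be created
invalidateToken(lblock);        // hypothetical: e.g. let the token expire
tryRead(conf, lblock, false);   // stale token: expect InvalidBlockTokenException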
 
Example 7  Project: big-c   File: DFSClient.java
/**
 * Get block location information about a list of {@link HdfsBlockLocation}.
 * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
 * get {@link BlockStorageLocation}s for blocks returned by
 * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
 * .
 * 
 * This is done by making a round of RPCs to the associated datanodes, asking
 * for the volume of each block replica. The returned array of
 * {@link BlockStorageLocation} exposes this information as a
 * {@link VolumeId}.
 * 
 * @param blockLocations
 *          target blocks on which to query volume location information
 * @return volumeBlockLocations original block array augmented with additional
 *         volume location information for each replica.
 */
public BlockStorageLocation[] getBlockStorageLocations(
    List<BlockLocation> blockLocations) throws IOException,
    UnsupportedOperationException, InvalidBlockTokenException {
  if (!getConf().getHdfsBlocksMetadataEnabled) {
    throw new UnsupportedOperationException("Datanode-side support for " +
        "getVolumeBlockLocations() must also be enabled in the client " +
        "configuration.");
  }
  // Downcast blockLocations and fetch out required LocatedBlock(s)
  List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
  for (BlockLocation loc : blockLocations) {
    if (!(loc instanceof HdfsBlockLocation)) {
      throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
          "expected to be passed HdfsBlockLocations");
    }
    HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
    blocks.add(hdfsLoc.getLocatedBlock());
  }
  
  // Re-group the LocatedBlocks to be grouped by datanodes, with the values
  // a list of the LocatedBlocks on the datanode.
  Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks = 
      new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
  for (LocatedBlock b : blocks) {
    for (DatanodeInfo info : b.getLocations()) {
      if (!datanodeBlocks.containsKey(info)) {
        datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
      }
      List<LocatedBlock> l = datanodeBlocks.get(info);
      l.add(b);
    }
  }
      
  // Make RPCs to the datanodes to get volume locations for its replicas
  TraceScope scope =
    Trace.startSpan("getBlockStorageLocations", traceSampler);
  Map<DatanodeInfo, HdfsBlocksMetadata> metadatas;
  try {
    metadatas = BlockStorageLocationUtil.
        queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
            getConf().getFileBlockStorageLocationsNumThreads,
            getConf().getFileBlockStorageLocationsTimeoutMs,
            getConf().connectToDnViaHostname);
    if (LOG.isTraceEnabled()) {
      LOG.trace("metadata returned: "
          + Joiner.on("\n").withKeyValueSeparator("=").join(metadatas));
    }
  } finally {
    scope.close();
  }
  
  // Regroup the returned VolumeId metadata to again be grouped by
  // LocatedBlock rather than by datanode
  Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
      .associateVolumeIdsWithBlocks(blocks, metadatas);
  
  // Combine original BlockLocations with new VolumeId information
  BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
      .convertToVolumeBlockLocations(blocks, blockVolumeIds);

  return volumeBlockLocations;
}
 
Example 8  Project: big-c   File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
 
Example 9  Project: hadoop   File: BlockReaderFactory.java
/**
 * Determine if an exception is security-related.
 *
 * We need to handle these exceptions differently than other IOExceptions.
 * They don't indicate a communication problem.  Instead, they mean that there
 * is some action the client needs to take, such as refetching block tokens,
 * renewing encryption keys, etc.
 *
 * @param ioe    The exception
 * @return       True only if the exception is security-related.
 */
private static boolean isSecurityException(IOException ioe) {
  return (ioe instanceof InvalidToken) ||
          (ioe instanceof InvalidEncryptionKeyException) ||
          (ioe instanceof InvalidBlockTokenException) ||
          (ioe instanceof AccessControlException);
}
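A minimal sketch of the kind of decision this predicate supports when a connection attempt to a datanode fails. The retry bookkeeping and helper names below are assumptions for illustration, not the actual BlockReaderFactory code.

// Sketch: security-related failures mean the client should refresh its
// token or encryption key, not write off the datanode as dead.
try {
  return connectToDatanode();        // placeholder for the connect/read attempt
} catch (IOException ioe) {
  if (isSecurityException(ioe)) {
    // The datanode is fine; refetch the block token or encryption key
    // and let the caller retry.
    requestNewToken();               // hypothetical helper
  } else {
    // A genuine I/O problem: mark the datanode as bad and move on.
    addToDeadNodes(datanode);        // hypothetical helper
  }
  throw ioe;
}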
 
Example 10  Project: hadoop   File: DistributedFileSystem.java
/**
 * Used to query storage location information for a list of blocks. This list
 * of blocks is normally constructed via a series of calls to
 * {@link DistributedFileSystem#getFileBlockLocations(Path, long, long)} to
 * get the blocks for ranges of a file.
 * 
 * The returned array of {@link BlockStorageLocation} augments
 * {@link BlockLocation} with a {@link VolumeId} per block replica. The
 * VolumeId specifies the volume on the datanode on which the replica resides.
 * The VolumeId associated with a replica may be null because volume
 * information can be unavailable if the corresponding datanode is down or
 * if the requested block is not found.
 * 
 * This API is unstable, and datanode-side support is disabled by default. It
 * can be enabled by setting "dfs.datanode.hdfs-blocks-metadata.enabled" to
 * true.
 * 
 * @param blocks
 *          List of target BlockLocations to query volume location information
 * @return volumeBlockLocations Augmented array of
 *         {@link BlockStorageLocation}s containing additional volume location
 *         information for each replica of each block.
 */
@InterfaceStability.Unstable
public BlockStorageLocation[] getFileBlockStorageLocations(
    List<BlockLocation> blocks) throws IOException, 
    UnsupportedOperationException, InvalidBlockTokenException {
  return dfs.getBlockStorageLocations(blocks);
}
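A usage sketch for this API under the conditions stated in the javadoc: the datanode-side feature must be enabled, and the input BlockLocations should come from getFileBlockLocations on the same DistributedFileSystem. The file path is made up, and the cast assumes fs.defaultFS points at an HDFS cluster.

// Sketch: query per-replica volume information for the blocks of one file.
Configuration conf = new Configuration();
conf.setBoolean("dfs.datanode.hdfs-blocks-metadata.enabled", true);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

Path file = new Path("/user/example/data.bin");      // hypothetical path
FileStatus status = dfs.getFileStatus(file);
BlockLocation[] blockLocs = dfs.getFileBlockLocations(status, 0, status.getLen());

BlockStorageLocation[] storageLocs =
    dfs.getFileBlockStorageLocations(Arrays.asList(blockLocs));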
 
Example 11  Project: big-c   File: BlockReaderFactory.java
/**
 * Determine if an exception is security-related.
 *
 * We need to handle these exceptions differently than other IOExceptions.
 * They don't indicate a communication problem.  Instead, they mean that there
 * is some action the client needs to take, such as refetching block tokens,
 * renewing encryption keys, etc.
 *
 * @param ioe    The exception
 * @return       True only if the exception is security-related.
 */
private static boolean isSecurityException(IOException ioe) {
  return (ioe instanceof InvalidToken) ||
          (ioe instanceof InvalidEncryptionKeyException) ||
          (ioe instanceof InvalidBlockTokenException) ||
          (ioe instanceof AccessControlException);
}
 
Example 12  Project: big-c   File: DistributedFileSystem.java
/**
 * Used to query storage location information for a list of blocks. This list
 * of blocks is normally constructed via a series of calls to
 * {@link DistributedFileSystem#getFileBlockLocations(Path, long, long)} to
 * get the blocks for ranges of a file.
 * 
 * The returned array of {@link BlockStorageLocation} augments
 * {@link BlockLocation} with a {@link VolumeId} per block replica. The
 * VolumeId specifies the volume on the datanode on which the replica resides.
 * The VolumeId associated with a replica may be null because volume
 * information can be unavailable if the corresponding datanode is down or
 * if the requested block is not found.
 * 
 * This API is unstable, and datanode-side support is disabled by default. It
 * can be enabled by setting "dfs.datanode.hdfs-blocks-metadata.enabled" to
 * true.
 * 
 * @param blocks
 *          List of target BlockLocations to query volume location information
 * @return volumeBlockLocations Augmented array of
 *         {@link BlockStorageLocation}s containing additional volume location
 *         information for each replica of each block.
 */
@InterfaceStability.Unstable
public BlockStorageLocation[] getFileBlockStorageLocations(
    List<BlockLocation> blocks) throws IOException, 
    UnsupportedOperationException, InvalidBlockTokenException {
  return dfs.getBlockStorageLocations(blocks);
}