Code examples for the class org.apache.hadoop.io.EnumSetWritable

The examples below show how the org.apache.hadoop.io.EnumSetWritable API is used in real projects; you can also follow the GitHub links to read the full source files.
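
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what EnumSetWritable itself does: it wraps a java.util.EnumSet so the set can be serialized through Hadoop's Writable contract, which is how it travels over RPC in the examples that follow. The class name EnumSetWritableDemo is an illustrative assumption; the constructor, get(), contains(), write(), and readFields() calls are the API actually exercised below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.io.EnumSetWritable;

public class EnumSetWritableDemo {
  public static void main(String[] args) throws Exception {
    // Wrap an EnumSet; passing the element class explicitly also covers the empty-set case.
    EnumSetWritable<CreateFlag> flags = new EnumSetWritable<>(
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), CreateFlag.class);

    // Serialize through the Writable contract, as Hadoop RPC would.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    flags.write(new DataOutputStream(buffer));

    // Deserialize into another instance and read the wrapped set back.
    EnumSetWritable<CreateFlag> copy = new EnumSetWritable<>(
        EnumSet.noneOf(CreateFlag.class), CreateFlag.class);
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

    System.out.println(copy.get());                        // [CREATE, OVERWRITE]
    System.out.println(copy.contains(CreateFlag.CREATE));  // true
  }
}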

Example 1  Project: hadoop  File: DFSClient.java
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
 
Example 2  Project: hadoop  File: PBHelper.java
public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
  int value = 0;
  if (flag.contains(CreateFlag.APPEND)) {
    value |= CreateFlagProto.APPEND.getNumber();
  }
  if (flag.contains(CreateFlag.CREATE)) {
    value |= CreateFlagProto.CREATE.getNumber();
  }
  if (flag.contains(CreateFlag.OVERWRITE)) {
    value |= CreateFlagProto.OVERWRITE.getNumber();
  }
  if (flag.contains(CreateFlag.LAZY_PERSIST)) {
    value |= CreateFlagProto.LAZY_PERSIST.getNumber();
  }
  if (flag.contains(CreateFlag.NEW_BLOCK)) {
    value |= CreateFlagProto.NEW_BLOCK.getNumber();
  }
  return value;
}
 
Example 3  Project: hadoop  File: PBHelper.java
public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
  EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
  if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
    result.add(CreateFlag.APPEND);
  }
  if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
    result.add(CreateFlag.CREATE);
  }
  if ((flag & CreateFlagProto.OVERWRITE_VALUE) 
      == CreateFlagProto.OVERWRITE_VALUE) {
    result.add(CreateFlag.OVERWRITE);
  }
  if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
      == CreateFlagProto.LAZY_PERSIST_VALUE) {
    result.add(CreateFlag.LAZY_PERSIST);
  }
  if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
      == CreateFlagProto.NEW_BLOCK_VALUE) {
    result.add(CreateFlag.NEW_BLOCK);
  }
  return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
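
The two helpers in examples 2 and 3 are inverses: the first flattens the wrapped EnumSet into the protobuf bitmask used on the wire, the second rebuilds an EnumSetWritable from that bitmask. A minimal round-trip sketch, assuming PBHelper from the org.apache.hadoop.hdfs.protocolPB package is on the classpath (the class name CreateFlagRoundTrip is illustrative):

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.io.EnumSetWritable;

public class CreateFlagRoundTrip {
  public static void main(String[] args) {
    EnumSetWritable<CreateFlag> in = new EnumSetWritable<>(
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST), CreateFlag.class);

    // EnumSet -> protobuf bitmask -> EnumSet; the set contents survive the round trip.
    int wire = PBHelper.convertCreateFlag(in);
    EnumSetWritable<CreateFlag> out = PBHelper.convertCreateFlag(wire);

    System.out.println(in.get().equals(out.get()));  // true
  }
}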
 
Example 4  Project: hadoop  File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
        PBHelper.convertCreateFlag(req.getFlag()) :
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result = server.append(req.getSrc(),
        req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelper.convert(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelper.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example 5  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 6  Project: hadoop  File: NNThroughputBenchmark.java
/**
 * Do file create.
 */
@Override
long executeOp(int daemonId, int inputIdx, String clientName) 
throws IOException {
  long start = Time.now();
  // dummyActionNoSynch(fileIdx);
  nameNodeProto.create(fileNames[daemonId][inputIdx],
      FsPermission.getDefault(), clientName,
      new EnumSetWritable<CreateFlag>(
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)),
      true, replication, BLOCK_SIZE, null);
  long end = Time.now();
  for (boolean written = !closeUponCreate; !written;
      written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
          clientName, null, INodeId.GRANDFATHER_INODE_ID));
  return end-start;
}
 
Example 7  Project: hadoop  File: TestNamenodeRetryCache.java
/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
    new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
    (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  
  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
 
Example 8  Project: hadoop  File: TestAddBlockRetry.java
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);

  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
 
Example 9  Project: hadoop  File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
 
Example 10  Project: big-c  File: DFSClient.java
/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   SafeModeException.class,
                                   DSQuotaExceededException.class,
                                   UnsupportedOperationException.class,
                                   UnresolvedPathException.class,
                                   SnapshotAccessControlException.class);
  }
}
 
Example 11  Project: big-c  File: PBHelper.java
public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
  int value = 0;
  if (flag.contains(CreateFlag.APPEND)) {
    value |= CreateFlagProto.APPEND.getNumber();
  }
  if (flag.contains(CreateFlag.CREATE)) {
    value |= CreateFlagProto.CREATE.getNumber();
  }
  if (flag.contains(CreateFlag.OVERWRITE)) {
    value |= CreateFlagProto.OVERWRITE.getNumber();
  }
  if (flag.contains(CreateFlag.LAZY_PERSIST)) {
    value |= CreateFlagProto.LAZY_PERSIST.getNumber();
  }
  if (flag.contains(CreateFlag.NEW_BLOCK)) {
    value |= CreateFlagProto.NEW_BLOCK.getNumber();
  }
  return value;
}
 
Example 12  Project: big-c  File: PBHelper.java
public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
  EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
  if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
    result.add(CreateFlag.APPEND);
  }
  if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
    result.add(CreateFlag.CREATE);
  }
  if ((flag & CreateFlagProto.OVERWRITE_VALUE) 
      == CreateFlagProto.OVERWRITE_VALUE) {
    result.add(CreateFlag.OVERWRITE);
  }
  if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
      == CreateFlagProto.LAZY_PERSIST_VALUE) {
    result.add(CreateFlag.LAZY_PERSIST);
  }
  if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
      == CreateFlagProto.NEW_BLOCK_VALUE) {
    result.add(CreateFlag.NEW_BLOCK);
  }
  return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
 
Example 13  Project: big-c  File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
        PBHelper.convertCreateFlag(req.getFlag()) :
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result = server.append(req.getSrc(),
        req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelper.convert(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelper.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example 14  Project: big-c  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 15  Project: big-c  File: NNThroughputBenchmark.java
/**
 * Do file create.
 */
@Override
long executeOp(int daemonId, int inputIdx, String clientName) 
throws IOException {
  long start = Time.now();
  // dummyActionNoSynch(fileIdx);
  nameNodeProto.create(fileNames[daemonId][inputIdx],
      FsPermission.getDefault(), clientName,
      new EnumSetWritable<CreateFlag>(
          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)),
      true, replication, BLOCK_SIZE, null);
  long end = Time.now();
  for (boolean written = !closeUponCreate; !written;
      written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
          clientName, null, INodeId.GRANDFATHER_INODE_ID));
  return end-start;
}
 
Example 16  Project: big-c  File: TestNamenodeRetryCache.java
/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
    new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
    (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  
  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
 
Example 17  Project: big-c  File: TestAddBlockRetry.java
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);

  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
 
Example 18  Project: big-c  File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
 
Example 19  Project: hadoop  File: NameNodeRpcServer.java
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }

  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }

  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(getRemoteUser()
        .getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }

  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
 
Example 20  Project: hadoop  File: NameNodeRpcServer.java
@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
 
Example 21  Project: hadoop  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws AccessControlException, AlreadyBeingCreatedException,
    DSQuotaExceededException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setClientName(clientName)
      .setCreateFlag(PBHelper.convertCreateFlag(flag))
      .setCreateParent(createParent)
      .setReplication(replication)
      .setBlockSize(blockSize);
  builder.addAllCryptoProtocolVersion(PBHelper.convert(supportedVersions));
  CreateRequestProto req = builder.build();
  try {
    CreateResponseProto res = rpcProxy.create(null, req);
    return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

}
 
Example 22  Project: hadoop  File: TestLease.java
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[]{"supergroup"};
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for(int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }

  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
  Mockito
      .doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final Configuration conf = new Configuration();
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
 
Example 23  Project: hadoop  File: TestRetryCacheWithHA.java
@Override
void invoke() throws Exception {
  EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
  this.status = client.getNamenode().create(fileName,
      FsPermission.getFileDefault(), client.getClientName(),
      new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
      BlockSize,
      new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES});
}
 
Example 24  Project: hadoop  File: TestNamenodeRetryCache.java
/**
 * Test for append
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  // Create a file with partial block
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
  
  // Retried append requests succeed
  newCall();
  LastBlockWithStatus b = nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  
  // non-retried call fails
  newCall();
  try {
    nnRpc.append(src, "holder",
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // Expected
  }
}
 
Example 25  Project: big-c  File: NameNodeRpcServer.java
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long.  Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }

  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }

  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(getRemoteUser()
        .getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }

  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
 
Example 26  Project: big-c  File: NameNodeRpcServer.java
@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        +src+" for "+clientName+" at "+clientMachine);
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
 
Example 27  Project: big-c  File: ClientNamenodeProtocolTranslatorPB.java
@Override
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize, 
    CryptoProtocolVersion[] supportedVersions)
    throws AccessControlException, AlreadyBeingCreatedException,
    DSQuotaExceededException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setClientName(clientName)
      .setCreateFlag(PBHelper.convertCreateFlag(flag))
      .setCreateParent(createParent)
      .setReplication(replication)
      .setBlockSize(blockSize);
  builder.addAllCryptoProtocolVersion(PBHelper.convert(supportedVersions));
  CreateRequestProto req = builder.build();
  try {
    CreateResponseProto res = rpcProxy.create(null, req);
    return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

}
 
Example 28  Project: big-c  File: TestLease.java
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[]{"supergroup"};
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for(int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }

  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
  Mockito
      .doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final Configuration conf = new Configuration();
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
 
Example 29  Project: big-c  File: TestRetryCacheWithHA.java
@Override
void invoke() throws Exception {
  EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
  this.status = client.getNamenode().create(fileName,
      FsPermission.getFileDefault(), client.getClientName(),
      new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
      BlockSize,
      new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES});
}
 
Example 30  Project: big-c  File: TestNamenodeRetryCache.java
/**
 * Test for append
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  // Create a file with partial block
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
  
  // Retried append requests succeed
  newCall();
  LastBlockWithStatus b = nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  
  // non-retried call fails
  newCall();
  try {
    nnRpc.append(src, "holder",
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // Expected
  }
}
 