org.apache.hadoop.fs.InvalidRequestException#org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry源码实例Demo

下面列出了org.apache.hadoop.fs.InvalidRequestException#org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

@Override
public ListCacheDirectivesResponseProto listCacheDirectives(
    RpcController controller, ListCacheDirectivesRequestProto request)
        throws ServiceException {
  try {
    // Translate the protobuf filter into its native representation.
    final CacheDirectiveInfo filter = PBHelper.convert(request.getFilter());
    final BatchedEntries<CacheDirectiveEntry> batch =
        server.listCacheDirectives(request.getPrevId(), filter);
    // Re-encode the batch as a response proto, preserving the hasMore flag.
    final ListCacheDirectivesResponseProto.Builder response =
        ListCacheDirectivesResponseProto.newBuilder();
    response.setHasMore(batch.hasMore());
    final int count = batch.size();
    for (int idx = 0; idx < count; idx++) {
      response.addElements(PBHelper.convert(batch.get(idx)));
    }
    return response.build();
  } catch (IOException e) {
    // RPC translators surface server-side IOExceptions as ServiceException.
    throw new ServiceException(e);
  }
}
 
@Override
public BatchedEntries<CacheDirectiveEntry>
    listCacheDirectives(long prevId,
        CacheDirectiveInfo filter) throws IOException {
  // A null filter means "match everything"; normalize to an empty filter.
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  final ListCacheDirectivesRequestProto req =
      ListCacheDirectivesRequestProto.newBuilder()
          .setPrevId(prevId)
          .setFilter(PBHelper.convert(filter))
          .build();
  try {
    return new BatchedCacheEntries(rpcProxy.listCacheDirectives(null, req));
  } catch (ServiceException e) {
    // Unwrap the remote IOException carried inside the ServiceException.
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
源代码3 项目: hadoop   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  // Poll the namenode until the directive becomes visible, or give up
  // after CHECKTIMES one-second waits.
  int attempt = 0;
  while (attempt < CHECKTIMES) {
    RemoteIterator<CacheDirectiveEntry> entries = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (entries.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
    attempt++;
  }
  return false;
}
 
源代码4 项目: hadoop   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  // Poll until a directive with the expected id carries the new
  // replication factor, or give up after CHECKTIMES one-second waits.
  for (int attempt = 0; attempt < CHECKTIMES; attempt++) {
    RemoteIterator<CacheDirectiveEntry> entries = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    while (entries.hasNext()) {
      CacheDirectiveInfo info = entries.next().getInfo();
      if ((info.getId() == id) &&
          (info.getReplication().shortValue() == newReplication)) {
        return true;
      }
    }
    Thread.sleep(1000);
  }
  return false;
}
 
源代码5 项目: hadoop   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  // Poll until the directive disappears from the listing, or give up
  // after CHECKTIMES one-second waits.
  int attempt = 0;
  while (attempt < CHECKTIMES) {
    RemoteIterator<CacheDirectiveEntry> entries = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (!entries.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
    attempt++;
  }
  return false;
}
 
源代码6 项目: hadoop   文件: TestRetryCacheWithHA.java
@SuppressWarnings("unchecked")
private void listCacheDirectives(
    HashSet<String> poolNames, int active) throws Exception {
  // Work on a copy so pools can be ticked off as they are observed.
  HashSet<String> remaining = (HashSet<String>)poolNames.clone();
  RemoteIterator<CacheDirectiveEntry> entries = dfs.listCacheDirectives(null);
  final int expected = poolNames.size();
  for (int i = 0; i < expected; i++) {
    CacheDirectiveEntry entry = entries.next();
    String poolName = entry.getInfo().getPool();
    assertTrue("The pool name should be expected", remaining.remove(poolName));
    // Fail over between the two namenodes on every other iteration so the
    // iterator is exercised across HA transitions.
    if (i % 2 == 0) {
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", remaining.isEmpty());
}
 
源代码7 项目: nnproxy   文件: CacheRegistry.java
List<CacheDirectiveEntry> getAllCacheDirectives(UpstreamManager.Upstream upstream) throws IOException {
    // Page through every directive on the upstream namenode; the paging
    // cursor is the id of the last directive seen (-1 starts from the top).
    final CacheDirectiveInfo matchAll = new CacheDirectiveInfo.Builder().build();
    final List<CacheDirectiveEntry> result = new ArrayList<>();
    long lastSeenId = -1;
    for (;;) {
        BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> page =
                upstream.protocol.listCacheDirectives(lastSeenId, matchAll);
        final int pageSize = page.size();
        if (pageSize == 0) {
            // An empty page means we have drained the listing.
            break;
        }
        for (int i = 0; i < pageSize; i++) {
            CacheDirectiveEntry entry = page.get(i);
            lastSeenId = entry.getInfo().getId();
            result.add(entry);
        }
    }
    return result;
}
 
@Override
public ListCacheDirectivesResponseProto listCacheDirectives(
    RpcController controller, ListCacheDirectivesRequestProto request)
        throws ServiceException {
  try {
    // Decode the filter, delegate to the server, then re-encode the batch.
    final CacheDirectiveInfo filter = PBHelper.convert(request.getFilter());
    final BatchedEntries<CacheDirectiveEntry> batch =
        server.listCacheDirectives(request.getPrevId(), filter);
    final ListCacheDirectivesResponseProto.Builder response =
        ListCacheDirectivesResponseProto.newBuilder();
    response.setHasMore(batch.hasMore());
    final int count = batch.size();
    for (int idx = 0; idx < count; idx++) {
      response.addElements(PBHelper.convert(batch.get(idx)));
    }
    return response.build();
  } catch (IOException e) {
    // RPC translators surface server-side IOExceptions as ServiceException.
    throw new ServiceException(e);
  }
}
 
@Override
public BatchedEntries<CacheDirectiveEntry>
    listCacheDirectives(long prevId,
        CacheDirectiveInfo filter) throws IOException {
  // Normalize a null filter to an empty "match everything" filter.
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  final ListCacheDirectivesRequestProto req =
      ListCacheDirectivesRequestProto.newBuilder()
          .setPrevId(prevId)
          .setFilter(PBHelper.convert(filter))
          .build();
  try {
    return new BatchedCacheEntries(rpcProxy.listCacheDirectives(null, req));
  } catch (ServiceException e) {
    // Unwrap the remote IOException carried inside the ServiceException.
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
源代码10 项目: big-c   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  // Poll the namenode until the directive becomes visible, or give up
  // after CHECKTIMES one-second waits.
  int attempt = 0;
  while (attempt < CHECKTIMES) {
    RemoteIterator<CacheDirectiveEntry> entries = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (entries.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
    attempt++;
  }
  return false;
}
 
源代码11 项目: big-c   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(
            new CacheDirectiveInfo.Builder().
                setPool(directive.getPool()).
                setPath(directive.getPath()).
                build());
    while (iter.hasNext()) {
      CacheDirectiveInfo result = iter.next().getInfo();
      if ((result.getId() == id) &&
          (result.getReplication().shortValue() == newReplication)) {
        return true;
      }
    }
    Thread.sleep(1000);
  }
  return false;
}
 
源代码12 项目: big-c   文件: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(
            new CacheDirectiveInfo.Builder().
              setPool(directive.getPool()).
              setPath(directive.getPath()).
              build());
    if (!iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
 
源代码13 项目: big-c   文件: TestRetryCacheWithHA.java
@SuppressWarnings("unchecked")
private void listCacheDirectives(
    HashSet<String> poolNames, int active) throws Exception {
  HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
  RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
  int poolCount = poolNames.size();
  for (int i=0; i<poolCount; i++) {
    CacheDirectiveEntry directive = directives.next();
    String pollName = directive.getInfo().getPool();
    assertTrue("The pool name should be expected", tmpNames.remove(pollName));
    if (i % 2 == 0) {
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", tmpNames.isEmpty());
}
 
源代码14 项目: hadoop   文件: DistributedFileSystem.java
/**
 * List cache directives.  Incrementally fetches results from the server.
 * 
 * @param filter Filter parameters to use when listing the directives, null to
 *               list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveEntry objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    // Normalize a null filter to an empty "match everything" filter.
    filter = new CacheDirectiveInfo.Builder().build();
  }
  if (filter.getPath() != null) {
    // Resolve any relative filter path against the working directory so the
    // NameNode receives an absolute path.
    filter = new CacheDirectiveInfo.Builder(filter).
        setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
        build();
  }
  final RemoteIterator<CacheDirectiveEntry> iter =
      dfs.listCacheDirectives(filter);
  // Wrap the client iterator so returned paths are fully qualified.
  return new RemoteIterator<CacheDirectiveEntry>() {
    @Override
    public boolean hasNext() throws IOException {
      return iter.hasNext();
    }

    @Override
    public CacheDirectiveEntry next() throws IOException {
      // Although the paths we get back from the NameNode should always be
      // absolute, we call makeQualified to add the scheme and authority of
      // this DistributedFilesystem.
      CacheDirectiveEntry desc = iter.next();
      CacheDirectiveInfo info = desc.getInfo();
      Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
      return new CacheDirectiveEntry(
          new CacheDirectiveInfo.Builder(info).setPath(p).build(),
          desc.getStats());
    }
  };
}
 
源代码15 项目: hadoop   文件: NameNodeRpcServer.java
@Override // ClientProtocol
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  checkNNStartup();
  // Treat a missing filter as "list everything".
  final CacheDirectiveInfo effectiveFilter = (filter != null)
      ? filter
      : new CacheDirectiveInfo.Builder().build();
  return namesystem.listCacheDirectives(prevId, effectiveFilter);
}
 
源代码16 项目: hadoop   文件: PBHelper.java
// An entry proto is simply the info and stats protos encoded side by side.
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
  return CacheDirectiveEntryProto.newBuilder()
      .setInfo(PBHelper.convert(entry.getInfo()))
      .setStats(PBHelper.convert(entry.getStats()))
      .build();
}
 
源代码17 项目: hadoop   文件: TestCacheDirectives.java
@After
public void teardown() throws Exception {
  // Remove cache directives left behind by tests so that we release mmaps.
  RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
  while (directives.hasNext()) {
    long directiveId = directives.next().getInfo().getId();
    dfs.removeCacheDirective(directiveId);
  }
  waitForCachedBlocks(namenode, 0, 0, "teardown");
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
 
源代码18 项目: hadoop   文件: TestCacheDirectives.java
// Asserts that the iterator yields exactly the given directive ids, in order.
private static void validateListAll(
    RemoteIterator<CacheDirectiveEntry> iter,
    Long... ids) throws Exception {
  for (Long expectedId : ids) {
    assertTrue("Unexpectedly few elements", iter.hasNext());
    assertEquals("Unexpected directive ID", expectedId,
        iter.next().getInfo().getId());
  }
  // Nothing may remain once every expected id has been consumed.
  assertFalse("Unexpectedly many list elements", iter.hasNext());
}
 
源代码19 项目: nnproxy   文件: CacheRegistry.java
List<CacheDirectiveEntry> maskWithFsIndex(List<CacheDirectiveEntry> entries, int fsIndex) {
    // Rewrite each directive id via maskDirectiveId so it encodes the owning
    // filesystem index; every other field is carried over unchanged.
    List<CacheDirectiveEntry> result = new ArrayList<>(entries.size());
    for (CacheDirectiveEntry source : entries) {
        CacheDirectiveInfo original = source.getInfo();
        CacheDirectiveInfo remapped = new CacheDirectiveInfo.Builder()
                .setId(maskDirectiveId(original.getId(), fsIndex))
                .setPath(original.getPath())
                .setReplication(original.getReplication())
                .setPool(original.getPool())
                .setExpiration(original.getExpiration())
                .build();
        result.add(new CacheDirectiveEntry(remapped, source.getStats()));
    }
    return result;
}
 
源代码20 项目: big-c   文件: DistributedFileSystem.java
/**
 * List cache directives.  Incrementally fetches results from the server.
 * 
 * @param filter Filter parameters to use when listing the directives, null to
 *               list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveEntry objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    // Normalize a null filter to an empty "match everything" filter.
    filter = new CacheDirectiveInfo.Builder().build();
  }
  if (filter.getPath() != null) {
    // Resolve any relative filter path against the working directory so the
    // NameNode receives an absolute path.
    filter = new CacheDirectiveInfo.Builder(filter).
        setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
        build();
  }
  final RemoteIterator<CacheDirectiveEntry> iter =
      dfs.listCacheDirectives(filter);
  // Wrap the client iterator so returned paths are fully qualified.
  return new RemoteIterator<CacheDirectiveEntry>() {
    @Override
    public boolean hasNext() throws IOException {
      return iter.hasNext();
    }

    @Override
    public CacheDirectiveEntry next() throws IOException {
      // Although the paths we get back from the NameNode should always be
      // absolute, we call makeQualified to add the scheme and authority of
      // this DistributedFilesystem.
      CacheDirectiveEntry desc = iter.next();
      CacheDirectiveInfo info = desc.getInfo();
      Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
      return new CacheDirectiveEntry(
          new CacheDirectiveInfo.Builder(info).setPath(p).build(),
          desc.getStats());
    }
  };
}
 
源代码21 项目: big-c   文件: NameNodeRpcServer.java
@Override // ClientProtocol
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  checkNNStartup();
  // Treat a missing filter as "list everything".
  final CacheDirectiveInfo effectiveFilter = (filter != null)
      ? filter
      : new CacheDirectiveInfo.Builder().build();
  return namesystem.listCacheDirectives(prevId, effectiveFilter);
}
 
源代码22 项目: big-c   文件: PBHelper.java
// An entry proto is simply the info and stats protos encoded side by side.
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
  return CacheDirectiveEntryProto.newBuilder()
      .setInfo(PBHelper.convert(entry.getInfo()))
      .setStats(PBHelper.convert(entry.getStats()))
      .build();
}
 
源代码23 项目: big-c   文件: TestCacheDirectives.java
@After
public void teardown() throws Exception {
  // Remove cache directives left behind by tests so that we release mmaps.
  RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
  while (directives.hasNext()) {
    long directiveId = directives.next().getInfo().getId();
    dfs.removeCacheDirective(directiveId);
  }
  waitForCachedBlocks(namenode, 0, 0, "teardown");
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
 
源代码24 项目: big-c   文件: TestCacheDirectives.java
// Asserts that the iterator yields exactly the given directive ids, in order.
private static void validateListAll(
    RemoteIterator<CacheDirectiveEntry> iter,
    Long... ids) throws Exception {
  for (Long expectedId : ids) {
    assertTrue("Unexpectedly few elements", iter.hasNext());
    assertEquals("Unexpected directive ID", expectedId,
        iter.next().getInfo().getId());
  }
  // Nothing may remain once every expected id has been consumed.
  assertFalse("Unexpectedly many list elements", iter.hasNext());
}
 
源代码25 项目: hadoop   文件: DFSClient.java
// Returns an iterator over cache directives matching the given filter,
// backed by a CacheDirectiveIterator that queries the namenode.
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  return new CacheDirectiveIterator(namenode, filter, traceSampler);
}
 
源代码26 项目: hadoop   文件: FSNDNCacheOp.java
// Lists cache directives on behalf of FSNamesystem: resolves the caller's
// permission checker and delegates the actual listing to the CacheManager.
static BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
    FSNamesystem fsn, CacheManager cacheManager,
    long startId, CacheDirectiveInfo filter) throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  return cacheManager.listCacheDirectives(startId, filter, pc);
}
 
源代码27 项目: hadoop   文件: CacheManager.java
/**
 * Lists cache directives with ids greater than prevId that match the given
 * filter, returning at most maxListCacheDirectivesNumResponses entries per
 * batch.  Requires the namesystem read lock to be held.
 *
 * @param prevId pagination cursor: the last directive id the caller has seen
 * @param filter attributes to match; a replication filter is rejected, and a
 *               non-null id restricts the listing to that single directive
 * @param pc permission checker for the caller, or null to skip the pool
 *           read-permission check
 * @return a batch of matching entries; hasMore is true when truncated
 * @throws InvalidRequestException if filtering by replication, or if a
 *         requested id does not exist
 */
public BatchedListEntries<CacheDirectiveEntry> 
      listCacheDirectives(long prevId,
          CacheDirectiveInfo filter,
          FSPermissionChecker pc) throws IOException {
  assert namesystem.hasReadLock();
  final int NUM_PRE_ALLOCATED_ENTRIES = 16;
  String filterPath = null;
  if (filter.getPath() != null) {
    filterPath = validatePath(filter);
  }
  if (filter.getReplication() != null) {
    throw new InvalidRequestException(
        "Filtering by replication is unsupported.");
  }

  // Querying for a single ID
  final Long id = filter.getId();
  if (id != null) {
    if (!directivesById.containsKey(id)) {
      throw new InvalidRequestException("Did not find requested id " + id);
    }
    // Since we use a tailMap on directivesById, setting prev to id-1 gets
    // us the directive with the id (if present)
    prevId = id - 1;
  }

  ArrayList<CacheDirectiveEntry> replies =
      new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
  int numReplies = 0;
  SortedMap<Long, CacheDirective> tailMap =
    directivesById.tailMap(prevId + 1);
  for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
    // Batch is full: return what we have and signal the caller to re-query.
    if (numReplies >= maxListCacheDirectivesNumResponses) {
      return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
    }
    CacheDirective curDirective = cur.getValue();
    CacheDirectiveInfo info = cur.getValue().toInfo();

    // If the requested ID is present, it should be the first item.
    // Hitting this case means the ID is not present, or we're on the second
    // item and should break out.
    if (id != null &&
        !(info.getId().equals(id))) {
      break;
    }
    // Skip entries that do not match the pool or path filters.
    if (filter.getPool() != null && 
        !info.getPool().equals(filter.getPool())) {
      continue;
    }
    if (filterPath != null &&
        !info.getPath().toUri().getPath().equals(filterPath)) {
      continue;
    }
    // Only include directives whose pool the caller is allowed to read;
    // a permission failure silently excludes the entry.
    boolean hasPermission = true;
    if (pc != null) {
      try {
        pc.checkPermission(curDirective.getPool(), FsAction.READ);
      } catch (AccessControlException e) {
        hasPermission = false;
      }
    }
    if (hasPermission) {
      replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
      numReplies++;
    }
  }
  return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
 
源代码28 项目: hadoop   文件: PBHelper.java
// Decodes an entry proto into a native CacheDirectiveEntry by converting
// its embedded info and stats protos.
public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
  return new CacheDirectiveEntry(
      PBHelper.convert(proto.getInfo()),
      PBHelper.convert(proto.getStats()));
}
 
// Decodes and returns the i-th element of the underlying RPC response.
@Override
public CacheDirectiveEntry get(int i) {
  return PBHelper.convert(response.getElements(i));
}
 
源代码30 项目: hadoop   文件: CacheAdmin.java
/**
 * CLI handler that lists cache directives matching the optional -path,
 * -pool and -id arguments, printing one table row per directive and, when
 * -stats is given, the cached/needed byte and file counters.
 *
 * @return 0 on success, 1 on an unrecognized argument, 2 on an IOException
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  // Build the server-side filter from the command-line options.
  CacheDirectiveInfo.Builder builder =
      new CacheDirectiveInfo.Builder();
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  String idFilter = StringUtils.popOptionWithArgument("-id", args);
  if (idFilter != null) {
    builder.setId(Long.parseLong(idFilter));
  }
  // Anything left over is an argument we do not understand.
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    // Extra columns only when -stats was requested.
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
                addField("BYTES_CACHED", Justification.RIGHT).
                addField("FILES_NEEDED", Justification.RIGHT).
                addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      List<String> row = new LinkedList<String>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      tableListing.addRow(row.toArray(new String[row.size()]));
      numEntries++;
    }
    // Pluralize "entry"/"entries" in the summary line.
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}