org.apache.hadoop.fs.Path#depth() Code Examples

The following are code examples of org.apache.hadoop.fs.Path#depth(), collected from open-source projects on GitHub.
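For reference, depth() returns the number of elements in the path component of a Path; the root path has depth 0, and the scheme and authority do not count. A minimal standalone sketch (not taken from any of the projects below):

import org.apache.hadoop.fs.Path;

public class PathDepthDemo {
    public static void main(String[] args) {
        System.out.println(new Path("/").depth());                  // 0: the root path
        System.out.println(new Path("/a/b/c").depth());             // 3: three path elements
        System.out.println(new Path("hdfs://nn:8020/a/b").depth()); // 2: scheme and authority are ignored
        System.out.println(new Path("data/file.txt").depth());      // 2: relative paths count the same way
    }
}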

Example 1 (project: nifi, file: AbstractHadoopProcessor.java)
/**
 * Returns the relative path of the child that does not include the filename or the root path.
 *
 * @param root
 *            the path to relativize from
 * @param child
 *            the path to relativize
 * @return the relative path
 */
public static String getPathDifference(final Path root, final Path child) {
    final int depthDiff = child.depth() - root.depth();
    if (depthDiff <= 1) {
        return "".intern();
    }
    String lastRoot = root.getName();
    Path childsParent = child.getParent();
    final StringBuilder builder = new StringBuilder();
    builder.append(childsParent.getName());
    for (int i = (depthDiff - 3); i >= 0; i--) {
        childsParent = childsParent.getParent();
        String name = childsParent.getName();
        if (name.equals(lastRoot) && childsParent.toString().endsWith(root.toString())) {
            break;
        }
        builder.insert(0, Path.SEPARATOR).insert(0, name);
    }
    return builder.toString();
}
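A quick usage sketch with illustrative paths, assuming the method above is in scope; it rebuilds the components between the root and the filename:

Path root  = new Path("/data/in");
Path child = new Path("/data/in/2020/01/part-0000.txt");
String diff = getPathDifference(root, child);                          // "2020/01"
String none = getPathDifference(root, new Path("/data/in/part.txt"));  // "" (child sits directly under root)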
 
Example 2 (project: hbase, file: CommonFSUtils.java)
/**
 * Compare the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c are passed in, it
 * will compare the '/a/b/c' part. Because of the up-front depth check, passing in 'hdfs://a/b/c'
 * and 'b/c' would return false. Does not consider the scheme; i.e. if the schemes differ but the
 * paths match, the two will equate.
 * @param pathToSearch Path we will be trying to match against
 * @param pathTail what to match
 * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  if (pathToSearch.depth() != pathTail.depth()) {
    return false;
  }
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.length() <= 0) {
      break;
    }
    // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
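A usage sketch with illustrative paths; because the two paths must have the same number of components, this effectively tests scheme-insensitive path equality:

boolean same = isMatchingTail(new Path("hdfs://nn:8020/hbase/data/t1"), new Path("/hbase/data/t1")); // true
boolean tail = isMatchingTail(new Path("hdfs://nn:8020/hbase/data/t1"), new Path("data/t1"));        // false: depths 3 and 2 differ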
 
Example 3 (project: incubator-gobblin, file: GitConfigMonitor.java)
/**
 * Check whether the file has the proper naming and hierarchy.
 * @param configFilePath the relative path from the repo root
 * @return false if the file does not conform
 */
private boolean checkConfigFilePath(String configFilePath) {
  // The config needs to be stored at configDir/flowGroup/flowName.(pull|job|json|conf)
  Path configFile = new Path(configFilePath);
  String fileExtension = Files.getFileExtension(configFile.getName());

  if (configFile.depth() != CONFIG_FILE_DEPTH
      || !configFile.getParent().getParent().getName().equals(folderName)
      || !(PullFileLoader.DEFAULT_JAVA_PROPS_PULL_FILE_EXTENSIONS.contains(fileExtension)
      || PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS.contains(fileExtension))) {
    log.warn("Changed file does not conform to directory structure and file name format, skipping: "
        + configFilePath);

    return false;
  }

  return true;
}
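Assuming CONFIG_FILE_DEPTH is 3, a conforming path looks like <folderName>/<flowGroup>/<flowName>.pull: exactly three components deep, with the grandparent directory named folderName and a recognized pull-file extension.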
 
Example 4 (project: incubator-gobblin, file: GitFlowGraphMonitor.java)
/**
 * Check whether the file has the proper naming and hierarchy.
 * @param file the relative path from the repo root
 * @param depth the expected depth of the file in the repo
 * @return false if the file does not conform
 */
private boolean checkFilePath(String file, int depth) {
  // The file is either a node file or an edge file and needs to be stored at either:
  // flowGraphDir/nodeName/nodeName.properties (if it is a node file), or
  // flowGraphDir/nodeName/nodeName/edgeName.properties (if it is an edge file)

  Path filePath = new Path(file);
  String fileExtension = Files.getFileExtension(filePath.getName());
  if (filePath.depth() != depth || !checkFileLevelRelativeToRoot(filePath, depth)
      || !(this.javaPropsExtensions.contains(fileExtension))) {
    log.warn("Changed file does not conform to directory structure and file name format, skipping: "
        + filePath);
    return false;
  }
  return true;
}
 
Example 5 (project: RDFS, file: BlockIntegrityMonitor.java)
static boolean doesParityDirExist(FileSystem parityFs, String path)
    throws IOException {
  // Check if it is impossible to have a parity file. We check if the
  // parent directory of the lost file exists under a parity path.
  // If the directory does not exist, the parity file cannot exist.
  Path fileRaidParent = new Path(path).getParent();
  Path dirRaidParent = (fileRaidParent != null)? fileRaidParent.getParent(): null;
  boolean parityCanExist = false;
  for (Codec codec: Codec.getCodecs()) {
    Path parityDir = null;
    if (codec.isDirRaid) {
      if (dirRaidParent == null) 
        continue;
      parityDir = (dirRaidParent.depth() == 0)?
        new Path(codec.getParityPrefix()):
        new Path(codec.getParityPrefix(),
            RaidNode.makeRelative(dirRaidParent));
    } else {
      parityDir = (fileRaidParent.depth() == 0)?
        new Path(codec.getParityPrefix()):
        new Path(codec.getParityPrefix(),
            RaidNode.makeRelative(fileRaidParent));
    }
    if (parityFs.exists(parityDir)) {
      parityCanExist = true;
      break;
    }
  }
  return parityCanExist;
}
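For example, for a lost file /user/foo/bar.txt, fileRaidParent is /user/foo and dirRaidParent is /user; a file-level codec probes <parityPrefix>/user/foo for existence, while a directory-level codec probes <parityPrefix>/user (RaidNode.makeRelative converts the absolute path into a form that can be appended under the parity prefix).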
 
Example 6 (project: RDFS, file: HadoopArchives.java)
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p: paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
 
Example 7 (project: presto, file: SemiTransactionalHiveMetastore.java)
private static boolean isSameOrParent(Path parent, Path child)
{
    int parentDepth = parent.depth();
    int childDepth = child.depth();
    if (parentDepth > childDepth) {
        return false;
    }
    for (int i = childDepth; i > parentDepth; i--) {
        child = child.getParent();
    }
    return parent.equals(child);
}
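The deeper path is walked up to the depth of the shallower one and then compared, covering both the equal-path and strict-ancestor cases. A sketch with illustrative paths, assuming the method is in scope:

boolean ancestor = isSameOrParent(new Path("/warehouse/db"), new Path("/warehouse/db/t/part=1")); // true
boolean samePath = isSameOrParent(new Path("/warehouse/db"), new Path("/warehouse/db"));          // true
boolean sibling  = isSameOrParent(new Path("/warehouse/db"), new Path("/warehouse/other"));       // false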
 
Example 8 (project: hadoop, file: TestNameNodeMetrics.java)
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Add a file with 32 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short)3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));

  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long)file.depth(), rb);

  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file

  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);

  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
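Note why file.depth() appears in the FilesCreated assertion: creating the file also creates each missing ancestor directory, so the counter grows by one inode per path component.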
 
Example 9 (project: RDFS, file: HadoopArchives.java)
private boolean checkValidName(String name) {
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  if (name.endsWith(".har")) 
    return true;
  return false;
}
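The depth() != 1 check rejects anything that is not a single path component: "foo.har" passes, while "dir/foo.har" (depth 2) does not.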
 
Example 10 (project: hadoop, file: HadoopArchives.java)
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p: paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
 
Example 11 (project: big-c, file: TestNameNodeMetrics.java)
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Add a file with 32 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short)3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));

  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long)file.depth(), rb);

  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file

  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);

  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
 
Example 12 (project: big-c, file: HadoopArchives.java)
private boolean checkValidName(String name) {
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  if (name.endsWith(".har")) 
    return true;
  return false;
}
 
Example 13 (project: big-c, file: HadoopArchives.java)
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p: paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
 
Example 14 (project: zeppelin, file: SubmarineJob.java)
public void cleanJobDefaultCheckpointPath() {
  String jobCheckpointPath = getJobDefaultCheckpointPath();
  Path notePath = new Path(jobCheckpointPath);
  if (notePath.depth() <= 3) {
    submarineUI.outputLog("ERROR", "Checkpoint path depth must be greater than 3");
    return;
  }
  try {
    String message = "Clean up the checkpoint directory: " + jobCheckpointPath;
    submarineUI.outputLog("", message);
    hdfsClient.delete(notePath);
  } catch (IOException e) {
    LOGGER.error(e.getMessage(), e);
  }
}
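The depth guard acts as a safety net: refusing paths of depth 3 or less keeps the recursive delete from wiping a directory close to the filesystem root.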
 
Example 15 (project: hbase, file: FSUtils.java)
/**
 * Compare the path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c are passed in, it
 * will compare the '/a/b/c' part. Because of the depth check, passing in 'hdfs://a/b/c' and 'b/c'
 * would return false. Does not consider the scheme; i.e. if the schemes differ but the paths
 * match, the two will equate.
 * @param pathToSearch Path we will be trying to match against
 * @param pathTail what to match
 * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;

  if (pathToSearch.depth() != pathTail.depth()) {
    return false;
  }

  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.isEmpty()) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.isEmpty()) {
      break;
    }
    // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
 
Example 16 (project: incubator-gobblin, file: GitFlowGraphMonitor.java)
/**
 * Remove an element (i.e. either a {@link DataNode} or a {@link FlowEdge}) from the
 * {@link FlowGraph} for a renamed or deleted {@link DataNode} or {@link FlowEdge} file.
 * @param change the diff entry describing the renamed or deleted file
 */
@Override
public void removeChange(DiffEntry change) {
  Path path = new Path(change.getOldPath());
  if (path.depth() == NODE_FILE_DEPTH) {
    removeDataNode(change);
  } else if (path.depth() == EDGE_FILE_DEPTH) {
    removeFlowEdge(change);
  }
}
 
Example 17 (project: Hi-WAY, file: Data.java)
public void stageOut() throws IOException {
	Path localPath = getLocalPath();
	Path hdfsDirectory = getHdfsPath().getParent();
	Path hdfsPath = getHdfsPath();
	if (hdfsDirectory.depth() > 0) {
		mkHdfsDir(hdfsDirectory);
	}
	hdfs.copyFromLocalFile(false, true, localPath, hdfsPath);
}
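The depth() > 0 check skips the mkdir when the target directory is the filesystem root, which has depth 0 and always exists.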
 
Example 18 (project: hadoop, file: NameNodeRpcServer.java)
/**
 * Check path length does not exceed maximum.  Returns true if
 * length and depth are okay.  Returns false if length is too long 
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
          srcPath.depth() <= MAX_PATH_DEPTH);
}
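In the Hadoop sources these limits are declared alongside this method; in this version MAX_PATH_LENGTH is 8000 characters and MAX_PATH_DEPTH is 1000 components.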
 
Example 19 (project: hadoop-gpu, file: NameNode.java)
/**
 * Check path length does not exceed maximum.  Returns true if
 * length and depth are okay.  Returns false if length is too long 
 * or depth is too great.
 * 
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
          srcPath.depth() <= MAX_PATH_DEPTH);
}