org.apache.hadoop.fs.Path#equals() Source Code Examples

The following are example usages of org.apache.hadoop.fs.Path#equals(), collected from open-source projects. Follow each project link to view the full source on GitHub.
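Before the examples, a note on semantics: Path#equals compares the underlying java.net.URI, so the scheme and authority take part in the comparison, not just the path string. The minimal sketch below (host and paths are made up for illustration) shows the effect; when two paths may come from differently qualified sources, qualify both with FileSystem#makeQualified before comparing.

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
    public static void main(String[] args) {
        // Equality is URI-based: a fully qualified path and a scheme-less
        // path are not equal, even if they name the same file.
        Path qualified = new Path("hdfs://namenode:8020/data/file");
        Path unqualified = new Path("/data/file");
        System.out.println(qualified.equals(unqualified));               // false

        // Identical URIs compare equal, and the Path constructor
        // normalizes the path string (e.g. trailing slashes).
        System.out.println(unqualified.equals(new Path("/data/file")));  // true
        System.out.println(new Path("/data/file/").equals(unqualified)); // true
    }
}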

Example 1 Project: presto File: SemiTransactionalHiveMetastore.java
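Presto's metastore commit path calls targetPath.equals(currentPath) to decide whether an asynchronous rename is needed: when the write location already matches the table's target location, the file moves are skipped.
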
private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableAndMore)
{
    deleteOnly = false;

    Table table = tableAndMore.getTable();
    Path targetPath = new Path(table.getStorage().getLocation());
    Path currentPath = tableAndMore.getCurrentLocation().get();
    cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, false));
    if (!targetPath.equals(currentPath)) {
        asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, tableAndMore.getFileNames().get());
    }
    updateStatisticsOperations.add(new UpdateStatisticsOperation(
            tableAndMore.getIdentity(),
            table.getSchemaTableName(),
            Optional.empty(),
            tableAndMore.getStatisticsUpdate(),
            true));
}
 
Example 2 Project: emodb File: EmoFileSystem.java
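EmoDB's Hadoop FileSystem adapter first checks path.equals(_rootPath) so that the root gets its own FileStatus before the path is interpreted as a table or a split.
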
@Override
public FileStatus getFileStatus(Path path)
        throws IOException {
    if (path.equals(_rootPath)) {
        return getRootFileStatus(_rootPath);
    }

    String table = getTableName(_rootPath, path);
    String split = getSplitName(_rootPath, path);

    if (split == null) {
        // This is a table. Even if the table doesn't exist, still return a value.
        return getTableFileStatus(_rootPath, table);
    }

    // This is a split. As before, we're using max long for the split size.
    return getSplitFileStatus(_rootPath, table, splitAsGzipFile(split), Long.MAX_VALUE, 1024);
}
 
Example 3 Project: hadoop File: LocalResourcesTrackerImpl.java
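YARN's local-resource tracker uses cacheDir.equals(cacheRoot) to distinguish the cache root itself (tracked under the empty relative path) from subdirectories, whose relative path is derived by stripping the root prefix.
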
public void incrementFileCountForLocalCacheDirectory(Path cacheDir) {
  if (useLocalCacheDirectoryManager) {
    Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(
        cacheDir);
    if (cacheRoot != null) {
      LocalCacheDirectoryManager dir = directoryManagers.get(cacheRoot);
      if (dir == null) {
        dir = new LocalCacheDirectoryManager(conf);
        LocalCacheDirectoryManager otherDir =
            directoryManagers.putIfAbsent(cacheRoot, dir);
        if (otherDir != null) {
          dir = otherDir;
        }
      }
      if (cacheDir.equals(cacheRoot)) {
        dir.incrementFileCountForPath("");
      } else {
        String dirStr = cacheDir.toUri().getRawPath();
        String rootStr = cacheRoot.toUri().getRawPath();
        dir.incrementFileCountForPath(
            dirStr.substring(rootStr.length() + 1));
      }
    }
  }
}
 
Example 4 Project: emodb File: FileSystemUtil.java
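Here the incoming path is qualified first, then compared to the root with Path#equals; the parent comparisons use Objects.equals because getParent() returns null at the root.
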
/**
 * Gets the table name from a path, or null if the path is the root path.
 */
@Nullable
public static String getTableName(Path rootPath, Path path) {
    path = qualified(rootPath, path);
    if (rootPath.equals(path)) {
        // Path is root, no table
        return null;
    }

    Path tablePath;
    Path parent = path.getParent();
    if (Objects.equals(parent, rootPath)) {
        // The path itself represents a table (e.g., emodb://ci.us/mytable)
        tablePath = path;
    } else if (parent != null && Objects.equals(parent.getParent(), rootPath)) {
        // The path is a split (e.g., emodb://ci.us/mytable/split-id)
        tablePath = parent;
    } else {
        throw new IllegalArgumentException(
                format("Path does not represent a table, split, or root (path=%s, root=%s)", path, rootPath));
    }
    return decode(tablePath.getName());
}
 
Example 5 Project: big-c File: LocalResourcesTrackerImpl.java
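The same local-resource tracking logic as Example 3, as it appears in the big-c project.
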
public void incrementFileCountForLocalCacheDirectory(Path cacheDir) {
  if (useLocalCacheDirectoryManager) {
    Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(
        cacheDir);
    if (cacheRoot != null) {
      LocalCacheDirectoryManager dir = directoryManagers.get(cacheRoot);
      if (dir == null) {
        dir = new LocalCacheDirectoryManager(conf);
        LocalCacheDirectoryManager otherDir =
            directoryManagers.putIfAbsent(cacheRoot, dir);
        if (otherDir != null) {
          dir = otherDir;
        }
      }
      if (cacheDir.equals(cacheRoot)) {
        dir.incrementFileCountForPath("");
      } else {
        String dirStr = cacheDir.toUri().getRawPath();
        String rootStr = cacheRoot.toUri().getRawPath();
        dir.incrementFileCountForPath(
            dirStr.substring(rootStr.length() + 1));
      }
    }
  }
}
 
Example 6 Project: pravega File: MockFileSystem.java
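Pravega's mock FileSystem short-circuits with f.equals(root.getPath()) and only consults per-file data for non-root paths.
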
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    if (f.equals(root.getPath())) {
        return root;
    }
    return getFileData(f).getStatus();
}
 
Example 7 Project: presto File: LocationHandle.java
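The LocationHandle constructor enforces an invariant with Path#equals: when the write mode declares the write path identical to the target path, any mismatch is rejected up front.
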
public LocationHandle(
        Path targetPath,
        Path writePath,
        boolean isExistingTable,
        WriteMode writeMode)
{
    if (writeMode.isWritePathSameAsTargetPath() && !targetPath.equals(writePath)) {
        throw new IllegalArgumentException(format("targetPath is expected to be same as writePath for writeMode %s", writeMode));
    }
    this.targetPath = requireNonNull(targetPath, "targetPath is null");
    this.writePath = requireNonNull(writePath, "writePath is null");
    this.isExistingTable = isExistingTable;
    this.writeMode = requireNonNull(writeMode, "writeMode is null");
}
 
Example 8 Project: big-c File: TestS3ABlocksize.java
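This S3A test walks a directory listing and uses file.equals(stat.getPath()) to locate the entry for the created file before asserting its block size.
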
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
  FileSystem fs = getFileSystem();
  long defaultBlockSize = fs.getDefaultBlockSize();
  assertEquals("incorrect blocksize",
      S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
  long newBlockSize = defaultBlockSize * 2;
  fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);

  Path dir = path("testBlockSize");
  Path file = new Path(dir, "file");
  createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
  FileStatus fileStatus = fs.getFileStatus(file);
  assertEquals("Double default block size in stat(): " + fileStatus,
      newBlockSize,
      fileStatus.getBlockSize());

  // check the listing & assert that the block size is picked up by
  // this route too.
  boolean found = false;
  FileStatus[] listing = fs.listStatus(dir);
  for (FileStatus stat : listing) {
    LOG.info("entry: {}", stat);
    if (file.equals(stat.getPath())) {
      found = true;
      assertEquals("Double default block size in ls(): " + stat,
          newBlockSize,
          stat.getBlockSize());
    }
  }
  assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
 
Example 9 Project: presto File: SemiTransactionalHiveMetastore.java
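When adding a partition, the current directory is renamed to the target only if targetPath.equals(currentPath) is false; a missing target directory is created instead.
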
private void prepareAddPartition(HdfsContext hdfsContext, HiveIdentity identity, PartitionAndMore partitionAndMore)
{
    deleteOnly = false;

    Partition partition = partitionAndMore.getPartition();
    String targetLocation = partition.getStorage().getLocation();
    Path currentPath = partitionAndMore.getCurrentLocation();
    Path targetPath = new Path(targetLocation);

    PartitionAdder partitionAdder = partitionAdders.computeIfAbsent(
            partition.getSchemaTableName(),
            ignored -> new PartitionAdder(partitionAndMore.getIdentity(), partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE));

    if (pathExists(hdfsContext, hdfsEnvironment, currentPath)) {
        if (!targetPath.equals(currentPath)) {
            renameDirectory(
                    hdfsContext,
                    hdfsEnvironment,
                    currentPath,
                    targetPath,
                    () -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(hdfsContext, targetPath, true)));
        }
    }
    else {
        cleanUpTasksForAbort.add(new DirectoryCleanUpTask(hdfsContext, targetPath, true));
        createDirectory(hdfsContext, hdfsEnvironment, targetPath);
    }
    String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues());
    partitionAdder.addPartition(new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()));
}
 
Example 10 Project: big-c File: InMemoryFileSystemStore.java
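An in-memory store lists the immediate children of a path by testing normalizedPath.equals(p.getParent()) for every inode at or after the normalized path.
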
@Override
public Set<Path> listSubPaths(Path path) throws IOException {
  Path normalizedPath = normalize(path);
  // This is inefficient but more than adequate for testing purposes.
  Set<Path> subPaths = new LinkedHashSet<Path>();
  for (Path p : inodes.tailMap(normalizedPath).keySet()) {
    if (normalizedPath.equals(p.getParent())) {
      subPaths.add(p);
    }
  }
  return subPaths;
}
 
Example 11 Project: presto File: FileHiveMetastore.java
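For managed tables, the partition location must equal the expected metadata directory; Path#equals performs that check after wrapping the storage location in a Path.
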
private void verifiedPartition(Table table, Partition partition)
{
    Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());

    if (table.getTableType().equals(MANAGED_TABLE.name())) {
        if (!partitionMetadataDirectory.equals(new Path(partition.getStorage().getLocation()))) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Partition directory must be " + partitionMetadataDirectory);
        }
    }
    else if (table.getTableType().equals(EXTERNAL_TABLE.name())) {
        try {
            Path externalLocation = new Path(partition.getStorage().getLocation());
            FileSystem externalFileSystem = hdfsEnvironment.getFileSystem(hdfsContext, externalLocation);
            if (!externalFileSystem.isDirectory(externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "External partition location does not exist");
            }
            if (isChildDirectory(catalogDirectory, externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "External partition location cannot be inside the system metadata directory");
            }
        }
        catch (IOException e) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not validate external partition location", e);
        }
    }
    else {
        throw new PrestoException(NOT_SUPPORTED, "Partitions cannot be added to " + table.getTableType());
    }
}
 
Example 12 Project: presto File: FileHiveMetastore.java
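A recursive ancestor test: Path#equals is the success condition, and Path#isRoot terminates the walk up the parent chain.
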
private static boolean isChildDirectory(Path parentDirectory, Path childDirectory)
{
    if (parentDirectory.equals(childDirectory)) {
        return true;
    }
    if (childDirectory.isRoot()) {
        return false;
    }
    return isChildDirectory(parentDirectory, childDirectory.getParent());
}
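
To make the termination behavior concrete, here is a small self-contained sketch (the paths are hypothetical) that re-implements the recursion above and exercises it:

import org.apache.hadoop.fs.Path;

public class ChildDirectoryDemo {
    // Re-implementation of the method above, for illustration only.
    static boolean isChildDirectory(Path parentDirectory, Path childDirectory) {
        if (parentDirectory.equals(childDirectory)) {
            return true;   // reached the candidate ancestor
        }
        if (childDirectory.isRoot()) {
            return false;  // climbed to the root without a match
        }
        return isChildDirectory(parentDirectory, childDirectory.getParent());
    }

    public static void main(String[] args) {
        Path catalog = new Path("file:/metastore/catalog");
        // Ascends part=1 -> table -> db -> catalog, where equals() matches.
        System.out.println(isChildDirectory(catalog,
                new Path("file:/metastore/catalog/db/table/part=1")));           // true
        // Climbs to file:/ without ever matching the catalog.
        System.out.println(isChildDirectory(catalog, new Path("file:/elsewhere"))); // false
    }
}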
 
Example 13 Project: hbase File: SecureBulkLoadManager.java
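HBase's bulk-load rollback compares the source path to the staging path with p.equals(stageP); when they match, the HFile was never renamed, so the move back is skipped.
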
@Override
public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
  try {
    Path p = new Path(srcPath);
    if (srcFs == null) {
      srcFs = FileSystem.newInstance(p.toUri(), conf);
    }
    if (!FSUtils.isSameHdfs(conf, srcFs, fs)) {
      // files are copied so no need to move them back
      return;
    }
    Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));

    // In the case of replication for bulk-load files, HFiles are not renamed by the endpoint
    // during the prepare stage, so there is no need to rename them again here.
    if (p.equals(stageP)) {
      LOG.debug(p.getName() + " is already available in source directory. Skipping rename.");
      return;
    }

    LOG.debug("Moving " + stageP + " back to " + p);
    if (!fs.rename(stageP, p)) {
      throw new IOException("Failed to move HFile: " + stageP + " to " + p);
    }

    // restore original permission
    if (origPermissions.containsKey(srcPath)) {
      fs.setPermission(p, origPermissions.get(srcPath));
    } else {
      LOG.warn("Can't find previous permission for path=" + srcPath);
    }
  } finally {
    closeSrcFs();
  }
}
 
Example 14 Project: hadoop File: SnapshotTestHelper.java
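This snapshot helper combines a name check (rootParent.getName().equals(".snapshot")) with !file.equals(snapshotDir) so the snapshottable directory itself is never mapped to a snapshot file.
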
/**
 * Generate the path for a snapshot file.
 * 
 * @param snapshotRoot of format
 *          {@literal <snapshottable_dir>/.snapshot/<snapshot_name>}
 * @param file path to a file
 * @return The path of the snapshot of the file, assuming the file has a
 *         snapshot under the snapshot root of format
 *         {@literal <snapshottable_dir>/.snapshot/<snapshot_name>/<path_to_file_inside_snapshot>}.
 *         Null if the file is not under the directory associated with the
 *         snapshot root.
 */
static Path getSnapshotFile(Path snapshotRoot, Path file) {
  Path rootParent = snapshotRoot.getParent();
  if (rootParent != null && rootParent.getName().equals(".snapshot")) {
    Path snapshotDir = rootParent.getParent();
    if (file.toString().contains(snapshotDir.toString())
        && !file.equals(snapshotDir)) {
      String fileName = file.toString().substring(
          snapshotDir.toString().length() + 1);
      Path snapshotFile = new Path(snapshotRoot, fileName);
      return snapshotFile;
    }
  }
  return null;
}
 
Example 15 Project: hadoop File: RetriableFileCopyCommand.java
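DistCp chooses where to create its temp file based on target.equals(targetWorkPath): if the target is the work path itself, the temp file goes in the parent directory instead.
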
private Path getTmpFile(Path target, Mapper.Context context) {
  Path targetWorkPath = new Path(context.getConfiguration().
      get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  Path root = target.equals(targetWorkPath) ? targetWorkPath.getParent() : targetWorkPath;
  LOG.info("Creating temp file: " +
      new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString()));
  return new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString());
}
 
Example 16 Project: kite File: SingleStreamFileSystem.java
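A single-stream FileSystem that serves its one wrapped input stream only when the requested path equals the backing path, and rejects everything else.
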
@Override
public FSDataInputStream open(final Path f, final int bufferSize)
    throws IOException {
  if (f.equals(path)) {
    return inputStream;
  }
  throw new UnsupportedOperationException("Path " + f.getName()
      + " is not found");
}
 
Example 17 Project: hadoop-gpu File: InMemoryFileSystemStore.java
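The same listSubPaths logic as Example 10, as it appears in the hadoop-gpu project.
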
public Set<Path> listSubPaths(Path path) throws IOException {
  Path normalizedPath = normalize(path);
  // This is inefficient but more than adequate for testing purposes.
  Set<Path> subPaths = new LinkedHashSet<Path>();
  for (Path p : inodes.tailMap(normalizedPath).keySet()) {
    if (normalizedPath.equals(p.getParent())) {
      subPaths.add(p);
    }
  }
  return subPaths;
}
 
Example 18 Project: hbase File: WALSplitUtil.java
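After writing the new sequence-id file, stale ones are deleted; newSeqIdFile.equals(status.getPath()) spares the file that was just created.
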
/**
 * Create a file with name as region's max sequence id
 */
public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId)
    throws IOException {
  FileStatus[] files = getSequenceIdFiles(walFS, regionDir);
  long maxSeqId = getMaxSequenceId(files);
  if (maxSeqId > newMaxSeqId) {
    throw new IOException("The new max sequence id " + newMaxSeqId
        + " is less than the old max sequence id " + maxSeqId);
  }
  // write a new seqId file
  Path newSeqIdFile =
      new Path(getRegionDirRecoveredEditsDir(regionDir), newMaxSeqId + SEQUENCE_ID_FILE_SUFFIX);
  if (newMaxSeqId != maxSeqId) {
    try {
      if (!walFS.createNewFile(newSeqIdFile) && !walFS.exists(newSeqIdFile)) {
        throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
      }
      LOG.debug("Wrote file={}, newMaxSeqId={}, maxSeqId={}", newSeqIdFile, newMaxSeqId,
        maxSeqId);
    } catch (FileAlreadyExistsException ignored) {
      // Recent HDFS versions throw this exception; it's all right if newSeqIdFile already exists.
    }
  }
  // remove old ones
  for (FileStatus status : files) {
    if (!newSeqIdFile.equals(status.getPath())) {
      walFS.delete(status.getPath(), false);
    }
  }
}
 
Example 19 Project: presto File: SemiTransactionalHiveMetastore.java
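For CREATE TABLE AS SELECT on an unpartitioned table, targetPath.equals(currentPath.get()) determines whether the data directory still needs to be moved into place; the partitioned branch uses the same comparison to tolerate an already-created target directory.
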
private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore)
{
    deleteOnly = false;

    Table table = tableAndMore.getTable();
    if (table.getTableType().equals(MANAGED_TABLE.name())) {
        String targetLocation = table.getStorage().getLocation();
        checkArgument(!targetLocation.isEmpty(), "target location is empty");
        Optional<Path> currentPath = tableAndMore.getCurrentLocation();
        Path targetPath = new Path(targetLocation);
        if (table.getPartitionColumns().isEmpty() && currentPath.isPresent()) {
            // CREATE TABLE AS SELECT unpartitioned table
            if (targetPath.equals(currentPath.get())) {
                // Target path and current path are the same. Therefore, directory move is not needed.
            }
            else {
                renameDirectory(
                        context,
                        hdfsEnvironment,
                        currentPath.get(),
                        targetPath,
                        () -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)));
            }
        }
        else {
            // CREATE TABLE AS SELECT partitioned table, or
            // CREATE TABLE partitioned/unpartitioned table (without data)
            if (pathExists(context, hdfsEnvironment, targetPath)) {
                if (currentPath.isPresent() && currentPath.get().equals(targetPath)) {
                    // It is okay to skip directory creation when currentPath is equal to targetPath
                    // because the directory may have been created when creating partition directories.
                    // However, it is important to note that the two being equal does not guarantee
                    // a directory had been created.
                }
                else {
                    throw new PrestoException(
                            HIVE_PATH_ALREADY_EXISTS,
                            format("Unable to create directory %s: target directory already exists", targetPath));
                }
            }
            else {
                cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true));
                createDirectory(context, hdfsEnvironment, targetPath);
            }
        }
    }
    addTableOperations.add(new CreateTableOperation(tableAndMore.getIdentity(), table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting()));
    if (!isPrestoView(table)) {
        updateStatisticsOperations.add(new UpdateStatisticsOperation(
                tableAndMore.getIdentity(),
                table.getSchemaTableName(),
                Optional.empty(),
                tableAndMore.getStatisticsUpdate(),
                false));
    }
}
 
Example 20 Project: presto File: SemiTransactionalHiveMetastore.java
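Altering a partition compares locations twice: a String comparison of the old and new storage locations to plan staging renames, and targetPath.equals(currentPath) to decide whether the new data must be moved to the target.
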
private void prepareAlterPartition(HdfsContext hdfsContext, HiveIdentity identity, PartitionAndMore partitionAndMore)
{
    deleteOnly = false;

    Partition partition = partitionAndMore.getPartition();
    String targetLocation = partition.getStorage().getLocation();
    Optional<Partition> oldPartition = delegate.getPartition(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues());
    if (oldPartition.isEmpty()) {
        throw new PrestoException(
                TRANSACTION_CONFLICT,
                format("The partition that this transaction modified was deleted in another transaction. %s %s", partition.getTableName(), partition.getValues()));
    }
    String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues());
    PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(identity, partition, partitionName);
    String oldPartitionLocation = oldPartition.get().getStorage().getLocation();
    Path oldPartitionPath = new Path(oldPartitionLocation);

    // Location of the old partition and the new partition can be different because we allow arbitrary directories through LocationService.
    // If the location of the old partition is the same as the location of the new partition:
    // * Rename the old data directory to a temporary path with a special suffix
    // * Remember we will need to delete that directory at the end if transaction successfully commits
    // * Remember we will need to undo the rename if transaction aborts
    // Otherwise,
    // * Remember we will need to delete the location of the old partition at the end if transaction successfully commits
    if (targetLocation.equals(oldPartitionLocation)) {
        String queryId = hdfsContext.getQueryId().orElseThrow(() -> new IllegalArgumentException("query ID not present"));
        Path oldPartitionStagingPath = new Path(oldPartitionPath.getParent(), "_temp_" + oldPartitionPath.getName() + "_" + queryId);
        renameDirectory(
                hdfsContext,
                hdfsEnvironment,
                oldPartitionPath,
                oldPartitionStagingPath,
                () -> renameTasksForAbort.add(new DirectoryRenameTask(hdfsContext, oldPartitionStagingPath, oldPartitionPath)));
        if (!skipDeletionForAlter) {
            deletionTasksForFinish.add(new DirectoryDeletionTask(hdfsContext, oldPartitionStagingPath));
        }
    }
    else {
        if (!skipDeletionForAlter) {
            deletionTasksForFinish.add(new DirectoryDeletionTask(hdfsContext, oldPartitionPath));
        }
    }

    Path currentPath = partitionAndMore.getCurrentLocation();
    Path targetPath = new Path(targetLocation);
    if (!targetPath.equals(currentPath)) {
        renameDirectory(
                hdfsContext,
                hdfsEnvironment,
                currentPath,
                targetPath,
                () -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(hdfsContext, targetPath, true)));
    }
    // The partition alter must happen regardless of whether the original and current locations
    // are the same, because metadata might change: e.g. storage format, column types, etc.
    alterPartitionOperations.add(new AlterPartitionOperation(
            partitionAndMore.getIdentity(),
            new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()),
            new PartitionWithStatistics(oldPartition.get(), partitionName, oldPartitionStatistics)));
}