org.apache.hadoop.fs.Path#getName ( )源码实例Demo

下面列出了 org.apache.hadoop.fs.Path#getName() 的实例代码;您也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。

源代码1 项目: big-c   文件: FSDownload.java
/**
 * Copies {@code sCopy} to a temporary file under {@code dstdir} on the local
 * filesystem, after verifying the source is unchanged and — for PUBLIC
 * resources — publicly readable.
 *
 * @param sCopy  source path on the remote filesystem
 * @param dstdir destination directory for the temporary copy
 * @return the local temporary copy, named {@code "tmp_" + sCopy.getName()}
 * @throws IOException if the source's modification time no longer matches the
 *         resource's recorded timestamp, if a PUBLIC resource is not publicly
 *         accessible, or if the copy itself fails
 */
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    // FIX: message previously lacked the closing parenthesis after the
    // actual modification time.
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime() + ")");
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      // FIX: corrected typo "accessable" -> "accessible" in the error message.
      throw new IOException("Resource " + sCopy +
          " is not publicly accessible and as such cannot be part of the" +
          " public cache.");
    }
  }

  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
 
/**
 * Recursively copies a remote file or directory tree rooted at
 * {@code fileStatus} into the local directory {@code destDir}.
 *
 * @param fileSystem source filesystem
 * @param fileStatus status of the file or directory to copy
 * @param destDir    local destination directory (must already exist)
 * @throws IOException if a local sub-directory cannot be created or a copy fails
 */
protected void copyLocal(FileSystem fileSystem, FileStatus fileStatus, File destDir) throws IOException {
  Path path = fileStatus.getPath();
  File file = new File(destDir, path.getName());
  if (fileStatus.isDirectory()) {
    if (!file.mkdirs()) {
      LOG.error("Error while trying to create a sub directory [{0}].", file.getAbsolutePath());
      throw new IOException("Error while trying to create a sub directory [" + file.getAbsolutePath() + "].");
    }
    // recurse into every child of the directory
    for (FileStatus fs : fileSystem.listStatus(path)) {
      copyLocal(fileSystem, fs, file);
    }
  } else {
    // FIX: the original left both streams open if the copy threw;
    // try-with-resources guarantees they are closed on every path.
    try (FSDataInputStream inputStream = fileSystem.open(path);
        FileOutputStream output = new FileOutputStream(file)) {
      IOUtils.copy(inputStream, output);
    }
  }
}
 
源代码3 项目: hadoopoffice   文件: ExcelFileOutputFormat.java
/**
 * Creates a {@link RecordWriter} that writes spreadsheet cells to an Excel
 * file, defaulting the MIME type to the new Excel format (.xlsx) when none
 * is configured.
 *
 * @param context task attempt context providing the job configuration
 * @return a configured {@code ExcelRecordWriter}
 * @throws IOException if the writer cannot be created
 */
@Override
public RecordWriter<NullWritable,SpreadSheetCellDAO> getRecordWriter(TaskAttemptContext context) throws IOException {
	// check if mimeType is set. If not assume new Excel format (.xlsx)
	Configuration conf=context.getConfiguration();
	String defaultConf=conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE,ExcelFileOutputFormat.DEFAULT_MIMETYPE);
	conf.set(HadoopOfficeWriteConfiguration.CONF_MIMETYPE,defaultConf);
	// add suffix matching the configured MIME type
	Path file = getDefaultWorkFile(context,ExcelFileOutputFormat.getSuffix(conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE)));
	try {
		return new ExcelRecordWriter<>(HadoopUtil.getDataOutputStream(conf,file,context,getCompressOutput(context),getOutputCompressorClass(context, ExcelFileOutputFormat.defaultCompressorClass)),file.getName(),conf);
	} catch (InvalidWriterConfigurationException | InvalidCellSpecificationException | FormatNotUnderstoodException
			| GeneralSecurityException | OfficeWriterException e) {
		LOG.error(e);
		// FIX: previously logged and returned null, which caused an NPE at the
		// caller; rethrow wrapped in the IOException this method already declares.
		throw new IOException("Could not create ExcelRecordWriter for " + file.getName(), e);
	}
}
 
源代码4 项目: iceberg   文件: Writer.java
/**
 * Derives a short context string for a data file path: the last two path
 * components, skipping over a literal "data" folder if present. A path with
 * no parent yields just its own name.
 */
private static String pathContext(Path dataPath) {
  Path parent = dataPath.getParent();
  if (parent == null) {
    // nothing above us: only the name itself is available
    return dataPath.getName();
  }
  String name = dataPath.getName();
  if (name.equals("data")) {
    // remove the data folder and recurse on what's above it
    return pathContext(parent);
  }
  return parent.getName() + "/" + name;
}
 
/**
 * Acquire the lock by atomically creating the given lock file.
 *
 * @param lockFile path of the lock file to create
 * @throws JobLockException thrown if the {@link JobLock} fails to be acquired
 */
void lock(Path lockFile) throws JobLockException {
  log.debug("Creating lock: {}", lockFile);
  try {
    boolean created = this.fs.createNewFile(lockFile);
    if (!created) {
      // file already existed: another holder owns the lock
      throw new JobLockException("Failed to create lock file " + lockFile.getName());
    }
  } catch (IOException e) {
    throw new JobLockException(e);
  }
}
 
源代码6 项目: attic-apex-malhar   文件: FileSplitterInput.java
/**
 * Builds a {@link ScannedFileInfo} for a discovered file. With a scan root,
 * the child is recorded relative to that root; otherwise the layout depends
 * on whether the parent is a directory.
 */
protected ScannedFileInfo createScannedFileInfo(Path parentPath, FileStatus parentStatus, Path childPath,
    FileStatus childStatus, Path rootPath)
{
  long modTime = childStatus.getModificationTime();
  if (rootPath != null) {
    // express the child path relative to the configured scan root
    URI relativeChildURI = rootPath.toUri().relativize(childPath.toUri());
    return new ScannedFileInfo(rootPath.toUri().getPath(), relativeChildURI.getPath(), modTime);
  }
  if (parentStatus.isDirectory()) {
    return new ScannedFileInfo(parentPath.toUri().getPath(), childPath.getName(), modTime);
  }
  // parent is itself a file: keep the child's full path with no directory part
  return new ScannedFileInfo(null, childPath.toUri().getPath(), modTime);
}
 
源代码7 项目: streamx   文件: TopicCommittedFileFilter.java
/**
 * Accepts only committed files whose encoded topic matches this filter's topic.
 */
@Override
public boolean accept(Path path) {
  if (!super.accept(path)) {
    return false;
  }
  Matcher m = HdfsSinkConnectorConstants.COMMITTED_FILENAME_PATTERN.matcher(path.getName());
  // NB: matches() must run (for its side effect) before group() may be called
  if (!m.matches()) {
    throw new AssertionError("match expected because of CommittedFileFilter");
  }
  String candidate = m.group(HdfsSinkConnectorConstants.PATTERN_TOPIC_GROUP);
  return candidate.equals(this.topic);
}
 
源代码8 项目: localization_nifi   文件: GetHDFS.java
/**
 * Builds a {@link PathFilter} that optionally skips dot-files and, when a
 * filter pattern is configured, matches it against either the basename or
 * the path relative to {@code dir}.
 */
protected PathFilter getPathFilter(final Path dir) {
    return path -> {
        // optionally skip hidden ("dot") files
        if (ignoreDottedFiles && path.getName().startsWith(".")) {
            return false;
        }
        final String pathToCompare;
        if (filterMatchBasenameOnly) {
            pathToCompare = path.getName();
        } else {
            // figure out portion of path that does not include the provided root dir.
            String relativePath = getPathDifference(dir, path);
            pathToCompare = relativePath.length() == 0
                    ? path.getName()
                    : relativePath + Path.SEPARATOR + path.getName();
        }
        // no pattern configured means everything passes
        return fileFilterPattern == null || fileFilterPattern.matcher(pathToCompare).matches();
    };
}
 
源代码9 项目: halvade   文件: HalvadeReducer.java
/**
 * Locates the cache archive whose name ends with "bin.tar.gz" and returns the
 * relative directory of its unpacked binaries.
 *
 * @param context reducer context supplying the cache archives
 * @return relative path of the binaries directory, e.g. "./xxxbin.tar.gz/bin/"
 * @throws IOException if no matching archive is present in the cache
 */
protected String checkBinaries(Reducer.Context context) throws IOException {
    Logger.DEBUG("Checking for binaries...");
    String binDir = null;
    URI[] localPaths = context.getCacheArchives();
    // keep last-match semantics of the original scan
    for(int i = 0; i < localPaths.length; i++ ) {
        Path path = new Path(localPaths[i].getPath());
        if(path.getName().endsWith("bin.tar.gz")) {
            binDir = "./" + path.getName() + "/bin/";
        }
    }
    // FIX: without this guard, new File(null) below threw an undiagnosed NPE
    // when no archive matched; fail with a clear, declared IOException instead.
    if (binDir == null) {
        throw new IOException("Required cache archive *bin.tar.gz not found");
    }
    printDirectoryTree(new File(binDir), 0);
    return binDir;
}
 
源代码10 项目: storm-hdfs   文件: MoveFileAction.java
/**
 * Moves the given file into the configured destination directory.
 *
 * @param fileSystem filesystem on which to perform the rename
 * @param filePath   file to move; its basename is kept at the destination
 * @throws IOException if the underlying rename call fails with an error
 */
@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    Path destPath = new Path(destination, filePath.getName());
    LOG.info("Moving file {} to {}", filePath, destPath);
    boolean success = fileSystem.rename(filePath, destPath);
    // FIX: the rename result was previously ignored; surface silent failures
    // in the log without changing the method's best-effort contract.
    if (!success) {
        LOG.warn("Failed to move file {} to {}", filePath, destPath);
    }
}
 
源代码11 项目: hbase   文件: LongTermArchivingHFileCleaner.java
/**
 * Decides whether an archived HFile may be deleted: directories always can;
 * vanished files can; otherwise the file's table (three levels up:
 * family/region/table) is consulted via the archive tracker.
 */
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if its a directory, then it can be deleted
    if (fStat.isDirectory()) {
      return true;
    }

    Path file = fStat.getPath();
    // a null listing means the file no longer exists, so it can be deleted
    // (but should never happen since deleted files shouldn't get passed in)
    FileStatus[] deleteStatus = CommonFSUtils.listStatus(this.fs, file, null);
    if (deleteStatus == null) {
      return true;
    }

    // walk up family -> region -> table to find which table owns this file
    String tableName = file.getParent().getParent().getParent().getName();
    boolean deletable = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (deletable ? "delete" : "keep") + "] files for table:" +
        tableName);
    return deletable;
  } catch (IOException e) {
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
    return false;
  }
}
 
源代码12 项目: attic-apex-core   文件: StramAppLauncher.java
/**
 * Copies the application jar at {@code path} into the user's local "jars"
 * directory and initializes the launcher from it.
 *
 * @param fs   filesystem holding the jar
 * @param path remote jar path; its basename becomes the local jar name
 * @param conf configuration used to build the logical plan
 * @throws Exception if the local directory cannot be created, the copy fails,
 *         or initialization fails
 */
public StramAppLauncher(FileSystem fs, Path path, Configuration conf) throws Exception
{
  File jarsDir = new File(StramClientUtils.getUserDTDirectory(), "jars");
  // FIX: mkdirs() result was silently ignored; fail fast when the directory
  // neither exists nor can be created, instead of failing later in the copy.
  if (!jarsDir.mkdirs() && !jarsDir.isDirectory()) {
    throw new IOException("Could not create local jars directory " + jarsDir.getAbsolutePath());
  }
  File localJarFile = new File(jarsDir, path.getName());
  this.fs = fs;
  fs.copyToLocalFile(path, new Path(localJarFile.getAbsolutePath()));
  this.jarFile = localJarFile;
  this.conf = conf;
  this.propertiesBuilder = new LogicalPlanConfiguration(conf);
  init(this.jarFile.getName());
}
 
源代码13 项目: jstorm   文件: MoveFileAction.java
/**
 * Moves the given file into the configured destination directory.
 *
 * @param fileSystem filesystem on which to perform the rename
 * @param filePath   file to move; its basename is kept at the destination
 * @throws IOException if the underlying rename call fails with an error
 */
@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    Path destPath = new Path(destination, filePath.getName());
    LOG.info("Moving file {} to {}", filePath, destPath);
    boolean success = fileSystem.rename(filePath, destPath);
    // FIX: the rename result was previously ignored; surface silent failures
    // in the log without changing the method's best-effort contract.
    if (!success) {
        LOG.warn("Failed to move file {} to {}", filePath, destPath);
    }
}
 
源代码14 项目: stocator   文件: COSAPIClient.java
/**
 * Merge between two paths.
 *
 * @param hostName host/container prefix
 * @param p path
 * @param objectKey key of the object
 * @return merged path: {@code p} itself when it already names the object
 *         directly under {@code hostName}, otherwise {@code hostName + objectKey}
 */
private String getMergedPath(String hostName, Path p, String objectKey) {
  Path parent = p.getParent();
  boolean directlyUnderHost = (parent != null) && (p.getName() != null)
      && parent.toString().equals(hostName);
  if (directlyUnderHost && objectKey.equals(p.getName())) {
    // the path already denotes exactly this object; keep it verbatim
    return p.toString();
  }
  return hostName + objectKey;
}
 
源代码15 项目: incubator-retired-blur   文件: IndexerJobDriver.java
/**
 * Renames every source path into {@code dstDir}, keeping each basename.
 * Successful moves are collected and returned; failures are logged and
 * skipped.
 */
private List<Path> movePathList(FileSystem fileSystem, Path dstDir, List<Path> lst) throws IOException {
  List<Path> result = new ArrayList<Path>();
  for (Path src : lst) {
    Path dst = new Path(dstDir, src.getName());
    boolean moved = fileSystem.rename(src, dst);
    if (moved) {
      LOG.info("Moving [{0}] to [{1}]", src, dst);
      result.add(dst);
    } else {
      LOG.error("Could not move [{0}] to [{1}]", src, dst);
    }
  }
  return result;
}
 
源代码16 项目: incubator-gobblin   文件: GoogleDriveFileSystem.java
/**
 * org.apache.hadoop.fs.Path assumes "/" is the file-system separator, so when
 * it sees "/" in a path string it splits the string into parent and name.
 * A Google Drive file ID is an opaque string chosen by Google that may itself
 * contain "/", so this method reassembles the pieces Path split apart,
 * restoring "/" back into the file ID.
 *
 * @param p path whose components encode the file ID
 * @return the reconstructed file ID (empty string for the root path)
 */
public static String toFileId(Path p) {
  if (p.isRoot()) {
    return "";
  }
  Path parent = p.getParent();
  // a parent with an empty name is the synthetic top: the name alone is the ID
  if (parent != null && StringUtils.isEmpty(parent.getName())) {
    return p.getName();
  }
  // rebuild recursively, re-inserting the separator Path stripped out
  final String format = "%s" + Path.SEPARATOR + "%s";
  return String.format(format, toFileId(parent), p.getName());
}
 
源代码17 项目: big-c   文件: TestEncryptionZones.java
/**
 * Test correctness of successive snapshot creation and deletion
 * on a system with encryption zones: each snapshot must preserve the EZ
 * settings (path and key) that were in effect when it was taken, even after
 * the zone is deleted and recreated with a different key.
 */
@Test(timeout = 60000)
public void testSnapshotsOnEncryptionZones() throws Exception {
  final String TEST_KEY2 = "testkey2";
  DFSTestUtil.createKey(TEST_KEY2, cluster, conf);

  final int len = 8196;
  // layout: /zones (snapshottable parent) -> /zones/zone (EZ) -> zoneFile
  final Path zoneParent = new Path("/zones");
  final Path zone = new Path(zoneParent, "zone");
  final Path zoneFile = new Path(zone, "zoneFile");
  fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
  dfsAdmin.allowSnapshot(zoneParent);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, zoneFile);
  // snap1: taken while the zone exists with TEST_KEY
  final Path snap1 = fs.createSnapshot(zoneParent, "snap1");
  final Path snap1Zone = new Path(snap1, zone.getName());
  assertEquals("Got unexpected ez path", zone.toString(),
      dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());

  // Now delete the encryption zone, recreate the dir, and take another
  // snapshot
  fsWrapper.delete(zone, true);
  fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
  // snap2: taken while the dir exists but is NOT an encryption zone
  final Path snap2 = fs.createSnapshot(zoneParent, "snap2");
  final Path snap2Zone = new Path(snap2, zone.getName());
  assertNull("Expected null ez path",
      dfsAdmin.getEncryptionZoneForPath(snap2Zone));

  // Create the encryption zone again
  dfsAdmin.createEncryptionZone(zone, TEST_KEY2);
  // snap3: taken while the zone exists with the new key TEST_KEY2
  final Path snap3 = fs.createSnapshot(zoneParent, "snap3");
  final Path snap3Zone = new Path(snap3, zone.getName());
  // Check that snap3's EZ has the correct settings
  EncryptionZone ezSnap3 = dfsAdmin.getEncryptionZoneForPath(snap3Zone);
  assertEquals("Got unexpected ez path", zone.toString(),
      ezSnap3.getPath().toString());
  assertEquals("Unexpected ez key", TEST_KEY2, ezSnap3.getKeyName());
  // Check that older snapshots still have the old EZ settings
  EncryptionZone ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
  assertEquals("Got unexpected ez path", zone.toString(),
      ezSnap1.getPath().toString());
  assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());

  // Check that listEZs only shows the current filesystem state
  // (snapshotted EZ history must not appear as extra zones)
  ArrayList<EncryptionZone> listZones = Lists.newArrayList();
  RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
  while (it.hasNext()) {
    listZones.add(it.next());
  }
  for (EncryptionZone z: listZones) {
    System.out.println(z);
  }
  assertEquals("Did not expect additional encryption zones!", 1,
      listZones.size());
  EncryptionZone listZone = listZones.get(0);
  assertEquals("Got unexpected ez path", zone.toString(),
      listZone.getPath().toString());
  assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName());

  // Verify contents of the snapshotted file
  final Path snapshottedZoneFile = new Path(
      snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
  assertEquals("Contents of snapshotted file have changed unexpectedly",
      contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));

  // Now delete the snapshots out of order and verify the zones are still
  // correct
  fs.deleteSnapshot(zoneParent, snap2.getName());
  assertEquals("Got unexpected ez path", zone.toString(),
      dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
  assertEquals("Got unexpected ez path", zone.toString(),
      dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
  fs.deleteSnapshot(zoneParent, snap1.getName());
  assertEquals("Got unexpected ez path", zone.toString(),
      dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
}
 
源代码18 项目: sqoop-on-spark   文件: HdfsUtils.java
/**
 * Rejects hidden/metadata output files: any name starting with "_" or ".".
 */
@Override
public boolean accept(Path path) {
  String fileName = path.getName();
  boolean hidden = fileName.startsWith("_") || fileName.startsWith(".");
  return !hidden;
}
 
源代码19 项目: Halyard   文件: HalyardBulkExport.java
/**
 * Appends the given local file to the comma-separated "tmpfiles"
 * configuration entry.
 *
 * @param file local filesystem path to register
 * @return the file's base name
 * @throws IOException declared for caller compatibility
 */
private String addTmpFile(String file) throws IOException {
    Path path = new Path(new File(file).toURI());
    String existing = getConf().get("tmpfiles");
    String updated = (existing == null) ? path.toString() : existing + "," + path.toString();
    getConf().set("tmpfiles", updated);
    return path.getName();
}
 
源代码20 项目: kite   文件: DurableParquetAppender.java
/** Returns the sibling path formed by appending ".avro" to the file name. */
private static Path avroPath(Path path) {
  String avroName = path.getName() + ".avro";
  return new Path(path.getParent(), avroName);
}