Below are example usages of org.apache.hadoop.fs.Path#getName(). Follow the link to view the source code on GitHub, or leave a comment in the panel on the right.
/**
 * Copies {@code sCopy} into {@code dstdir} on the local filesystem, prefixing
 * the destination file name with "tmp_".
 *
 * @param sCopy  source path to copy
 * @param dstdir destination directory for the temporary copy
 * @return path of the temporary local copy
 * @throws IOException if the source's modification time no longer matches the
 *         recorded resource timestamp, if a PUBLIC resource is not publicly
 *         accessible, or if the copy itself fails
 */
private Path copy(Path sCopy, Path dstdir) throws IOException {
FileSystem sourceFs = sCopy.getFileSystem(conf);
Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
FileStatus sStat = sourceFs.getFileStatus(sCopy);
// Guard against the source changing between resource registration and copy.
if (sStat.getModificationTime() != resource.getTimestamp()) {
throw new IOException("Resource " + sCopy +
" changed on src filesystem (expected " + resource.getTimestamp() +
", was " + sStat.getModificationTime() + ")"); // close paren opened at "(expected"
}
if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
throw new IOException("Resource " + sCopy +
" is not publicly accessible and as such cannot be part of the" +
" public cache.");
}
}
FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
true, conf);
return dCopy;
}
/**
 * Recursively copies a remote file or directory tree rooted at
 * {@code fileStatus} into the local directory {@code destDir}.
 *
 * @param fileSystem source filesystem
 * @param fileStatus status of the file or directory to copy
 * @param destDir local destination directory
 * @throws IOException if a local sub-directory cannot be created or the copy fails
 */
protected void copyLocal(FileSystem fileSystem, FileStatus fileStatus, File destDir) throws IOException {
Path path = fileStatus.getPath();
File file = new File(destDir, path.getName());
if (fileStatus.isDirectory()) {
if (!file.mkdirs()) {
LOG.error("Error while trying to create a sub directory [{0}].", file.getAbsolutePath());
throw new IOException("Error while trying to create a sub directory [" + file.getAbsolutePath() + "].");
}
FileStatus[] listStatus = fileSystem.listStatus(path);
for (FileStatus fs : listStatus) {
copyLocal(fileSystem, fs, file);
}
} else {
// try-with-resources guarantees both streams are closed even when the copy
// throws (the original leaked both streams on failure). Opening the input
// first also avoids creating an empty local file when the remote open fails.
try (FSDataInputStream inputStream = fileSystem.open(path);
FileOutputStream output = new FileOutputStream(file)) {
IOUtils.copy(inputStream, output);
}
}
}
/**
 * Creates a record writer for Excel output. If no MIME type is configured,
 * the new Excel format (.xlsx) is assumed and written back to the config.
 *
 * @param context task attempt context supplying the configuration
 * @return a writer for SpreadSheetCellDAO records
 * @throws IOException if the output stream or the Excel writer cannot be created
 */
@Override
public RecordWriter<NullWritable,SpreadSheetCellDAO> getRecordWriter(TaskAttemptContext context) throws IOException {
// check if mimeType is set. If not assume new Excel format (.xlsx)
Configuration conf=context.getConfiguration();
String defaultConf=conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE,ExcelFileOutputFormat.DEFAULT_MIMETYPE);
conf.set(HadoopOfficeWriteConfiguration.CONF_MIMETYPE,defaultConf);
// add suffix matching the configured MIME type
Path file = getDefaultWorkFile(context,ExcelFileOutputFormat.getSuffix(conf.get(HadoopOfficeWriteConfiguration.CONF_MIMETYPE)));
try {
return new ExcelRecordWriter<>(HadoopUtil.getDataOutputStream(conf,file,context,getCompressOutput(context),getOutputCompressorClass(context, ExcelFileOutputFormat.defaultCompressorClass)),file.getName(),conf);
} catch (InvalidWriterConfigurationException | InvalidCellSpecificationException | FormatNotUnderstoodException
| GeneralSecurityException | OfficeWriterException e) {
LOG.error(e);
// Propagate instead of returning null: a null RecordWriter would surface
// later as an opaque NPE inside the MapReduce framework.
throw new IOException("Could not create ExcelRecordWriter for " + file, e);
}
}
/**
 * Builds a short context string for the given path: the last two path
 * components, skipping over a trailing "data" component when present.
 */
private static String pathContext(Path dataPath) {
final Path parent = dataPath.getParent();
if (parent == null) {
// Root of the hierarchy: only the name itself is available.
return dataPath.getName();
}
if (dataPath.getName().equals("data")) {
// Skip the data folder and describe its parent instead.
return pathContext(parent);
}
return parent.getName() + "/" + dataPath.getName();
}
/**
 * Acquire the lock.
 *
 * @throws JobLockException thrown if the {@link JobLock} fails to be acquired
 */
void lock(Path lockFile) throws JobLockException {
log.debug("Creating lock: {}", lockFile);
try {
boolean created = this.fs.createNewFile(lockFile);
if (!created) {
// createNewFile returns false when the file already exists, i.e. the lock is held.
throw new JobLockException("Failed to create lock file " + lockFile.getName());
}
} catch (IOException e) {
throw new JobLockException(e);
}
}
/**
 * Builds a ScannedFileInfo for a discovered child path. When a root path is
 * given, the child is recorded relative to it; otherwise the parent directory
 * (or null for a non-directory parent) is used as the base.
 */
protected ScannedFileInfo createScannedFileInfo(Path parentPath, FileStatus parentStatus, Path childPath,
FileStatus childStatus, Path rootPath)
{
if (rootPath == null) {
if (parentStatus.isDirectory()) {
return new ScannedFileInfo(parentPath.toUri().getPath(), childPath.getName(), childStatus.getModificationTime());
}
return new ScannedFileInfo(null, childPath.toUri().getPath(), childStatus.getModificationTime());
}
// Record the child path relative to the configured root.
URI relativeChildURI = rootPath.toUri().relativize(childPath.toUri());
return new ScannedFileInfo(rootPath.toUri().getPath(), relativeChildURI.getPath(),
childStatus.getModificationTime());
}
/**
 * Accepts only committed files belonging to this filter's topic.
 * Delegates basic filtering to the superclass first.
 */
@Override
public boolean accept(Path path) {
if (!super.accept(path)) {
return false;
}
Matcher matcher =
HdfsSinkConnectorConstants.COMMITTED_FILENAME_PATTERN.matcher(path.getName());
// matches() must be invoked before group(): it both validates the name
// and makes the capture groups available.
if (!matcher.matches()) {
throw new AssertionError("match expected because of CommittedFileFilter");
}
String topic = matcher.group(HdfsSinkConnectorConstants.PATTERN_TOPIC_GROUP);
return topic.equals(this.topic);
}
/**
 * Creates a PathFilter rooted at {@code dir} that optionally skips
 * dot-prefixed files and matches either the basename or the root-relative
 * path against the configured file filter pattern.
 */
protected PathFilter getPathFilter(final Path dir) {
return new PathFilter() {
@Override
public boolean accept(Path path) {
final String name = path.getName();
// Optionally skip hidden (dot-prefixed) files.
if (ignoreDottedFiles && name.startsWith(".")) {
return false;
}
final String candidate;
if (filterMatchBasenameOnly) {
candidate = name;
} else {
// Compare against the portion of the path below the provided root dir.
final String relativePath = getPathDifference(dir, path);
candidate = relativePath.isEmpty() ? name : relativePath + Path.SEPARATOR + name;
}
// With no pattern configured, everything passes.
return fileFilterPattern == null || fileFilterPattern.matcher(candidate).matches();
}
};
}
/**
 * Locates the binaries archive (name ending in "bin.tar.gz") among the
 * distributed cache archives and returns the relative directory containing
 * its unpacked binaries.
 *
 * @param context reducer context providing the cache archive URIs
 * @return relative path to the bin directory inside the unpacked archive
 * @throws IOException if no binaries archive is present in the cache
 */
protected String checkBinaries(Reducer.Context context) throws IOException {
Logger.DEBUG("Checking for binaries...");
String binDir = null;
URI[] localPaths = context.getCacheArchives();
for (URI localPath : localPaths) {
Path path = new Path(localPath.getPath());
if (path.getName().endsWith("bin.tar.gz")) {
// Archives are unpacked into a directory named after the archive file.
binDir = "./" + path.getName() + "/bin/";
}
}
// Previously a missing archive fell through to new File(null), which threw
// an unhelpful NullPointerException; fail with a descriptive error instead.
if (binDir == null) {
throw new IOException("No *bin.tar.gz archive found among the cache archives");
}
printDirectoryTree(new File(binDir), 0);
return binDir;
}
/**
 * Moves {@code filePath} into the configured destination directory,
 * preserving its base name.
 *
 * @param fileSystem filesystem containing both source and destination
 * @param filePath file to move
 * @throws IOException if the rename does not take effect
 */
@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
Path destPath = new Path(destination, filePath.getName());
LOG.info("Moving file {} to {}", filePath, destPath);
// rename() reports failure via its return value; the original captured it
// into an unused local and silently dropped failed moves.
if (!fileSystem.rename(filePath, destPath)) {
throw new IOException("Failed to move " + filePath + " to " + destPath);
}
}
/**
 * Decides whether the given HFile may be deleted. Directories and
 * no-longer-existing files are always deletable; for regular files the
 * decision is delegated to the archive tracker based on the file's table
 * (derived from the path layout table/region/family/file).
 * On any IOException the file is conservatively kept.
 */
@Override
public boolean isFileDeletable(FileStatus fStat) {
try {
// if its a directory, then it can be deleted
if (fStat.isDirectory()) {
return true;
}
Path file = fStat.getPath();
// check to see if the file still exists; a null listing means it is gone
FileStatus[] deleteStatus = CommonFSUtils.listStatus(this.fs, file, null);
// if the file doesn't exist, then it can be deleted (but should never
// happen since deleted files shouldn't get passed in)
if (deleteStatus == null) {
return true;
}
// otherwise, we need to check the file's table and see its being archived
// NOTE(review): assumes the layout .../table/region/family/file — confirm
// this cleaner is only applied to paths of that shape.
Path family = file.getParent();
Path region = family.getParent();
Path table = region.getParent();
String tableName = table.getName();
boolean ret = !archiveTracker.keepHFiles(tableName);
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
tableName);
return ret;
} catch (IOException e) {
// Conservative default: when status cannot be determined, keep the file.
LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
return false;
}
}
/**
 * Copies the application package at {@code path} into the local user DT
 * "jars" directory and initializes the launcher from the local copy.
 *
 * @param fs filesystem holding the application package
 * @param path path of the application package to launch
 * @param conf configuration used for the logical plan
 * @throws Exception if the jars directory cannot be created, the copy fails,
 *         or initialization from the jar fails
 */
public StramAppLauncher(FileSystem fs, Path path, Configuration conf) throws Exception
{
File jarsDir = new File(StramClientUtils.getUserDTDirectory(), "jars");
// mkdirs() returns false both when creation fails and when the directory
// already exists; only the former is an error. The original ignored the
// result entirely, deferring failure to a confusing copy error later.
if (!jarsDir.mkdirs() && !jarsDir.isDirectory()) {
throw new IOException("Could not create directory " + jarsDir);
}
File localJarFile = new File(jarsDir, path.getName());
this.fs = fs;
fs.copyToLocalFile(path, new Path(localJarFile.getAbsolutePath()));
this.jarFile = localJarFile;
this.conf = conf;
this.propertiesBuilder = new LogicalPlanConfiguration(conf);
init(this.jarFile.getName());
}
/**
 * Moves {@code filePath} into the configured destination directory,
 * preserving its base name.
 *
 * @param fileSystem filesystem containing both source and destination
 * @param filePath file to move
 * @throws IOException if the rename does not take effect
 */
@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
Path destPath = new Path(destination, filePath.getName());
LOG.info("Moving file {} to {}", filePath, destPath);
// rename() reports failure via its return value; the original captured it
// into an unused local and silently dropped failed moves.
if (!fileSystem.rename(filePath, destPath)) {
throw new IOException("Failed to move " + filePath + " to " + destPath);
}
}
/**
 * Merge between two paths.
 *
 * @param hostName host name prefix to merge with
 * @param p path whose parent may match the host name
 * @param objectKey object key to compare against the path name
 * @return the path itself when parent equals the host name and the name
 *         equals the object key; otherwise hostName + objectKey
 */
private String getMergedPath(String hostName, Path p, String objectKey) {
final Path parent = p.getParent();
final String name = p.getName();
// Both non-matching branches of the original produced the same value, so
// the condition collapses to a single check.
if (parent != null && name != null
&& parent.toString().equals(hostName)
&& objectKey.equals(name)) {
return p.toString();
}
return hostName + objectKey;
}
/**
 * Renames each path in {@code lst} into {@code dstDir}, keeping base names.
 * Failed renames are logged and skipped; only successfully moved destination
 * paths are returned.
 */
private List<Path> movePathList(FileSystem fileSystem, Path dstDir, List<Path> lst) throws IOException {
List<Path> moved = new ArrayList<Path>();
for (Path source : lst) {
Path target = new Path(dstDir, source.getName());
boolean renamed = fileSystem.rename(source, target);
if (renamed) {
LOG.info("Moving [{0}] to [{1}]", source, target);
moved.add(target);
} else {
// Best-effort: the caller only ever sees the successful moves.
LOG.error("Could not move [{0}] to [{1}]", source, target);
}
}
return moved;
}
/**
 * org.apache.hadoop.fs.Path treats "/" as a path separator and splits a path
 * string into parent and name components. A Google-assigned file ID is an
 * opaque random string that may itself contain "/", so a single ID can end up
 * spread across several Path components. This method walks the components and
 * re-joins them with "/" to recover the original file ID.
 *
 * @param p path whose components encode a file ID
 * @return the reconstructed file ID (empty string for the root path)
 */
public static String toFileId(Path p) {
if (p.isRoot()) {
return "";
}
final Path parent = p.getParent();
// An empty parent name means we've reached the top-level component.
if (parent != null && StringUtils.isEmpty(parent.getName())) {
return p.getName();
}
return toFileId(parent) + Path.SEPARATOR + p.getName();
}
/**
 * Test correctness of successive snapshot creation and deletion
 * on a system with encryption zones.
 *
 * Scenario: snapshot a zone (snap1), delete and recreate the directory
 * without a zone (snap2), recreate the zone with a different key (snap3),
 * then verify each snapshot reports the EZ state it captured, that
 * listEncryptionZones reflects only the live filesystem, and that
 * out-of-order snapshot deletion leaves the remaining views intact.
 */
@Test(timeout = 60000)
public void testSnapshotsOnEncryptionZones() throws Exception {
final String TEST_KEY2 = "testkey2";
DFSTestUtil.createKey(TEST_KEY2, cluster, conf);
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone = new Path(zoneParent, "zone");
final Path zoneFile = new Path(zone, "zoneFile");
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
dfsAdmin.allowSnapshot(zoneParent);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, zoneFile);
// snap1 captures the zone encrypted with TEST_KEY.
final Path snap1 = fs.createSnapshot(zoneParent, "snap1");
final Path snap1Zone = new Path(snap1, zone.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
// Now delete the encryption zone, recreate the dir, and take another
// snapshot
fsWrapper.delete(zone, true);
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
// snap2 captures the recreated directory with NO encryption zone.
final Path snap2 = fs.createSnapshot(zoneParent, "snap2");
final Path snap2Zone = new Path(snap2, zone.getName());
assertNull("Expected null ez path",
dfsAdmin.getEncryptionZoneForPath(snap2Zone));
// Create the encryption zone again
dfsAdmin.createEncryptionZone(zone, TEST_KEY2);
// snap3 captures the zone encrypted with TEST_KEY2.
final Path snap3 = fs.createSnapshot(zoneParent, "snap3");
final Path snap3Zone = new Path(snap3, zone.getName());
// Check that snap3's EZ has the correct settings
EncryptionZone ezSnap3 = dfsAdmin.getEncryptionZoneForPath(snap3Zone);
assertEquals("Got unexpected ez path", zone.toString(),
ezSnap3.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY2, ezSnap3.getKeyName());
// Check that older snapshots still have the old EZ settings
EncryptionZone ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
assertEquals("Got unexpected ez path", zone.toString(),
ezSnap1.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
// Check that listEZs only shows the current filesystem state
ArrayList<EncryptionZone> listZones = Lists.newArrayList();
RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
while (it.hasNext()) {
listZones.add(it.next());
}
for (EncryptionZone z: listZones) {
System.out.println(z);
}
// Only the live (TEST_KEY2) zone should be listed, never snapshotted ones.
assertEquals("Did not expect additional encryption zones!", 1,
listZones.size());
EncryptionZone listZone = listZones.get(0);
assertEquals("Got unexpected ez path", zone.toString(),
listZone.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName());
// Verify contents of the snapshotted file
final Path snapshottedZoneFile = new Path(
snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
assertEquals("Contents of snapshotted file have changed unexpectedly",
contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
// Now delete the snapshots out of order and verify the zones are still
// correct
fs.deleteSnapshot(zoneParent, snap2.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
fs.deleteSnapshot(zoneParent, snap1.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
}
/**
 * Accepts only regular data files, skipping hidden/metadata entries
 * (names beginning with "_" or ".").
 */
@Override
public boolean accept(Path path) {
final String name = path.getName();
if (name.startsWith("_")) {
return false;
}
return !name.startsWith(".");
}
/**
 * Appends the given local file (as a file:// URI path) to the "tmpfiles"
 * configuration entry, creating the entry when absent.
 *
 * @param file local file path to register
 * @return the base name of the registered path
 */
private String addTmpFile(String file) throws IOException {
Path path = new Path(new File(file).toURI());
String existing = getConf().get("tmpfiles");
String updated = (existing == null) ? path.toString() : existing + "," + path.toString();
getConf().set("tmpfiles", updated);
return path.getName();
}
/**
 * Returns a sibling path in the same directory with ".avro" appended
 * to the file name.
 */
private static Path avroPath(Path path) {
final String avroName = path.getName() + ".avro";
return new Path(path.getParent(), avroName);
}