The following lists example code showing how to use the org.apache.hadoop.fs.FSError API class; follow the link to view the source code on GitHub.
/**
 * Canonicalizes a path if the filesystem supports it.
 *
 * @param fs the filesystem to use
 * @param path the path to canonicalize
 * @return the canonicalized path, or {@code path} unchanged when the filesystem offers no support
 * @throws IOException if the underlying filesystem reports an error
 */
public static Path canonicalizePath(org.apache.hadoop.fs.FileSystem fs, Path path) throws IOException {
try {
if (!(fs instanceof PathCanonicalizer)) {
return path;
}
final org.apache.hadoop.fs.Path hadoopPath = toHadoopPath(path);
final org.apache.hadoop.fs.Path canonical = ((PathCanonicalizer) fs).canonicalizePath(hadoopPath);
// Identity comparison: the canonicalizer hands back the same instance when nothing changed,
// in which case the caller's original Path is returned untouched.
return hadoopPath == canonical ? path : fromHadoopPath(canonical);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Deprecated delegate to the underlying filesystem's {@code createNonRecursive}, wrapped for stats. */
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress) throws IOException {
// Record wait time against operator stats while the underlying call runs.
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
final FSDataOutputStream out =
underlyingFs.createNonRecursive(f, permission, overwrite, bufferSize, replication, blockSize, progress);
return newFSDataOutputStreamWrapper(out);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Reads into {@code dst} from the wrapped input stream, rethrowing FSError via the shared helper. */
@Override
public int read(ByteBuffer dst) throws IOException {
try {
return underlyingIs.read(dst);
} catch (FSError fsError) {
throw DremioHadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Writes a single byte to the wrapped output stream. */
@Override
public void write(int b) throws IOException {
try {
underlyingOS.write(b);
} catch (FSError fsError) {
// Fatal filesystem errors are surfaced through the shared propagation helper.
throw propagateFSError(fsError);
}
}
/** Delegates {@code available()} to the wrapped input stream. */
@Override
public int available() throws IOException {
try {
return underlyingIs.available();
} catch (FSError fsError) {
throw DremioHadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Delegates {@code hflush()} to the wrapped output stream. */
@Override
public void hflush() throws IOException {
try {
underlyingOS.hflush();
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Applies POSIX permissions to {@code p} on the underlying filesystem, timing the call. */
@Override
public void setPermission(Path p, Set<PosixFilePermission> permissions) throws IOException {
// WaitRecorder measures how long the delegate call blocks.
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
underlyingFs.setPermission(toHadoopPath(p), toFsPermission(permissions));
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Deprecated delegate to the underlying filesystem's single-argument {@code delete}. */
@Override
@Deprecated
public boolean delete(Path f) throws IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
return underlyingFs.delete(f);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/**
 * Flushes the wrapped output stream.
 *
 * @throws IOException if the underlying stream fails, including when the failure
 *         originated as an {@link FSError}
 */
@Override
public void flush() throws IOException {
try {
underlyingOS.flush();
} catch (FSError e) {
// Bug fix: the original called propagateFSError(e) without `throw`, discarding the
// translated exception and silently swallowing a failed flush. Rethrow it, matching
// every other wrapper method in this file.
throw propagateFSError(e);
}
}
/** Atomically creates {@code f} via the underlying filesystem, timing the call. */
@Override
public boolean createNewFile(Path f) throws IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
return underlyingFs.createNewFile(f);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Skips up to {@code n} bytes on the wrapped stream. */
@Override
public long skip(long n) throws IOException {
try {
return is.skip(n);
} catch (FSError fsError) {
throw HadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Globs {@code pattern} against the underlying filesystem, filtering results with {@code filter}. */
@Override
public DirectoryStream<FileAttributes> glob(Path pattern, Predicate<Path> filter)
throws FileNotFoundException, IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
final FileStatus[] matches = underlyingFs.globStatus(toHadoopPath(pattern), toPathFilter(filter));
return new ArrayDirectoryStream(matches);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Writes the whole byte array to the wrapped output stream. */
@Override
public void write(byte[] b) throws IOException {
try {
underlyingOS.write(b);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/**
 * Returns whether {@code f} is a directory, retrying once after a forced refresh on NAS
 * (where a first negative answer may come from stale metadata).
 */
@Override
public boolean isDirectory(Path f) throws IOException {
final org.apache.hadoop.fs.Path hadoopPath = toHadoopPath(f);
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
boolean exists = underlyingFs.isDirectory(hadoopPath);
if (!exists && isNAS) {
// One retry after forcing a refresh when running against NAS.
forceRefresh(f);
exists = underlyingFs.isDirectory(hadoopPath);
}
return exists;
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/**
 * Directory check with a single NAS retry: a negative first answer triggers a forced
 * refresh and one re-check before the result is returned.
 */
@Override
public boolean isDirectory(Path f) throws IOException {
final org.apache.hadoop.fs.Path hadoopPath = toHadoopPath(f);
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
boolean isDir = underlyingFs.isDirectory(hadoopPath);
if (!isDir && isNAS) {
forceRefresh(f);
isDir = underlyingFs.isDirectory(hadoopPath);
}
return isDir;
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Skips up to {@code n} bytes on the wrapped input stream. */
@Override
public long skip(long n) throws IOException {
try {
return underlyingIs.skip(n);
} catch (FSError fsError) {
throw DremioHadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Resets the wrapped input stream to its last mark. */
@Override
public void reset() throws IOException {
try {
underlyingIs.reset();
} catch (FSError fsError) {
throw DremioHadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Delegates {@code available()} to the wrapped stream. */
@Override
public int available() throws IOException {
try {
return is.available();
} catch (FSError fsError) {
throw HadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/**
 * Reads a single byte. Most reads are block reads that go through
 * {@link #read(byte[], int, int)}, so no stats are recorded here.
 */
@Override
public int read() throws IOException {
try {
return is.read();
} catch (FSError fsError) {
throw HadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Copies local files {@code srcs} to {@code dst} via the underlying filesystem, timing the call. */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
underlyingFs.copyFromLocalFile(delSrc, overwrite, srcs, dst);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Forwards the drop-behind cache hint to the wrapped output stream. */
@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
try {
underlyingOS.setDropBehind(dropBehind);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Creates {@code f} on the underlying filesystem and wraps the resulting stream. */
@Override
public FSOutputStream create(Path f) throws IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
final org.apache.hadoop.fs.Path hadoopPath = toHadoopPath(f);
return newFSDataOutputStreamWrapper(underlyingFs.create(hadoopPath), f.toString());
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Forwards the drop-behind cache hint to the wrapped output stream. */
@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
try {
underlyingOS.setDropBehind(dropBehind);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Reads up to {@code len} bytes into {@code b} starting at {@code off}. */
@Override
public int read(byte[] b, int off, int len) throws IOException {
try {
return underlyingIs.read(b, off, len);
} catch (FSError fsError) {
throw HadoopFileSystem.propagateFSError(fsError);
}
}
/** Reads into {@code dst} from the wrapped input stream. */
@Override
public int read(ByteBuffer dst) throws IOException {
try {
return underlyingIs.read(dst);
} catch (FSError fsError) {
throw HadoopFileSystem.propagateFSError(fsError);
}
}
/** Fetches the extended attribute {@code name} for {@code path}, timing the call. */
@Override
public byte[] getXAttr(final Path path, final String name) throws IOException {
try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
return underlyingFs.getXAttr(path, name);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Writes the whole byte array to the wrapped output stream. */
@Override
public void write(byte[] b) throws IOException {
try {
underlyingOS.write(b);
} catch (FSError fsError) {
throw propagateFSError(fsError);
}
}
/** Forwards the drop-behind cache hint to the wrapped input stream. */
@Override
public void setDropBehind(Boolean dropBehind) throws IOException, UnsupportedOperationException {
try {
underlyingIs.setDropBehind(dropBehind);
} catch (FSError fsError) {
throw HadoopFileSystemWrapper.propagateFSError(fsError);
}
}
/** Delegates {@code available()} to the wrapped input stream. */
@Override
public int available() throws IOException {
try {
return underlyingIs.available();
} catch (FSError fsError) {
throw HadoopFileSystem.propagateFSError(fsError);
}
}
/**
 * Flushes the wrapped output stream.
 *
 * @throws IOException if the underlying stream fails, including when the failure
 *         originated as an {@link FSError}
 */
@Override
public void flush() throws IOException {
try {
underlyingOS.flush();
} catch (FSError e) {
// Bug fix: propagateFSError(e) was called without `throw`, so the translated exception
// was discarded and a failed flush went unreported. Rethrow it, consistent with the
// other wrapper methods in this file.
throw propagateFSError(e);
}
}