org.apache.hadoop.fs.permission.FsPermission#getFileDefault() source code examples

Listed below are example usages of org.apache.hadoop.fs.permission.FsPermission#getFileDefault(), each taken from the GitHub source of the open-source project named in its entry.
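Before the project code, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below; the class name FsPermissionDefaultsDemo is hypothetical) of what getFileDefault() returns and how callers typically combine it with the configured umask:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDefaultsDemo {
  public static void main(String[] args) {
    // Default permission for new files: 0666 (rw-rw-rw-), no execute bits.
    FsPermission fileDefault = FsPermission.getFileDefault();
    // Default permission for new directories: 0777 (rwxrwxrwx).
    FsPermission dirDefault = FsPermission.getDirDefault();

    // Clients usually apply the configured umask (fs.permissions.umask-mode,
    // 022 by default) before handing the permission to the file system.
    Configuration conf = new Configuration();
    FsPermission masked = fileDefault.applyUMask(FsPermission.getUMask(conf));

    System.out.println(fileDefault); // rw-rw-rw-
    System.out.println(dirDefault);  // rwxrwxrwx
    System.out.println(masked);      // rw-r--r-- with the default 022 umask
  }
}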

Example 1   Project: incubator-crail   File: CrailHadoopFileSystem.java
@Override
public FileStatus[] listStatus(Path path) throws FileNotFoundException, IOException {
	try {
		CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
		Iterator<String> iter = node.asContainer().listEntries();
		ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
		while(iter.hasNext()){
			String filepath = iter.next();
			CrailNode directFile = dfs.lookup(filepath).get();
			if (directFile != null){
				FsPermission permission = FsPermission.getFileDefault();
				if (directFile.getType().isDirectory()) {
					permission = FsPermission.getDirDefault();
				}
				FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, new Path(filepath).makeQualified(this.getUri(), this.workingDir));	
				statusList.add(status);
			}
		}
		FileStatus[] list = new FileStatus[statusList.size()];
		statusList.toArray(list);
		return list;
	} catch(Exception e){
		throw new FileNotFoundException(path.toUri().getRawPath());
	}
}
 
Example 2   Project: incubator-crail   File: CrailHadoopFileSystem.java
@Override
public FileStatus getFileStatus(Path path) throws IOException {
	statistics.incrementReadOps(1);
	CrailNode directFile = null;
	try {
		directFile = dfs.lookup(path.toUri().getRawPath()).get();
	} catch (Exception e) {
		throw new IOException(e);
	}
	if (directFile == null) {
		throw new FileNotFoundException("File does not exist: " + path);
	}
	FsPermission permission = FsPermission.getFileDefault();
	if (directFile.getType().isDirectory()) {
		permission = FsPermission.getDirDefault();
	}
	FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
	return status;
}
 
Example 3   Project: hadoop   File: HdfsFileStatus.java
/**
 * Constructor
 * @param length the number of bytes the file has
 * @param isdir if the path is a directory
 * @param block_replication the replication factor
 * @param blocksize the block size
 * @param modification_time modification time
 * @param access_time access time
 * @param permission permission
 * @param owner the owner of the path
 * @param group the group of the path
 * @param symlink symlink target, or null if the path is not a symlink
 * @param path the local name in Java UTF-8 encoding, the same as the in-memory form
 * @param fileId the file id
 * @param childrenNum the number of children if the path is a directory
 * @param feInfo the file's encryption info
 * @param storagePolicy ID of the storage policy for the path
 */
public HdfsFileStatus(long length, boolean isdir, int block_replication,
    long blocksize, long modification_time, long access_time,
    FsPermission permission, String owner, String group, byte[] symlink,
    byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
    byte storagePolicy) {
  this.length = length;
  this.isdir = isdir;
  this.block_replication = (short)block_replication;
  this.blocksize = blocksize;
  this.modification_time = modification_time;
  this.access_time = access_time;
  this.permission = (permission == null) ? 
      ((isdir || symlink!=null) ? 
          FsPermission.getDefault() : 
          FsPermission.getFileDefault()) :
      permission;
  this.owner = (owner == null) ? "" : owner;
  this.group = (group == null) ? "" : group;
  this.symlink = symlink;
  this.path = path;
  this.fileId = fileId;
  this.childrenNum = childrenNum;
  this.feInfo = feInfo;
  this.storagePolicy = storagePolicy;
}
 
Example 4   Project: hadoop   File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored
 * at creation time only; HDFS may later move the blocks away from the
 * favored nodes during balancing or replication. A value of null means
 * no favored nodes for this create.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if(LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 5   Project: big-c   File: HdfsFileStatus.java
/**
 * Constructor
 * @param length the number of bytes the file has
 * @param isdir if the path is a directory
 * @param block_replication the replication factor
 * @param blocksize the block size
 * @param modification_time modification time
 * @param access_time access time
 * @param permission permission
 * @param owner the owner of the path
 * @param group the group of the path
 * @param symlink symlink target, or null if the path is not a symlink
 * @param path the local name in Java UTF-8 encoding, the same as the in-memory form
 * @param fileId the file id
 * @param childrenNum the number of children if the path is a directory
 * @param feInfo the file's encryption info
 * @param storagePolicy ID of the storage policy for the path
 */
public HdfsFileStatus(long length, boolean isdir, int block_replication,
    long blocksize, long modification_time, long access_time,
    FsPermission permission, String owner, String group, byte[] symlink,
    byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
    byte storagePolicy) {
  this.length = length;
  this.isdir = isdir;
  this.block_replication = (short)block_replication;
  this.blocksize = blocksize;
  this.modification_time = modification_time;
  this.access_time = access_time;
  this.permission = (permission == null) ? 
      ((isdir || symlink!=null) ? 
          FsPermission.getDefault() : 
          FsPermission.getFileDefault()) :
      permission;
  this.owner = (owner == null) ? "" : owner;
  this.group = (group == null) ? "" : group;
  this.symlink = symlink;
  this.path = path;
  this.fileId = fileId;
  this.childrenNum = childrenNum;
  this.feInfo = feInfo;
  this.storagePolicy = storagePolicy;
}
 
Example 6   Project: big-c   File: DFSClient.java
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored
 * at creation time only; HDFS may later move the blocks away from the
 * favored nodes during balancing or replication. A value of null means
 * no favored nodes for this create.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if(LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 7   Project: crail   File: CrailHDFS.java
@Override
public FileStatus getFileStatus(Path path) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
	CrailNode directFile = null;
	try {
		directFile = dfs.lookup(path.toUri().getRawPath()).get();
	} catch(Exception e){
		throw new IOException(e);
	}
	if (directFile == null){
		throw new FileNotFoundException("filename " + path);
	}
	
	FsPermission permission = FsPermission.getFileDefault();
	if (directFile.getType().isDirectory()) {
		permission = FsPermission.getDirDefault();
	}		
	FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
	return status;
}
 
Example 8   Project: crail   File: CrailHDFS.java
@Override
public FileStatus[] listStatus(Path path) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
	try {
		CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
		Iterator<String> iter = node.asContainer().listEntries();
		ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
		while(iter.hasNext()){
			String filepath = iter.next();
			CrailNode directFile = dfs.lookup(filepath).get();
			if (directFile != null){
				FsPermission permission = FsPermission.getFileDefault();
				if (directFile.getType().isDirectory()) {
					permission = FsPermission.getDirDefault();
				}
				FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, new Path(filepath).makeQualified(this.getUri(), workingDir));	
				statusList.add(status);
			}
		}
		FileStatus[] list = new FileStatus[statusList.size()];
		statusList.toArray(list);
		return list;
	} catch(Exception e){
		throw new FileNotFoundException(path.toUri().getRawPath());
	}
}
 
Example 9   Project: crail   File: CrailHadoopFileSystem.java
@Override
public FileStatus[] listStatus(Path path) throws FileNotFoundException, IOException {
	try {
		CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
		Iterator<String> iter = node.asContainer().listEntries();
		ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
		while(iter.hasNext()){
			String filepath = iter.next();
			CrailNode directFile = dfs.lookup(filepath).get();
			if (directFile != null){
				FsPermission permission = FsPermission.getFileDefault();
				if (directFile.getType().isDirectory()) {
					permission = FsPermission.getDirDefault();
				}
				FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, new Path(filepath).makeQualified(this.getUri(), this.workingDir));	
				statusList.add(status);
			}
		}
		FileStatus[] list = new FileStatus[statusList.size()];
		statusList.toArray(list);
		return list;
	} catch(Exception e){
		throw new FileNotFoundException(path.toUri().getRawPath());
	}
}
 
Example 10   Project: crail   File: CrailHadoopFileSystem.java
@Override
public FileStatus getFileStatus(Path path) throws IOException {
	CrailNode directFile = null;
	try {
		directFile = dfs.lookup(path.toUri().getRawPath()).get();
	} catch (Exception e) {
		throw new IOException(e);
	}
	if (directFile == null) {
		throw new FileNotFoundException("File does not exist: " + path);
	}
	FsPermission permission = FsPermission.getFileDefault();
	if (directFile.getType().isDirectory()) {
		permission = FsPermission.getDirDefault();
	}
	FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(), directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
	return status;
}
 
Example 11   Project: zeppelin   File: FileSystemStorage.java
public void writeFile(final String content, final Path file, boolean writeTempFileFirst, Set<PosixFilePermission> permissions)
    throws IOException {
  FsPermission fsPermission;
  if (permissions == null || permissions.isEmpty()) {
    fsPermission = FsPermission.getFileDefault();
  } else {
    // FsPermission expects a 10-character string because of the leading
    // directory indicator, i.e. "drwx------". The JDK toString method returns
    // a 9-character string, so prepend a leading character.
    fsPermission = FsPermission.valueOf("-" + PosixFilePermissions.toString(permissions));
  }
  callHdfsOperation(new HdfsOperation<Void>() {
    @Override
    public Void call() throws IOException {
      InputStream in = new ByteArrayInputStream(content.getBytes(
          zConf.getString(ZeppelinConfiguration.ConfVars.ZEPPELIN_ENCODING)));
      Path tmpFile = new Path(file.toString() + ".tmp");
      IOUtils.copyBytes(in, fs.create(tmpFile), hadoopConf);
      fs.setPermission(tmpFile, fsPermission);
      fs.delete(file, true);
      fs.rename(tmpFile, file);
      return null;
    }
  });
}
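The conversion above works because FsPermission.valueOf() expects the ten-character ls-style form (with a leading file-type indicator), while PosixFilePermissions.toString() produces only the nine permission characters. The standalone sketch below illustrates just that trick; it is written for this article and is not part of Zeppelin:

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import org.apache.hadoop.fs.permission.FsPermission;

public class PosixToFsPermissionDemo {
  public static void main(String[] args) {
    // JDK form: nine characters, no file-type indicator.
    Set<PosixFilePermission> posix = PosixFilePermissions.fromString("rw-r-----");

    // FsPermission.valueOf expects the ten-character ls-style form
    // ("-rw-r-----"), so prepend a placeholder type character.
    FsPermission fsPermission =
        FsPermission.valueOf("-" + PosixFilePermissions.toString(posix));

    System.out.println(fsPermission); // rw-r-----
  }
}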
 
Example 12   Project: lucene-solr   File: RawLocalFileSystem.java
private LocalFSFileOutputStream(Path f, boolean append,
                                FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (!append && permission == null) {
    permission = FsPermission.getFileDefault();
  }
  if (permission == null) {
    this.fos = new FileOutputStream(file, append);
  } else {
    permission = permission.applyUMask(FsPermission.getUMask(getConf()));
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
          append, permission.toShort());
    } else {
      this.fos = new FileOutputStream(file, append);
      boolean success = false;
      try {
        setPermission(f, permission);
        success = true;
      } finally {
        if (!success) {
          IOUtils.cleanup(LOG, this.fos);
        }
      }
    }
  }
}
 
Example 13   Project: hbase   File: CommonFSUtils.java
/**
 * Get the file permissions specified in the configuration, if they are
 * enabled.
 *
 * @param fs filesystem that the file will be created on.
 * @param conf configuration to read for determining if permissions are
 *          enabled and which to use
 * @param permssionConfKey property key in the configuration to use when
 *          finding the permission
 * @return the permission to use when creating a new file on the fs. If
 *         special permissions are not specified in the configuration, then
 *         the default permissions on the fs will be returned.
 */
public static FsPermission getFilePermissions(final FileSystem fs,
    final Configuration conf, final String permssionConfKey) {
  boolean enablePermissions = conf.getBoolean(
      HConstants.ENABLE_DATA_FILE_UMASK, false);

  if (enablePermissions) {
    try {
      FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
      // make sure that we have a mask, if not, go default.
      String mask = conf.get(permssionConfKey);
      if (mask == null) {
        return FsPermission.getFileDefault();
      }
      // apply the umask
      FsPermission umask = new FsPermission(mask);
      return perm.applyUMask(umask);
    } catch (IllegalArgumentException e) {
      LOG.warn(
          "Incorrect umask attempted to be created: "
              + conf.get(permssionConfKey)
              + ", using default file permissions.", e);
      return FsPermission.getFileDefault();
    }
  }
  return FsPermission.getFileDefault();
}
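A possible usage sketch for the helper above; the configuration key hbase.custom.file.umask is made up for illustration (the method takes the key name as a parameter), and CommonFSUtils is assumed to live in org.apache.hadoop.hbase.util as in recent HBase releases:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class FilePermissionsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Umask handling disabled (the default): falls back to FsPermission.getFileDefault().
    FsPermission plain =
        CommonFSUtils.getFilePermissions(fs, conf, "hbase.custom.file.umask");

    // Enable umask handling and set a umask under the chosen key; the result is
    // the full rwx permission with that umask applied.
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    conf.set("hbase.custom.file.umask", "077");
    FsPermission restricted =
        CommonFSUtils.getFilePermissions(fs, conf, "hbase.custom.file.umask");

    System.out.println(plain);      // e.g. rw-rw-rw-
    System.out.println(restricted); // e.g. rwx------
  }
}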
 
Example 14   Project: hadoop   File: FileStatus.java
/**
 * Sets permission.
 * @param permission if permission is null, default value is set
 */
protected void setPermission(FsPermission permission) {
  this.permission = (permission == null) ? 
                    FsPermission.getFileDefault() : permission;
}
 
Example 15   Project: big-c   File: FileStatus.java
/**
 * Sets permission.
 * @param permission if permission is null, default value is set
 */
protected void setPermission(FsPermission permission) {
  this.permission = (permission == null) ? 
                    FsPermission.getFileDefault() : permission;
}
 
Example 16   Project: pravega   File: MockFileSystem.java
FileData(Path path) {
    this.path = path;
    this.permission = FsPermission.getFileDefault();
}