org.apache.hadoop.fs.permission.FsPermission#getDefault() Source Code Examples

Listed below are example usages of org.apache.hadoop.fs.permission.FsPermission#getDefault() drawn from open-source projects; the full sources can be viewed in each project's GitHub repository.
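As context for the snippets below: in the Hadoop versions these projects build against, FsPermission.getDefault() returns the maximal permission 0777 (later Hadoop releases deprecate it in favor of getFileDefault() and getDirDefault()). Most examples use it as a null fallback. A minimal sketch of that pattern, with the helper name orDefault chosen for illustration rather than taken from any project below:

import org.apache.hadoop.fs.permission.FsPermission;

// Fallback pattern seen throughout the examples: use the caller's permission
// when one was supplied, otherwise Hadoop's default (0777).
private static FsPermission orDefault(FsPermission permission) {
  return (permission == null) ? FsPermission.getDefault() : permission;
}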

Example 1  Project: hadoop-gpu  File: FileStatus.java
public FileStatus(long length, boolean isdir, int block_replication,
                  long blocksize, long modification_time, long access_time,
                  FsPermission permission, String owner, String group, 
                  Path path) {
  this.length = length;
  this.isdir = isdir;
  this.block_replication = (short)block_replication;
  this.blocksize = blocksize;
  this.modification_time = modification_time;
  this.access_time = access_time;
  this.permission = (permission == null) ? 
                    FsPermission.getDefault() : permission;
  this.owner = (owner == null) ? "" : owner;
  this.group = (group == null) ? "" : group;
  this.path = path;
}
 
Example 2  Project: ignite  File: IgniteHadoopFileSystem.java
/**
 * Convert Hadoop permission into IGFS file attribute.
 *
 * @param perm Hadoop permission.
 * @return IGFS attributes.
 */
private Map<String, String> permission(FsPermission perm) {
    if (perm == null)
        perm = FsPermission.getDefault();

    return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
}
 
Example 3  Project: RDFS  File: TestINodeFile.java
/**
 * Test for the PreferredBlockSize value. Sets a value and checks if it was
 * set correct.
 */
@Test
public void testPreferredBlockSize() {
  replication = 3;
  preferredBlockSize = 128*1024*1024;
  INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
                                FsPermission.getDefault()), null, replication,
                                0L, 0L, preferredBlockSize);
  assertEquals("True has to be returned in this case", preferredBlockSize,
         inf.getPreferredBlockSize());
}
 
Example 4  Project: hadoop  File: FSDirSymlinkOp.java
/**
 * Add the given symbolic link to the fs. Record it in the edits log.
 */
private static INodeSymlink addSymlink(FSDirectory fsd, String path,
    INodesInPath iip, String target, PermissionStatus dirPerms,
    boolean createParent, boolean logRetryCache) throws IOException {
  final long mtime = now();
  final byte[] localName = iip.getLastLocalName();
  if (createParent) {
    Map.Entry<INodesInPath, String> e = FSDirMkdirOp
        .createAncestorDirectories(fsd, iip, dirPerms);
    if (e == null) {
      return null;
    }
    iip = INodesInPath.append(e.getKey(), null, localName);
  }
  final String userName = dirPerms.getUserName();
  long id = fsd.allocateNewInodeId();
  PermissionStatus perm = new PermissionStatus(
      userName, null, FsPermission.getDefault());
  INodeSymlink newNode = unprotectedAddSymlink(fsd, iip.getExistingINodes(),
      localName, id, target, mtime, mtime, perm);
  if (newNode == null) {
    NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
    return null;
  }
  fsd.getEditLog().logSymlink(path, target, mtime, mtime, newNode,
      logRetryCache);

  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
  }
  return newNode;
}
 
Example 5  Project: hadoop  File: NativeAzureFileSystem.java
private FileStatus newDirectory(FileMetadata meta, Path path) {
  return new FileStatus(
      0,
      true,
      1,
      blockSize,
      meta == null ? 0 : meta.getLastModified(),
      0,
      meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
      meta == null ? "" : meta.getPermissionStatus().getUserName(),
      meta == null ? "" : meta.getPermissionStatus().getGroupName(),
      path.makeQualified(getUri(), getWorkingDirectory()));
}
 
Example 6  Project: ignite  File: IgniteHadoopFileSystem.java
/**
 * Convert Hadoop FileStatus properties to map.
 *
 * @param status File status.
 * @return IGFS attributes.
 */
private static Map<String, String> properties(FileStatus status) {
    FsPermission perm = status.getPermission();

    if (perm == null)
        perm = FsPermission.getDefault();

    HashMap<String, String> res = new HashMap<>(3);

    res.put(IgfsUtils.PROP_PERMISSION, String.format("%04o", perm.toShort()));
    res.put(IgfsUtils.PROP_USER_NAME, status.getOwner());
    res.put(IgfsUtils.PROP_GROUP_NAME, status.getGroup());

    return res;
}
 
Example 7  Project: incubator-gobblin  File: CopyableFileTest.java
@Test
public void testSerializeDeserialzeNulls() throws Exception {

  CopyableFile copyableFile =
      new CopyableFile(null, null, new OwnerAndPermission("owner", "group",
          FsPermission.getDefault()), Lists.newArrayList(new OwnerAndPermission(null, "group2", FsPermission
          .getDefault())), "checksum".getBytes(), PreserveAttributes.fromMnemonicString(""), "", 0, 0,
          Maps.<String, String>newHashMap(), "", null);

  String serialized = CopyEntity.serialize(copyableFile);
  CopyEntity deserialized = CopyEntity.deserialize(serialized);

  Assert.assertEquals(deserialized, copyableFile);

}
 
Example 8  Project: RDFS  File: TestINodeFile.java
/**
 * Test for the Replication value. Sets a value and checks if it was set
 * correct.
 */
@Test
public void testReplication() {
  replication = 3;
  preferredBlockSize = 128*1024*1024;
  INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
                                FsPermission.getDefault()), null, replication,
                                0L, 0L, preferredBlockSize);
  assertEquals("True has to be returned in this case", replication,
               inf.getReplication());
}
 
Example 9  Project: RDFS  File: TestINodeFile.java
/**
 * IllegalArgumentException is expected for setting above upper bound
 * for PreferredBlockSize.
 * @throws IllegalArgumentException as the result
 */
@Test(expected=IllegalArgumentException.class)
public void testPreferredBlockSizeAboveUpperBound()
            throws IllegalArgumentException {
  replication = 3;
  preferredBlockSize = BLKSIZE_MAXVALUE+1;
  INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
                                FsPermission.getDefault()), null, replication,
                                0L, 0L, preferredBlockSize);
}
 
Example 10  Project: ignite  File: IgniteHadoopFileSystem.java
/**
 * Convert IGFS file attributes into Hadoop permission.
 *
 * @param file File info.
 * @return Hadoop permission.
 */
private FsPermission permission(IgfsFile file) {
    String perm = file.property(IgfsUtils.PROP_PERMISSION, null);

    if (perm == null)
        return FsPermission.getDefault();

    try {
        return new FsPermission((short)Integer.parseInt(perm, 8));
    }
    catch (NumberFormatException ignore) {
        return FsPermission.getDefault();
    }
}
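Examples 6 and 10 are inverses of each other: Example 6 serializes a permission as a zero-padded octal string, and Example 10 parses such a string back. A standalone round-trip sketch (not taken from the Ignite sources) illustrating why the two directions agree:

import org.apache.hadoop.fs.permission.FsPermission;

// Round-trip: permission -> "%04o" octal string -> permission.
private static void octalRoundTrip() {
  FsPermission original = FsPermission.getDefault();           // 0777
  String octal = String.format("%04o", original.toShort());    // "0777"
  FsPermission restored = new FsPermission((short) Integer.parseInt(octal, 8));
  assert original.equals(restored);
}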
 
Example 11  Project: lucene-solr  File: RawLocalFileSystem.java
/**
 * Deprecated. Remains for legacy support. Should be removed when {@link Stat}
 * gains support for Windows and other operating systems.
 */
@Deprecated
private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
    throws IOException {
  String target = FileUtil.readLink(new File(f.toString()));

  try {
    FileStatus fs = getFileStatus(f);
    // If f refers to a regular file or directory
    if (target.isEmpty()) {
      return fs;
    }
    // Otherwise f refers to a symlink
    return new FileStatus(fs.getLen(),
        false,
        fs.getReplication(),
        fs.getBlockSize(),
        fs.getModificationTime(),
        fs.getAccessTime(),
        fs.getPermission(),
        fs.getOwner(),
        fs.getGroup(),
        new Path(target),
        f);
  } catch (FileNotFoundException e) {
    /* The exists method in the File class returns false for dangling
     * links so we can get a FileNotFoundException for links that exist.
     * It's also possible that we raced with a delete of the link. Use
     * the readBasicFileAttributes method in java.nio.file.attributes
     * when available.
     */
    if (!target.isEmpty()) {
      return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
          "", "", new Path(target), f);
    }
    // f refers to a file or directory that does not exist
    throw e;
  }
}
 
Example 12  Project: incubator-gobblin  File: CopyableFileTest.java
@Test
public void testResolveOwnerAndPermission() throws Exception {

  Path path = new Path("/test/path");

  FileStatus fileStatus = new FileStatus(1, false, 0, 0, 0, 0, FsPermission.getDefault(), "owner", "group", path);

  FileSystem fs = mock(FileSystem.class);
  Mockito.doReturn(fileStatus).when(fs).getFileStatus(path);
  Mockito.doReturn(path).when(fs).makeQualified(path);
  Mockito.doReturn(new URI("hdfs://uri")).when(fs).getUri();

  Properties properties = new Properties();
  properties.put(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/final/dir");

  OwnerAndPermission ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
      new CopyConfiguration.CopyConfigurationBuilder(fs, properties).build());
  Assert.assertEquals(ownerAndPermission.getOwner(), null);
  Assert.assertEquals(ownerAndPermission.getGroup(), null);
  Assert.assertEquals(ownerAndPermission.getFsPermission(), null);

  ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
      new CopyConfiguration.CopyConfigurationBuilder(fs, properties).targetGroup(Optional.of("target")).build());
  Assert.assertEquals(ownerAndPermission.getOwner(), null);
  Assert.assertEquals(ownerAndPermission.getGroup(), "target");
  Assert.assertEquals(ownerAndPermission.getFsPermission(), null);

  ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
      new CopyConfiguration.CopyConfigurationBuilder(fs, properties).targetGroup(Optional.of("target")).
          preserve(PreserveAttributes.fromMnemonicString("ug")).build());
  Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
  Assert.assertEquals(ownerAndPermission.getGroup(), "target");
  Assert.assertEquals(ownerAndPermission.getFsPermission(), null);

  ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
      new CopyConfiguration.CopyConfigurationBuilder(fs, properties).preserve(PreserveAttributes.fromMnemonicString("ug")).build());
  Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
  Assert.assertEquals(ownerAndPermission.getGroup(), "group");
  Assert.assertEquals(ownerAndPermission.getFsPermission(), null);

  ownerAndPermission = CopyableFile.resolveReplicatedOwnerAndPermission(fs, path,
      new CopyConfiguration.CopyConfigurationBuilder(fs, properties).preserve(PreserveAttributes.fromMnemonicString("ugp")).build());
  Assert.assertEquals(ownerAndPermission.getOwner(), "owner");
  Assert.assertEquals(ownerAndPermission.getGroup(), "group");
  Assert.assertEquals(ownerAndPermission.getFsPermission(), FsPermission.getDefault());

}
 
Example 13  Project: hadoop  File: TestFsck.java
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {

  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;

  Configuration conf = new Configuration();
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String,String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);

  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
                                anyLong(), anyLong(),
                                anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);

  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, remoteAddress);

  String pathString = "/tmp/testFile";

  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;

  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy);
  Result res = new Result(conf);

  try {
    fsck.check(pathString, file, res);
  } catch (Exception e) {
    fail("Unexpected exception "+ e.getMessage());
  }
  assertTrue(res.toString().contains("HEALTHY"));
}
 
Example 14  Project: incubator-gobblin  File: HiveConverterUtils.java
/**
 * Creates a staging directory with the permission as in source directory.
 * @param fs filesystem object
 * @param destination staging directory location
 * @param conversionEntity conversion entity used to get source directory permissions
 * @param workUnit workunit
 */
public static void createStagingDirectory(FileSystem fs, String destination, HiveProcessingEntity conversionEntity,
    WorkUnitState workUnit) {
  /*
   * Create staging data location with the same permissions as source data location
   *
   * Note that hive can also automatically create the non-existing directories but it does not
   * seem to create it with the desired permissions.
   * According to hive docs permissions for newly created directories/files can be controlled using uMask like,
   *
   * SET hive.warehouse.subdir.inherit.perms=false;
   * SET fs.permissions.umask-mode=022;
   * Upon testing, this did not work
   */
  Path destinationPath = new Path(destination);
  try {
    FsPermission permission;
    String group = null;
    if (conversionEntity.getTable().getDataLocation() != null) {
      FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
      permission = sourceDataFileStatus.getPermission();
      group = sourceDataFileStatus.getGroup();
    } else {
      permission = FsPermission.getDefault();
    }

    if (!fs.mkdirs(destinationPath, permission)) {
      throw new RuntimeException(String.format("Failed to create path %s with permissions %s",
          destinationPath, permission));
    } else {
      fs.setPermission(destinationPath, permission);
      // Set the same group as source
      if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP, DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
        fs.setOwner(destinationPath, null, group);
      }
      log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
    }
  } catch (IOException e) {
    Throwables.propagate(e);
  }
}
 
Example 15  Project: RDFS  File: FileStatus.java
/**
 * Sets permission.
 * @param permission if permission is null, default value is set
 */
protected void setPermission(FsPermission permission) {
  this.permission = (permission == null) ? 
                    FsPermission.getDefault() : permission;
}
 
Example 16  Project: incubator-gobblin
private static CopyableFile createCopyableFile(String path, String fileSet) {
  return new CopyableFile(new FileStatus(0, false, 0, 0, 0, new Path(path)), new Path(path),
      new OwnerAndPermission("owner", "group", FsPermission.getDefault()), null, null,
      PreserveAttributes.fromMnemonicString(""), fileSet, 0, 0, Maps.<String, String>newHashMap(), "", null);
}
 
Example 17  Project: hadoop  File: DFSClient.java
/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 * If permission == null, use {@link FsPermission#getDefault()}.
 * @param createParent create missing parent directory if true
 * 
 * @return True if the operation success.
 * 
 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
 */
public boolean mkdirs(String src, FsPermission permission,
    boolean createParent) throws IOException {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  return primitiveMkdir(src, masked, createParent);
}
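For reference, applyUMask clears the permission bits that are set in the umask, so under the common 022 umask the default 0777 becomes 0755. A small illustrative sketch (the 022 value is assumed here; in the snippet above, dfsClientConf.uMask is populated from the client configuration):

import org.apache.hadoop.fs.permission.FsPermission;

// 0777 & ~0022 == 0755: umask bits are removed from the default permission.
private static FsPermission maskedDefault() {
  FsPermission umask = new FsPermission((short) 0022);
  return FsPermission.getDefault().applyUMask(umask);  // rwxr-xr-x
}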
 
Example 18  Project: big-c  File: DFSClient.java
/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 * If permission == null, use {@link FsPermission#getDefault()}.
 * @param createParent create missing parent directory if true
 * 
 * @return True if the operation success.
 * 
 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
 */
public boolean mkdirs(String src, FsPermission permission,
    boolean createParent) throws IOException {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  return primitiveMkdir(src, masked, createParent);
}
 
Example 19  Project: hadoop  File: AzureNativeFileSystemStore.java
/**
 * Default permission to use when no permission metadata is found.
 * 
 * @return The default permission to use.
 */
private static PermissionStatus defaultPermissionNoBlobMetadata() {
  return new PermissionStatus("", "", FsPermission.getDefault());
}
 
Example 20  Project: big-c  File: AzureNativeFileSystemStore.java
/**
 * Default permission to use when no permission metadata is found.
 * 
 * @return The default permission to use.
 */
private static PermissionStatus defaultPermissionNoBlobMetadata() {
  return new PermissionStatus("", "", FsPermission.getDefault());
}