Source Code Examples for org.apache.hadoop.fs.permission.FsPermission

The examples below show how the org.apache.hadoop.fs.permission.FsPermission API is used in real projects; you can also follow the links to view the full source code on GitHub.
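
Before the project examples, here is a minimal standalone sketch of the FsPermission constructors and helpers that recur below; the FsPermission and FsAction calls are standard Hadoop API, while the wrapper class itself is illustrative only.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionBasics {
  public static void main(String[] args) {
    // From an octal short literal (the leading 0 marks octal in Java).
    FsPermission fromShort = new FsPermission((short) 0644);

    // From an octal string, as some examples below do.
    FsPermission fromString = new FsPermission("750");

    // From explicit user/group/other actions.
    FsPermission fromActions =
        new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE);

    // Library defaults: 0777 for directories, 0666 for files (pre-umask).
    FsPermission dirDefault = FsPermission.getDirDefault();
    FsPermission fileDefault = FsPermission.getFileDefault();

    // Applying a umask clears the masked bits: 0666 & ~0022 == 0644.
    FsPermission masked = fileDefault.applyUMask(new FsPermission((short) 022));

    System.out.println(fromShort);   // rw-r--r--
    System.out.println(fromString);  // rwxr-x---
    System.out.println(fromActions); // rw-r-----
    System.out.println(dirDefault);  // rwxrwxrwx
    System.out.println(masked);      // rw-r--r--
  }
}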

Example 1  Project: hadoop-ozone  File: TestOzoneFileInterfaces.java
/**
 * Verify that a directory exists and is initialized correctly.
 * @param path path of the directory
 * @return null if the path does not exist, else its FileStatus
 * @throws IOException on other filesystem errors
 */
private FileStatus getDirectoryStat(Path path) throws IOException {

  FileStatus status = null;

  try {
    status = fs.getFileStatus(path);
  } catch (FileNotFoundException e) {
    return null;
  }
  assertTrue("The created path is not directory.", status.isDirectory());

  assertEquals(FsPermission.getDirDefault(), status.getPermission());
  verifyOwnerGroup(status);

  assertEquals(0, status.getLen());

  return status;
}
 
Example 2  Project: big-c  File: PBHelper.java
public static CachePoolInfo convert(CachePoolInfoProto proto) {
  // Pool name is a required field; the rest are optional.
  String poolName = checkNotNull(proto.getPoolName());
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (proto.hasOwnerName()) {
    info.setOwnerName(proto.getOwnerName());
  }
  if (proto.hasGroupName()) {
    info.setGroupName(proto.getGroupName());
  }
  if (proto.hasMode()) {
    info.setMode(new FsPermission((short)proto.getMode()));
  }
  if (proto.hasLimit()) {
    info.setLimit(proto.getLimit());
  }
  if (proto.hasMaxRelativeExpiry()) {
    info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
  }
  return info;
}
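
The mode conversion above relies on FsPermission round-tripping losslessly through its short form; a quick illustrative sketch (the class and variable names are mine, not from PBHelper):

import org.apache.hadoop.fs.permission.FsPermission;

public class ModeRoundTrip {
  public static void main(String[] args) {
    FsPermission mode = new FsPermission((short) 0755);
    short wire = mode.toShort();            // the integer a proto would carry
    FsPermission back = new FsPermission(wire);
    System.out.println(mode.equals(back));  // true
  }
}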
 
Example 3  Project: RDFS  File: DistCh.java
/**
 * path:owner:group:permission
 * e.g.
 * /user/foo:foo:bar:700 
 */
FileOperation(String line) {
  try {
    String[] t = line.split(":", 4);
    for(int i = 0; i < t.length; i++) {
      if ("".equals(t[i])) {
        t[i] = null;
      }
    }

    src = new Path(t[0]);
    owner = t[1];
    group = t[2];
    permission = t[3] == null ? null :
        new FsPermission(Short.parseShort(t[3], 8));

    checkState();
  }
  catch(Exception e) {
    throw (IllegalArgumentException)new IllegalArgumentException(
        "line=" + line).initCause(e);
  }
}
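
Note the radix-8 parse: the mode text "700" is read as an octal number, producing the short value that the FsPermission constructor expects. A small illustrative sketch:

import org.apache.hadoop.fs.permission.FsPermission;

public class OctalModeParse {
  public static void main(String[] args) {
    short mode = Short.parseShort("700", 8);  // 448 decimal == 0700 octal
    FsPermission perm = new FsPermission(mode);
    System.out.println(perm);                 // rwx------
  }
}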
 
Example 4  Project: hadoop  File: NativeAzureFileSystem.java
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(p);
  String key = pathToKey(absolutePath);
  FileMetadata metadata = store.retrieveMetadata(key);
  if (metadata == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }
  permission = applyUMask(permission,
      metadata.isDir() ? UMaskApplyMode.ChangeExistingDirectory
          : UMaskApplyMode.ChangeExistingFile);
  if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(key, createPermissionStatus(permission));
  } else if (!metadata.getPermissionStatus().getPermission().
      equals(permission)) {
    store.changePermissionStatus(key, new PermissionStatus(
        metadata.getPermissionStatus().getUserName(),
        metadata.getPermissionStatus().getGroupName(),
        permission));
  }
}
 
Example 5  Project: hadoop  File: FSAclBaseTest.java
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, MASK, READ_EXECUTE),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short)010750); // the extra 010000 bit flags the presence of an ACL
  assertAclFeature(true);
}
 
Example 6  Project: hadoop  File: TestDFSPermission.java
private void createAndCheckPermission(OpType op, Path name, short umask,
    FsPermission permission, boolean delete) throws Exception {
  // create the file/directory
  create(op, name, umask, permission);

  // get the short form of the permission
  short permissionNum = (DEFAULT_PERMISSION.equals(permission)) ? MAX_PERMISSION
      : permission.toShort();

  // get the expected permission: the umask bits are cleared from the
  // requested mode regardless of the op type
  short expectedPermission = (short) (~umask & permissionNum);

  // check if permission is correctly set
  checkPermission(name, expectedPermission, delete);
}
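
The expected permission is simply the requested mode with the umask bits cleared, which is what ~umask & permissionNum computes. A worked illustrative sketch, assuming a requested mode of 0666 and a umask of 022:

public class UmaskMath {
  public static void main(String[] args) {
    short requested = (short) 0666;
    short umask = (short) 022;
    short expected = (short) (~umask & requested);
    System.out.println(Integer.toOctalString(expected)); // 644
  }
}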
 
Example 7  Project: big-c  File: S3AFileSystem.java
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the file name to open
 * @param permission the permission to set on the new file
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize the requested block size.
 * @param progress the progress reporter.
 * @throws IOException in the event of IO related errors.
 * @see #setPermission(Path, FsPermission)
 */
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, 
  int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  String key = pathToKey(f);

  if (!overwrite && exists(f)) {
    throw new FileAlreadyExistsException(f + " already exists");
  }
  if (getConf().getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD)) {
    return new FSDataOutputStream(new S3AFastOutputStream(s3, this, bucket,
        key, progress, statistics, cannedACL,
        serverSideEncryptionAlgorithm, partSize, (long)multiPartThreshold,
        threadPoolExecutor), statistics);
  }
  // We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
  return new FSDataOutputStream(new S3AOutputStream(getConf(), transfers, this,
    bucket, key, progress, cannedACL, statistics, 
    serverSideEncryptionAlgorithm), null);
}
 
Example 8  Project: distributedlog  File: DLFileSystem.java
@Override
public FSDataOutputStream create(Path path,
                                 FsPermission fsPermission,
                                 boolean overwrite,
                                 int bufferSize,
                                 short replication,
                                 long blockSize,
                                 Progressable progressable) throws IOException {
    // for overwrite, delete the existing file first.
    if (overwrite) {
        delete(path, false);
    }

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    confLocal.setEnsembleSize(replication);
    confLocal.setWriteQuorumSize(replication);
    confLocal.setAckQuorumSize(replication);
    confLocal.setMaxLogSegmentBytes(blockSize);
    return append(path, bufferSize, Optional.of(confLocal));
}
 
Example 9  Project: hadoop  File: DefaultContainerExecutor.java
/**
 * Initialize the local directories for a particular user.
 * <ul>
 * <li>$local.dir/usercache/$user</li>
 * </ul>
 */
void createUserLocalDirs(List<String> localDirs, String user)
    throws IOException {
  boolean userDirStatus = false;
  FsPermission userperms = new FsPermission(USER_PERM);
  for (String localDir : localDirs) {
    // create $local.dir/usercache/$user and its immediate parent
    try {
      createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user);
    } catch (IOException e) {
      LOG.warn("Unable to create the user directory : " + localDir, e);
      continue;
    }
    userDirStatus = true;
  }
  if (!userDirStatus) {
    throw new IOException("Not able to initialize user directories "
        + "in any of the configured local directories for user " + user);
  }
}
 
Example 10  Project: big-c  File: TestJsonUtil.java
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
 
Example 11  Project: nifi  File: PutHDFSTest.java
@Test
public void testPutFilePermissionsWithNoConfiguredUmask() throws IOException {
    // assert that the file-permission fallback works; it should fall back to FsPermission.DEFAULT_UMASK
    MockFileSystem fileSystem = new MockFileSystem();
    PutHDFS proc = new TestablePutHDFS(kerberosProperties, fileSystem);
    TestRunner runner = TestRunners.newTestRunner(proc);
    runner.setProperty(PutHDFS.DIRECTORY, "target/test-classes");
    runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace");
    // invoke the abstractOnScheduled method so the Hadoop configuration can be applied to the MockFileSystem instance
    proc.abstractOnScheduled(runner.getProcessContext());
    fileSystem.setConf(proc.getConfiguration());
    try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1");
        runner.enqueue(fis, attributes);
        runner.run();
    }
    assertEquals(FsPermission.getFileDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK)),
        fileSystem.getFileStatus(new Path("target/test-classes/randombytes-1")).getPermission());
}
 
Example 12  Project: big-c  File: TestFavoredNodesEndToEnd.java
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // create 10 files with random preferred nodes
  for (int i = 0; i < NUM_FILES; i++) {
    Random rand = new Random(System.currentTimeMillis() + i);
    // pass a newly created rand so as to get a uniform distribution each time
    // without too many collisions (see the do-while loop in getDatanodes)
    InetSocketAddress datanode[] = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // create and close the file.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    // re-open for append
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // verify the files got created in the right nodes
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
 
Example 13
/** @throws Exception If failed. */
@Test
public void testDeleteRecursively() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    Path someDir2 = new Path(fsHome, "/someDir1/someDir2");

    assertTrue(fs.delete(someDir2, true));

    assertPathDoesNotExist(fs, someDir2);
    assertPathDoesNotExist(fs, someDir3);
}
 
Example 14  Project: datacollector  File: AzureFile.java
@SuppressWarnings("unchecked")
public Map<String, Object> getFileMetadata() {
  Map<String, Object>  metadata = new HashMap<>();
  metadata.put(HeaderAttributeConstants.FILE_NAME, status.getPath().getName());
  metadata.put(HeaderAttributeConstants.FILE, status.getPath().toUri().getPath());
  metadata.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, status.getModificationTime());
  metadata.put(HeaderAttributeConstants.LAST_ACCESS_TIME, status.getAccessTime());
  metadata.put(HeaderAttributeConstants.IS_DIRECTORY, status.isDirectory());
  metadata.put(HeaderAttributeConstants.IS_SYMBOLIC_LINK, status.isSymlink());
  metadata.put(HeaderAttributeConstants.SIZE, status.getLen());
  metadata.put(HeaderAttributeConstants.OWNER, status.getOwner());
  metadata.put(HeaderAttributeConstants.GROUP, status.getGroup());
  metadata.put(HeaderAttributeConstants.BLOCK_SIZE, status.getBlockSize());
  metadata.put(HeaderAttributeConstants.REPLICATION, status.getReplication());
  metadata.put(HeaderAttributeConstants.IS_ENCRYPTED, status.isEncrypted());

  FsPermission permission = status.getPermission();
  if (permission != null) {
    metadata.put(PERMISSIONS, permission.toString());
  }

  return metadata;
}
 
Example 15  Project: ignite  File: HadoopIgfsProperties.java
/**
 * Constructor.
 *
 * @param props Properties.
 * @throws IgniteException In case of error.
 */
public HadoopIgfsProperties(Map<String, String> props) throws IgniteException {
    if (props == null)
        return;

    usrName = props.get(IgfsUtils.PROP_USER_NAME);
    grpName = props.get(IgfsUtils.PROP_GROUP_NAME);

    String permStr = props.get(IgfsUtils.PROP_PERMISSION);

    if (permStr != null) {
        try {
            perm = new FsPermission((short)Integer.parseInt(permStr, 8));
        }
        catch (NumberFormatException ignore) {
            throw new IgniteException("Permissions cannot be parsed: " + permStr);
        }
    }
}
 
Example 16  Project: big-c  File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
 
Example 17  Project: hadoop  File: TestListFilesInFileContext.java
/** Test when input path is a file */
@Test
public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);

  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
      FILE1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  
  itor = fc.util().listFiles(FILE1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
}
 
Example 18  Project: big-c  File: RawLocalFileSystem.java
private LocalFSFileOutputStream(Path f, boolean append,
    FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (permission == null) {
    this.fos = new FileOutputStream(file, append);
  } else {
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
          append, permission.toShort());
    } else {
      this.fos = new FileOutputStream(file, append);
      boolean success = false;
      try {
        setPermission(f, permission);
        success = true;
      } finally {
        if (!success) {
          IOUtils.cleanup(LOG, this.fos);
        }
      }
    }
  }
}
 
Example 19  Project: big-c  File: TestGlobPaths.java
void run() throws Exception {
  // Verify that the default scheme is hdfs, when we don't supply one.
  wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
      false);
  wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
      + "/alphaLink"), false);
  FileStatus statuses[] = wrap.globStatus(
      new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
  Assert.assertEquals(1, statuses.length);
  Path path = statuses[0].getPath();
  Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
  Assert.assertEquals("hdfs", path.toUri().getScheme());

  // FileContext can list a file:/// URI.
  // Since everyone should have the root directory, we list that.
  statuses = fc.util().globStatus(new Path("file:///"),
      new AcceptAllPathFilter());
  Assert.assertEquals(1, statuses.length);
  Path filePath = statuses[0].getPath();
  Assert.assertEquals("file", filePath.toUri().getScheme());
  Assert.assertEquals("/", filePath.toUri().getPath());

  // The FileSystem should have scheme 'hdfs'
  Assert.assertEquals("hdfs", fs.getScheme());
}
 
Example 20  Project: big-c  File: FSPermissionChecker.java
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
 
Example 21  Project: hadoop  File: TestDistCacheEmulation.java
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load.
 */
@Test(timeout = 2000)
public void testDistCacheEmulationConfigurability() throws IOException {
  Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
  Path ioPath = new Path("testDistCacheEmulationConfigurability")
      .makeQualified(GridmixTestUtils.dfs.getUri(),
          GridmixTestUtils.dfs.getWorkingDirectory());
  FileSystem fs = FileSystem.get(jobConf);
  FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));

  // default config
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertTrue("Default configuration of "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " is wrong.", dce.shouldEmulateDistCacheLoad());

  // config property set to false
  jobConf.setBoolean(
      DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertFalse("Disabling of emulation of distributed cache load by setting "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
 
Example 22
/** @throws Exception If failed. */
@Test
public void testDeleteRecursivelyFromRoot() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");

    FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    Path root = new Path(fsHome, "/");

    assertFalse(fs.delete(root, true));

    assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));

    assertPathDoesNotExist(fs, someDir3);
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
    assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
    assertPathExists(fs, root);
}
 
Example 23
@Override
public FsPermission getFsPermission(
    INodeAuthorizationInfo node, int snapshotId) {
  FsPermission permission;
  String[] pathElements = getPathElements(node);
  if (!isSentryManaged(pathElements)) {
    permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
  } else {
    FsPermission returnPerm = this.permission;
    // Handle the case where the prefix directory is itself associated with
    // an authorizable object (the default db directory in hive).
    // An executable permission needs to be set on the prefix directory in
    // this case; otherwise, subdirectories (which map to other dbs) will
    // not be traversable.
    for (String [] prefixPath : authzInfo.getPathPrefixes()) {
      if (Arrays.equals(prefixPath, pathElements)) {
        returnPerm = FsPermission.createImmutable((short)(returnPerm.toShort() | 0x01));
        break;
      }
    }
    permission = returnPerm;
  }
  return permission;
}
 
Example 24  Project: ranger  File: HDFSRangerTest.java
void createFile(String baseDir, Integer index) throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    String dirName = baseDir + (index != null ? String.valueOf(index) : "");
    String fileName = dirName + "/dummy-data";
    final Path file = new Path(fileName);
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
}
 
Example 25  Project: RDFS  File: NNThroughputBenchmark.java
/**
 * Do file create.
 */
long executeOp(int daemonId, int inputIdx, String clientName) 
throws IOException {
  long start = System.currentTimeMillis();
  // dummyActionNoSynch(fileIdx);
  nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                  clientName, true, true, replication, BLOCK_SIZE);
  long end = System.currentTimeMillis();
  // when closing upon create, retry complete() until the NameNode
  // acknowledges the close
  for(boolean written = !closeUponCreate; !written; 
    written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
  return end-start;
}
 
Example 26  Project: hadoop  File: ViewFileSystemBaseTest.java
@Test
public void testRootReadableExecutable() throws IOException {
  // verify executable permission on root: cd /
  //
  Assert.assertFalse("In root before cd",
      fsView.getWorkingDirectory().isRoot());
  fsView.setWorkingDirectory(new Path("/"));
  Assert.assertTrue("Not in root dir after cd",
    fsView.getWorkingDirectory().isRoot());

  // verify readable
  //
  verifyRootChildren(fsView.listStatus(fsView.getWorkingDirectory()));

  // verify permissions
  //
  final FileStatus rootStatus =
      fsView.getFileStatus(fsView.getWorkingDirectory());
  final FsPermission perms = rootStatus.getPermission();

  Assert.assertTrue("User-executable permission not set!",
      perms.getUserAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("User-readable permission not set!",
      perms.getUserAction().implies(FsAction.READ));
  Assert.assertTrue("Group-executable permission not set!",
      perms.getGroupAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Group-readable permission not set!",
      perms.getGroupAction().implies(FsAction.READ));
  Assert.assertTrue("Other-executable permission not set!",
      perms.getOtherAction().implies(FsAction.EXECUTE));
  Assert.assertTrue("Other-readable permission not set!",
      perms.getOtherAction().implies(FsAction.READ));
}
 
Example 27  Project: big-c  File: TestHdfsCryptoStreams.java
@Before
@Override
public void setUp() throws IOException {
  ++pathCount;
  path = new Path("/p" + pathCount);
  file = new Path(path, "file");
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0700));

  super.setUp();
}
 
Example 28
@Test
public void testMkdirs_shouldReturnDefaultFilePermissions()
    throws IOException, URISyntaxException {
  Configuration config = GoogleHadoopFileSystemIntegrationHelper.getTestConfig();
  config.set("fs.gs.reported.permissions", "357");
  GoogleHadoopFS ghfs = new GoogleHadoopFS(initUri, config);

  FsPermission permission = new FsPermission("000");
  FsPermission expectedPermission = new FsPermission("357");

  Path path = new Path(initUri.resolve("/testMkdirs_shouldRespectFilePermissions").toString());
  ghfs.mkdir(path, permission, /* createParent= */ true);

  assertThat(ghfs.getFileStatus(path).getPermission()).isEqualTo(expectedPermission);
}
 
Example 29  Project: jsr203-hadoop  File: HadoopPosixFileAttributes.java
public HadoopPosixFileAttributes(HadoopFileSystem hdfs, Object fileKey,
    FileStatus fileStatus) throws IOException {
  super(fileKey, fileStatus);
  this.owner = hdfs.getUserPrincipalLookupService()
      .lookupPrincipalByName(fileStatus.getOwner());
  this.group = hdfs.getUserPrincipalLookupService()
      .lookupPrincipalByGroupName(fileStatus.getGroup());
  FsPermission fsPermission = getFileStatus().getPermission();
  String perms = fsPermission.getUserAction().SYMBOL
      + fsPermission.getGroupAction().SYMBOL
      + fsPermission.getOtherAction().SYMBOL;
  this.permissions = PosixFilePermissions.fromString(perms);
}
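
The concatenated SYMBOL fields form the nine-character rwxrwxrwx-style string that java.nio expects; a standalone illustrative sketch of the same bridge (the class name is mine):

import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionBridge {
  public static void main(String[] args) {
    FsPermission fsPerm = new FsPermission((short) 0750);
    String symbolic = fsPerm.getUserAction().SYMBOL
        + fsPerm.getGroupAction().SYMBOL
        + fsPerm.getOtherAction().SYMBOL;    // "rwxr-x---"
    Set<PosixFilePermission> nioPerms =
        PosixFilePermissions.fromString(symbolic);
    System.out.println(nioPerms);
  }
}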
 