org.apache.hadoop.fs.contract.ContractOptions#org.apache.hadoop.util.Shell源码实例Demo

下面列出了org.apache.hadoop.fs.contract.ContractOptions#org.apache.hadoop.util.Shell 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: lucene-solr   文件: FileUtil.java
/**
 * Change the permissions on a file or directory, recursing into
 * sub-directories when requested.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string (e.g. "u+w" or an octal mode)
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException if the permission command cannot be built or run
 */
public static int chmod(String filename, String perm, boolean recursive)
    throws IOException {
  // Platform-specific permission command, with the target path appended
  // as the final argument.
  String[] baseCmd = Shell.getSetPermissionCommand(perm, recursive);
  String[] fullCmd = new String[baseCmd.length + 1];
  System.arraycopy(baseCmd, 0, fullCmd, 0, baseCmd.length);
  fullCmd[baseCmd.length] = new File(filename).getPath();

  ShellCommandExecutor executor = new ShellCommandExecutor(fullCmd);
  try {
    executor.execute();
  } catch (IOException e) {
    // Best effort: the failure is surfaced through the exit code below.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while changing permission : {} Exception: {}", filename, StringUtils.stringifyException(e));
    }
  }
  return executor.getExitCode();
}
 
源代码2 项目: big-c   文件: TestNodeManagerShutdown.java
/**
 * Creates a script to run a container that will run forever unless
 * stopped by external means.
 *
 * @param cId container id, echoed into the start file so the test can
 *            verify that the script actually ran
 * @param scriptFileDir directory in which to create the script
 * @param processStartFile file the script appends its id/pid to
 * @return the newly written, platform-appropriate script file
 * @throws IOException if the script file cannot be created
 */
private static File createUnhaltingScriptFile(ContainerId cId,
    File scriptFileDir, File processStartFile) throws IOException {
  File scriptFile = Shell.appendScriptExtension(scriptFileDir, "scriptFile");
  // Fix: the original never closed the PrintWriter if a write threw;
  // try-with-resources guarantees close() (and flush) on every path.
  try (PrintWriter fileWriter = new PrintWriter(scriptFile)) {
    if (Shell.WINDOWS) {
      fileWriter.println("@echo \"Running testscript for delayed kill\"");
      fileWriter.println("@echo \"Writing pid to start file\"");
      fileWriter.println("@echo " + cId + ">> " + processStartFile);
      // pause blocks until the process is killed externally.
      fileWriter.println("@pause");
    } else {
      fileWriter.write("#!/bin/bash\n\n");
      fileWriter.write("echo \"Running testscript for delayed kill\"\n");
      fileWriter.write("hello=\"Got SIGTERM\"\n");
      fileWriter.write("umask 0\n");
      // The trap records receipt of SIGTERM instead of exiting.
      fileWriter.write("trap \"echo $hello >> " + processStartFile +
        "\" SIGTERM\n");
      fileWriter.write("echo \"Writing pid to start file\"\n");
      fileWriter.write("echo $$ >> " + processStartFile + "\n");
      // Loop forever so the container only stops when killed.
      fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
    }
  }
  return scriptFile;
}
 
源代码3 项目: hadoop   文件: ResourceCalculatorPlugin.java
/**
 * Create the ResourceCalculatorPlugin from the class name and configure it.
 * If the class name is null, an OS-specific default implementation is
 * chosen instead.
 *
 * @param clazz ResourceCalculator plugin class-name, may be null
 * @param conf configure the plugin with this.
 * @return ResourceCalculatorPlugin or null if ResourceCalculatorPlugin is not
 *         available for the current system
 */
public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
    Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
  if (clazz != null) {
    return ReflectionUtils.newInstance(clazz, conf);
  }

  // No explicit class: pick a default implementation for this OS.
  try {
    if (Shell.LINUX) {
      return new LinuxResourceCalculatorPlugin();
    } else if (Shell.WINDOWS) {
      return new WindowsResourceCalculatorPlugin();
    }
  } catch (SecurityException se) {
    // Could not determine the operating system name.
    return null;
  }

  // No plugin implemented for this platform.
  return null;
}
 
源代码4 项目: big-c   文件: NativeIO.java
/**
 * Changes the mode bits of {@code path}. On non-Windows platforms the
 * native call is made directly; on Windows the native error codes are
 * translated into the POSIX errnos callers expect.
 *
 * @param path file whose mode should change
 * @param mode new permission bits
 * @throws IOException if the native chmod fails
 */
public static void chmod(String path, int mode) throws IOException {
  if (!Shell.WINDOWS) {
    chmodImpl(path, mode);
    return;
  }
  try {
    chmodImpl(path, mode);
  } catch (NativeIOException nioe) {
    // Windows error code 3 is mapped onto ENOENT; anything else is
    // logged and reported as an unknown errno.
    if (nioe.getErrorCode() == 3) {
      throw new NativeIOException("No such file or directory",
          Errno.ENOENT);
    }
    LOG.warn(String.format("NativeIO.chmod error (%d): %s",
        nioe.getErrorCode(), nioe.getMessage()));
    throw new NativeIOException("Unknown error", Errno.UNKNOWN);
  }
}
 
源代码5 项目: big-c   文件: TestJournalNode.java
// Verifies the JournalNode refuses to start for each kind of invalid
// edits-dir configuration: a relative path, an existing regular file,
// and a directory that cannot be created.
@Test(timeout=100000)
public void testFailToStartWithBadConfig() throws Exception {
  Configuration conf = new Configuration();
  // A relative edits dir must be rejected.
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path");
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  assertJNFailsToStart(conf, "should be an absolute path");
  
  // Existing file which is not a directory 
  File existingFile = new File(TEST_BUILD_DATA, "testjournalnodefile");
  assertTrue(existingFile.createNewFile());
  try {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
        existingFile.getAbsolutePath());
    assertJNFailsToStart(conf, "Not a directory");
  } finally {
    // Always remove the scratch file, even if the assertion failed.
    existingFile.delete();
  }
  
  // Directory which cannot be created: a bogus UNC path on Windows,
  // a path under /proc elsewhere.
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist");
  assertJNFailsToStart(conf, "Cannot create directory");
}
 
源代码6 项目: hadoop   文件: MiniYARNCluster.java
@Override
protected synchronized void serviceStop() throws Exception {
  // Give running application masters a chance to finish, then stop the
  // resource manager owned by this instance.
  if (resourceManagers[index] != null) {
    waitForAppMastersToFinish(5000);
    resourceManagers[index].stop();
  }

  if (Shell.WINDOWS) {
    // On Windows, clean up the short temporary symlink that was created to
    // work around path length limitation.
    String workDirPath = testWorkDir.getAbsolutePath();
    try {
      Path workDir = new Path(workDirPath);
      FileContext.getLocalFSFileContext().delete(workDir, true);
    } catch (IOException e) {
      // Cleanup is best effort; just record the failure.
      LOG.warn("could not cleanup symlink: " +
        testWorkDir.getAbsolutePath());
    }
  }
  super.serviceStop();
}
 
源代码7 项目: hadoop   文件: RawLocalFileSystem.java
/**
 * Opens {@code f} for writing, optionally appending, and applies the
 * requested permission. Uses a native Windows path when available so the
 * file is created with the right mode; otherwise creates then chmods.
 */
private LocalFSFileOutputStream(Path f, boolean append,
    FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (permission == null) {
    // No permission requested: plain stream.
    this.fos = new FileOutputStream(file, append);
    return;
  }
  if (Shell.WINDOWS && NativeIO.isAvailable()) {
    // Native call creates the file with the requested mode directly.
    this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
        append, permission.toShort());
    return;
  }
  // Create first, then chmod; if chmod fails, close the stream so we do
  // not leak a descriptor on a file left with default permissions.
  this.fos = new FileOutputStream(file, append);
  boolean permissionSet = false;
  try {
    setPermission(f, permission);
    permissionSet = true;
  } finally {
    if (!permissionSet) {
      IOUtils.cleanup(LOG, this.fos);
    }
  }
}
 
源代码8 项目: big-c   文件: NativeIO.java
/**
 * Returns the file stat for a file descriptor.
 *
 * @param fd file descriptor.
 * @return the file descriptor file stat.
 * @throws IOException thrown if there was an IO error while obtaining the file stat.
 */
public static Stat getFstat(FileDescriptor fd) throws IOException {
  if (!Shell.WINDOWS) {
    Stat stat = fstat(fd);
    // Resolve the numeric owner/group ids to names via the id cache.
    stat.owner = getName(IdCache.USER, stat.ownerId);
    stat.group = getName(IdCache.GROUP, stat.groupId);
    return stat;
  }
  try {
    return fstat(fd);
  } catch (NativeIOException nioe) {
    // Windows error code 6 is reported as the POSIX EBADF errno;
    // anything else is logged and surfaced as an unknown error.
    if (nioe.getErrorCode() == 6) {
      throw new NativeIOException("The handle is invalid.",
          Errno.EBADF);
    }
    LOG.warn(String.format("NativeIO.getFstat error (%d): %s",
        nioe.getErrorCode(), nioe.getMessage()));
    throw new NativeIOException("Unknown error", Errno.UNKNOWN);
  }
}
 
源代码9 项目: hadoop   文件: ContainerLaunch.java
/**
 * Emits a script line that makes {@code dst} refer to {@code src}, either
 * by copying the file or by creating a symlink via winutils.
 */
@Override
protected void link(Path src, Path dst) throws IOException {
  File srcFile = new File(src.toUri().getPath());
  String srcPath = srcFile.getPath();
  String dstPath = new File(dst.toString()).getPath();
  // If not on Java7+ on Windows, then copy file instead of symlinking.
  // See also FileUtil#symLink for full explanation.
  boolean copyInsteadOfLink = !Shell.isJava7OrAbove() && srcFile.isFile();
  if (copyInsteadOfLink) {
    lineWithLenCheck(String.format("@copy \"%s\" \"%s\"", srcPath, dstPath));
  } else {
    // winutils takes the link name first, then the target.
    lineWithLenCheck(String.format("@%s symlink \"%s\" \"%s\"", Shell.WINUTILS,
      dstPath, srcPath));
  }
  errorCheck();
}
 
源代码10 项目: hadoop   文件: TestLocalDirAllocator.java
/** Two buffer dirs. The first dir exists & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test (timeout = 30000)
public void testROBufferDirAndRWBufferDir() throws Exception {
  // POSIX-permission-specific behaviour; not meaningful on Windows.
  if (isWindows) return;
  String dir1 = buildBufferDir(ROOT, 1);
  String dir2 = buildBufferDir(ROOT, 2);
  try {
    conf.set(CONTEXT, dir1 + "," + dir2);
    assertTrue(localFs.mkdirs(new Path(dir2)));
    BUFFER_ROOT.setReadOnly();
    // Both allocations must land in the writable dir2; done twice to
    // confirm the allocator keeps skipping the read-only dir1.
    validateTempDirCreation(dir2);
    validateTempDirCreation(dir2);
  } finally {
    // Restore write permission so rmBufferDirs() can clean up.
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
源代码11 项目: big-c   文件: FileUtil.java
/**
 * Change the permissions on a file / directory, recursively, if
 * needed.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException if the permission command cannot be built or run
 */
public static int chmod(String filename, String perm, boolean recursive)
                          throws IOException {
  // Platform-specific permission command, with the target path appended
  // as the final argument.
  String[] baseCmd = Shell.getSetPermissionCommand(perm, recursive);
  String[] fullCmd = new String[baseCmd.length + 1];
  System.arraycopy(baseCmd, 0, fullCmd, 0, baseCmd.length);
  fullCmd[baseCmd.length] = new File(filename).getPath();

  ShellCommandExecutor executor = new ShellCommandExecutor(fullCmd);
  try {
    executor.execute();
  } catch (IOException e) {
    // Best effort: the failure is surfaced through the exit code below.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while changing permission : " + filename 
                +" Exception: " + StringUtils.stringifyException(e));
    }
  }
  return executor.getExitCode();
}
 
源代码12 项目: big-c   文件: FileUtil.java
/**
 * Given a Tar File as input it will untar the file in the untar directory
 * passed as the second parameter.
 *
 * This utility will untar ".tar" files and ".tar.gz","tgz" files.
 *
 * @param inFile The tar file as input.
 * @param untarDir The untar directory where to untar the tar file.
 * @throws IOException if the untar directory cannot be created
 */
public static void unTar(File inFile, File untarDir) throws IOException {
  // mkdirs() returning false is only fatal if the directory still does
  // not exist (it may have been created concurrently).
  if (!untarDir.mkdirs() && !untarDir.isDirectory()) {
    throw new IOException("Mkdirs failed to create " + untarDir);
  }

  boolean gzipped = inFile.toString().endsWith("gz");
  if (Shell.WINDOWS) {
    // Tar is not native to Windows. Use simple Java based implementation for
    // tests and simple tar archives
    unTarUsingJava(inFile, untarDir, gzipped);
  } else {
    // spawn tar utility to untar archive for full fledged unix behavior such
    // as resolving symlinks in tar archives
    unTarUsingTar(inFile, untarDir, gzipped);
  }
}
 
源代码13 项目: hadoop   文件: TestContainerExecutor.java
// Verifies that enabling the Windows CPU/memory limit switches makes the
// generated winutils "task create" command carry the -m/-c flags.
@Test (timeout = 5000)
public void testRunCommandWithCpuAndMemoryResources() {
  // Windows only test
  assumeTrue(Shell.WINDOWS);
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
  conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
  String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
      conf, Resource.newInstance(1024, 1));
  float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
      conf);
  // Expected CPU rate: 1 vcore as a fraction of the node's usable cores,
  // scaled by 10000 and capped at 10000.
  int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors));
  // Assert the cpu and memory limits are set correctly in the command
  String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
      String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
  Assert.assertTrue(Arrays.equals(expected, command));
}
 
源代码14 项目: hadoop   文件: TestContainerLaunch.java
// Checks that ShellScriptBuilder.env() accepts lines up to the Windows
// maximum shell line length and rejects lines one character longer.
@Test (timeout = 10000)
public void testWindowsShellScriptBuilderEnv() throws IOException {
  // Test is only relevant on Windows
  Assume.assumeTrue(Shell.WINDOWS);

  // The tests are built on assuming 8191 max command line length
  assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);

  ShellScriptBuilder builder = ShellScriptBuilder.create();

  // test env
  builder.env("somekey", org.apache.commons.lang.StringUtils.repeat("A", 1024));
  // Exactly at the limit: "@set somekey=" plus the value fills the line.
  builder.env("somekey", org.apache.commons.lang.StringUtils.repeat(
      "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()));
  try {
    // One past the limit: note the trailing "+ 1" is OUTSIDE repeat() and
    // concatenates the character '1' to the string, making the line one
    // character longer than the maximum.
    builder.env("somekey", org.apache.commons.lang.StringUtils.repeat(
        "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()) + 1);
    fail("long env was expected to throw");
  } catch(IOException e) {
    assertThat(e.getMessage(), CoreMatchers.containsString(expectedMessage));
  }
}
 
源代码15 项目: big-c   文件: TestLocalDirAllocator.java
/**
 * Test getLocalPathToRead() returns correct filename and "file" schema.
 *
 * @throws IOException
 */
@Test (timeout = 30000)
public void testGetLocalPathToRead() throws IOException {
  // Permission handling in the cleanup below is POSIX-specific.
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    assertTrue(localFs.mkdirs(new Path(dir)));
    // Write a temp file, then look it up again through the allocator.
    File f1 = dirAllocator.createTmpFileForWrite(FILENAME, SMALL_FILE_SIZE,
        conf);
    Path p1 = dirAllocator.getLocalPathToRead(f1.getName(), conf);
    assertEquals(f1.getName(), p1.getName());
    assertEquals("file", p1.getFileSystem(conf).getUri().getScheme());
  } finally {
    // Restore write permission so rmBufferDirs() can clean up.
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
源代码16 项目: big-c   文件: HttpServer.java
/**
 * Builds the default Jetty channel connector used by the HTTP server:
 * bounded accept queue, no DNS resolution, heap buffers, 64KB headers.
 */
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector connector = new SelectChannelConnectorWithSafeStartup();
  connector.setLowResourceMaxIdleTime(10000);
  connector.setAcceptQueueSize(128);
  connector.setResolveNames(false);
  connector.setUseDirectBuffers(false);
  // result of setting the SO_REUSEADDR flag is different on Windows
  // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
  // without this 2 NN's can start on the same machine and listen on
  // the same port with indeterminate routing of incoming requests to them
  if (Shell.WINDOWS) {
    connector.setReuseAddress(false);
  }
  connector.setHeaderBufferSize(64 * 1024);
  return connector;
}
 
源代码17 项目: hadoop   文件: FileUtil.java
/**
 * Given a Tar File as input it will untar the file in the untar directory
 * passed as the second parameter.
 *
 * This utility will untar ".tar" files and ".tar.gz","tgz" files.
 *
 * @param inFile The tar file as input.
 * @param untarDir The untar directory where to untar the tar file.
 * @throws IOException if the untar directory cannot be created
 */
public static void unTar(File inFile, File untarDir) throws IOException {
  // mkdirs() returning false is only a failure if the directory still
  // does not exist afterwards.
  if (!untarDir.mkdirs() && !untarDir.isDirectory()) {
    throw new IOException("Mkdirs failed to create " + untarDir);
  }

  boolean gzipped = inFile.toString().endsWith("gz");
  if (Shell.WINDOWS) {
    // Tar is not native to Windows. Use simple Java based implementation for
    // tests and simple tar archives
    unTarUsingJava(inFile, untarDir, gzipped);
  } else {
    // spawn tar utility to untar archive for full fledged unix behavior such
    // as resolving symlinks in tar archives
    unTarUsingTar(inFile, untarDir, gzipped);
  }
}
 
源代码18 项目: big-c   文件: RawLocalFileSystem.java
/**
 * Creates a single directory, applying {@code permission} when given.
 * On Windows with native IO the directory is created with the mode in
 * one call; elsewhere it is created and then chmodded.
 *
 * @return true if the directory was created
 * @throws IOException if setting the permission fails
 */
protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
    throws IOException {
  if (permission == null) {
    return p2f.mkdir();
  }
  if (Shell.WINDOWS && NativeIO.isAvailable()) {
    try {
      // Single native call creates the dir with the requested mode.
      NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
      return true;
    } catch (IOException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format(
            "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
            p2f, permission.toShort()), e);
      }
      return false;
    }
  }
  // Fallback: mkdir followed by an explicit permission change.
  boolean created = p2f.mkdir();
  if (created) {
    setPermission(p, permission);
  }
  return created;
}
 
源代码19 项目: hadoop   文件: NativeIO.java
public static String getOwner(FileDescriptor fd) throws IOException {
  ensureInitialized();
  if (Shell.WINDOWS) {
    String owner = Windows.getOwner(fd);
    owner = stripDomain(owner);
    return owner;
  } else {
    long uid = POSIX.getUIDforFDOwnerforOwner(fd);
    CachedUid cUid = uidCache.get(uid);
    long now = System.currentTimeMillis();
    if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
      return cUid.username;
    }
    String user = POSIX.getUserName(uid);
    LOG.info("Got UserName " + user + " for UID " + uid
        + " from the native implementation");
    cUid = new CachedUid(user, now);
    uidCache.put(uid, cUid);
    return user;
  }
}
 
源代码20 项目: hadoop   文件: TestAtomicFileOutputStream.java
// Expects close() to fail with "failure in native rename" once the
// destination directory has been made non-writable. Windows-only.
@Test
public void testFailToRename() throws IOException {
  assumeTrue(Shell.WINDOWS);
  OutputStream fos = null;
  try {
    fos = new AtomicFileOutputStream(DST_FILE);
    fos.write(TEST_STRING.getBytes());
    // Make the target dir non-writable so the rename on close fails.
    FileUtil.setWritable(TEST_DIR, false);
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    try {
      fos.close();
    } finally {
      // Null out so the outer cleanup does not attempt a second close.
      fos = null;
    }
  } finally {
    IOUtils.cleanup(null, fos);
    // Restore writability so subsequent tests can use the directory.
    FileUtil.setWritable(TEST_DIR, true);
  }
}
 
源代码21 项目: hadoop   文件: TestLocalDirAllocator.java
/**
 * Test getLocalPathToRead() returns correct filename and "file" schema.
 *
 * @throws IOException
 */
@Test (timeout = 30000)
public void testGetLocalPathToRead() throws IOException {
  // Permission handling in the cleanup below is POSIX-specific.
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    assertTrue(localFs.mkdirs(new Path(dir)));
    // Write a temp file, then look it up again through the allocator.
    File f1 = dirAllocator.createTmpFileForWrite(FILENAME, SMALL_FILE_SIZE,
        conf);
    Path p1 = dirAllocator.getLocalPathToRead(f1.getName(), conf);
    assertEquals(f1.getName(), p1.getName());
    assertEquals("file", p1.getFileSystem(conf).getUri().getScheme());
  } finally {
    // Restore write permission so rmBufferDirs() can clean up.
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
源代码22 项目: big-c   文件: HttpServer2.java
/**
 * Builds the default Jetty channel connector: bounded accept queue,
 * no reverse-DNS on clients, heap buffers, 64KB header buffer.
 */
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector conn = new SelectChannelConnectorWithSafeStartup();
  conn.setLowResourceMaxIdleTime(10000);
  conn.setAcceptQueueSize(128);
  conn.setResolveNames(false);
  conn.setUseDirectBuffers(false);
  // result of setting the SO_REUSEADDR flag is different on Windows
  // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
  // without this 2 NN's can start on the same machine and listen on
  // the same port with indeterminate routing of incoming requests to them
  if (Shell.WINDOWS) {
    conn.setReuseAddress(false);
  }
  conn.setHeaderBufferSize(64 * 1024);
  return conn;
}
 
源代码23 项目: hadoop   文件: Stat.java
/**
 * Whether Stat is supported on the current platform.
 * @return true on the operating systems the stat-based implementation
 *         supports (Linux, FreeBSD or Mac), false otherwise
 */
public static boolean isAvailable() {
  // Return the boolean expression directly instead of the redundant
  // if/return-true/return-false form.
  return Shell.LINUX || Shell.FREEBSD || Shell.MAC;
}
 
源代码24 项目: big-c   文件: TestDistributedShell.java
// Runs the distributed-shell client with an AM whose name suggests it is
// rigged to fail container launches (ContainerLaunchFailAppMaster), and
// asserts the overall run reports failure.
@Test(timeout=90000)
public void testContainerLaunchFailureHandling() throws Exception {
  // Distributed-shell arguments; the shell command is OS-specific.
  String[] args = {
    "--jar",
    APPMASTER_JAR,
    "--num_containers",
    "2",
    "--shell_command",
    Shell.WINDOWS ? "dir" : "ls",
    "--master_memory",
    "512",
    "--container_memory",
    "128"
  };

  LOG.info("Initializing DS Client");
  Client client = new Client(ContainerLaunchFailAppMaster.class.getName(),
    new Configuration(yarnCluster.getConfig()));
  boolean initSuccess = client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result = client.run();

  LOG.info("Client run completed. Result=" + result);
  // Launch failures must surface as an unsuccessful application run.
  Assert.assertFalse(result);

}
 
源代码25 项目: RDFS   文件: TestLocalDirAllocator.java
/** Two buffer dirs. The first dir exists & is on a read-only disk; 
 * The second dir exists & is RW
 * @throws Exception
 */
public void test1() throws Exception {
  // POSIX-permission-specific behaviour; skipped on Windows.
  if (isWindows) return;
  try {
    conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
    assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
    BUFFER_ROOT.setReadOnly();
    // Both allocations must fall through to the writable dir (index 2);
    // done twice to confirm the read-only dir stays skipped.
    validateTempDirCreation(2);
    validateTempDirCreation(2);
  } finally {
    // Restore write access so rmBufferDirs() can delete the dirs.
    Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
 
源代码26 项目: hadoop-gpu   文件: UnixUserGroupInformation.java
/** Get current user's name from Unix by running the command whoami.
 *
 * @return current user's name
 * @throws IOException if encounter any error while running the command
 */
static String getUnixUserName() throws IOException {
  String[] tokens = executeShellCommand(
      new String[]{Shell.USER_NAME_COMMAND});
  // whoami must produce exactly one token: the user name itself.
  if (tokens.length != 1) {
    throw new IOException("Expect one token as the result of " + 
        Shell.USER_NAME_COMMAND + ": " + toString(tokens));
  }
  return tokens[0];
}
 
/**
 * Executes chmod on the specified file, passing in the mode string 'modstr'
 * which may be e.g. "a+x" or "0600", etc.
 * @throws IOException if chmod failed.
 */
public static void setFilePermissions(File file, String modstr)
    throws IOException {
  // Java offers no portable chmod API, so shell out to chmod(1).
  try {
    Shell.execCommand("chmod", modstr, file.toString());
  } catch (IOException ioe) {
    // Shell.execCommand throws IOException whenever chmod exits non-zero.
    LOG.error("Could not chmod " + modstr + " " + file.toString());
    throw new IOException("Could not ensure password file security.", ioe);
  }
}
 
源代码28 项目: hadoop-gpu   文件: TestLocalDirAllocator.java
/** Two buffer dirs. The first dir exists & is on a read-only disk; 
 * The second dir exists & is RW
 * @throws Exception
 */
public void test1() throws Exception {
  // POSIX-permission-specific behaviour; skipped on Windows.
  if (isWindows) return;
  try {
    conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
    assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
    BUFFER_ROOT.setReadOnly();
    // Both allocations must fall through to the writable dir (index 2);
    // done twice to confirm the read-only dir stays skipped.
    validateTempDirCreation(2);
    validateTempDirCreation(2);
  } finally {
    // Restore write access so rmBufferDirs() can delete the dirs.
    Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
 
源代码29 项目: RDFS   文件: UnixUserGroupInformation.java
/**
 * Runs the given shell command and returns its standard output split on
 * whitespace, one array element per token.
 *
 * @param command command and arguments to execute
 * @return the whitespace-separated tokens of the command's output
 * @throws IOException if the command fails to run
 */
private static String[] executeShellCommand(String[] command)
throws IOException {
  String output = Shell.execCommand(command);
  StringTokenizer tokenizer = new StringTokenizer(output);
  String[] tokens = new String[tokenizer.countTokens()];
  int idx = 0;
  while (tokenizer.hasMoreTokens()) {
    tokens[idx++] = tokenizer.nextToken();
  }
  return tokens;
}
 
源代码30 项目: hadoop   文件: ShellBasedUnixGroupsMapping.java
/**
 * Get the current user's group list from Unix by running the command 'groups'
 * NOTE. For non-existing user it will return EMPTY list
 * @param user user name
 * @return the groups list that the <code>user</code> belongs to. The primary
 *         group is returned first.
 * @throws IOException if encounter any error when running the command
 */
private static List<String> getUnixGroups(final String user) throws IOException {
  String commandOutput = "";
  try {
    commandOutput = Shell.execCommand(Shell.getGroupsForUserCommand(user));
  } catch (ExitCodeException e) {
    // if we didn't get the group - just return empty list;
    LOG.warn("got exception trying to get groups for user " + user + ": "
        + e.getMessage());
    return new LinkedList<String>();
  }

  List<String> groups = new LinkedList<String>();
  StringTokenizer tokenizer =
      new StringTokenizer(commandOutput, Shell.TOKEN_SEPARATOR_REGEX);
  while (tokenizer.hasMoreTokens()) {
    groups.add(tokenizer.nextToken());
  }

  // Drop a single repeat of the primary group (first element) from the
  // supplementary groups so it appears only once, in first position.
  if (!Shell.WINDOWS) {
    for (int i = 1; i < groups.size(); i++) {
      if (groups.get(i).equals(groups.get(0))) {
        groups.remove(i);
        break;
      }
    }
  }

  return groups;
}