org.apache.hadoop.fs.Path#toString() Code Examples

Listed below are example usages of org.apache.hadoop.fs.Path#toString(), collected from open-source projects on GitHub; each entry names the project and file the snippet comes from.
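Since Path#toString() returns the path's full string form, including the scheme and authority when the path is qualified, it is the usual way to hand a Path to string-based APIs, which is exactly what the examples below do. Here is a minimal sketch of that behavior, assuming only hadoop-common on the classpath (the demo class name is made up for illustration):

import org.apache.hadoop.fs.Path;

public class PathToStringDemo {
  public static void main(String[] args) {
    // A fully qualified path keeps its scheme and authority in string form.
    Path qualified = new Path("hdfs://namenode:8020/user/data/input.txt");
    System.out.println(qualified.toString()); // hdfs://namenode:8020/user/data/input.txt

    // A child path built from a relative parent stringifies without a scheme.
    Path child = new Path(new Path("data"), "part-0");
    System.out.println(child.toString()); // data/part-0
  }
}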

Example 1  Project: hbase  File: TableSnapshotInputFormatImpl.java
public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List<String> locations,
    Scan scan, Path restoreDir) {
  this.htd = htd;
  this.regionInfo = regionInfo;
  if (locations == null || locations.isEmpty()) {
    this.locations = new String[0];
  } else {
    this.locations = locations.toArray(new String[locations.size()]);
  }
  try {
    this.scan = scan != null ? TableMapReduceUtil.convertScanToString(scan) : "";
  } catch (IOException e) {
    LOG.warn("Failed to convert Scan to String", e);
  }

  this.restoreDir = restoreDir.toString();
}
 
Example 2  Project: cephfs-hadoop  File: CephFileSystem.java
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except it fails if the parent directory
* does not already exist.
* @param path the file name to open
* @param permission the permission to apply to the created file
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize the block size of the file.
* @param progress the progressable used to report progress.
* @throws IOException if the parent directory does not exist or the create fails.
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path path, FsPermission permission,
    boolean overwrite,
    int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {

  path = makeAbsolute(path);

  Path parent = path.getParent();

  if (parent != null) {
    CephStat stat = new CephStat();
    ceph.lstat(parent, stat); // handles FileNotFoundException case
    if (stat.isFile())
      throw new FileAlreadyExistsException(parent.toString());
  }

  return this.create(path, permission, overwrite,
      bufferSize, replication, blockSize, progress);
}
 
Example 3  Project: mrgeo  File: MbVectorTilesInputFormatTest.java
@Test
public void getSplitsZoom14_3() throws Exception
{
  Path dbPath = new Path(input, "AmbulatoryPt.mbtiles");
  MbVectorTilesSettings dbSettings = new MbVectorTilesSettings(dbPath.toString(), new String[] { "ambulatory"}, 14, 3, null);
  MbVectorTilesInputFormat ifmt = new MbVectorTilesInputFormat(dbSettings);
  List<InputSplit> splits = ifmt.getSplits(context);
  Assert.assertNotNull(splits);
  Assert.assertEquals(2, splits.size());
  Assert.assertEquals(4L, ifmt.getRecordCount(conf));
  int count = 0;
  for (InputSplit split: splits) {
    RecordReader<FeatureIdWritable, Geometry> reader = ifmt.createRecordReader(split, context);
    Assert.assertNotNull(reader);
    while (reader.nextKeyValue()) count++;
  }
  Assert.assertEquals(8, count);
}
 
Example 4  Project: incubator-pinot  File: ThirdEyeJob.java
@Override
Properties getJobProperties(Properties inputConfig, String root, String collection,
    DateTime minTime, DateTime maxTime, String inputPaths)
        throws Exception {
  Properties config = new Properties();

  Path aggOutputPath = new Path(getIndexDir(root, collection, minTime, maxTime) + File.separator + AGGREGATION.getName());
  FileSystem fs = FileSystem.get(new Configuration());
  if (fs.exists(aggOutputPath)) {
    inputPaths = aggOutputPath.toString();
  }
  config.setProperty(TopKPhaseConstants.TOPK_PHASE_INPUT_PATH.toString(),
      inputPaths);
  config.setProperty(TopKPhaseConstants.TOPK_PHASE_OUTPUT_PATH.toString(),
      getIndexDir(root, collection, minTime, maxTime) + File.separator
          + TOPK.getName());

  return config;
}
 
Example 5  Project: incubator-gobblin  File: Trash.java
protected void ensureTrashLocationExists(FileSystem fs, Path trashLocation) throws IOException {
  if (fs.exists(trashLocation)) {
    if (!fs.isDirectory(trashLocation)) {
      throw new IOException(String.format("Trash location %s is not a directory.", trashLocation));
    }

    if (!fs.exists(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
      // If trash identifier file is not present, directory might have been created by user.
      // Add trash identifier file only if directory is empty.
      if (fs.listStatus(trashLocation).length > 0) {
        throw new IOException(String.format("Trash directory %s exists, but it does not look like a trash directory. "
            + "File: %s missing and directory is not empty.", trashLocation, TRASH_IDENTIFIER_FILE));
      } else if (!fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE))) {
        throw new IOException(String.format("Failed to create file %s in existing trash directory %s.",
            TRASH_IDENTIFIER_FILE, trashLocation));
      }
    }
  } else if (!(safeFsMkdir(fs, trashLocation.getParent(), ALL_PERM) && safeFsMkdir(fs, trashLocation, PERM)
      && fs.createNewFile(new Path(trashLocation, TRASH_IDENTIFIER_FILE)))) {
    // Failed to create directory or create trash identifier file.
    throw new IOException("Failed to create trash directory at " + trashLocation.toString());
  }
}
 
Example 6  Project: hbase  File: BackupSystemTable.java
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
    final List<Pair<Path, Path>> pairs) {
  List<Put> puts = new ArrayList<>(pairs.size());
  for (Pair<Path, Path> pair : pairs) {
    Path path = pair.getSecond();
    String file = path.toString();
    int lastSlash = file.lastIndexOf("/");
    String filename = file.substring(lastSlash + 1);
    Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
      Bytes.toString(region), BLK_LD_DELIM, filename));
    put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
    put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
    put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, Bytes.toBytes(file));
    put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
    puts.add(put);
    LOG.debug("writing raw bulk path " + file + " for " + table + " " + Bytes.toString(region));
  }
  return puts;
}
 
Example 7  Project: neo4j-mazerunner  File: Writer.java
public static void dispatchJob(GraphDatabaseService db, String type) throws IOException, URISyntaxException {
  // Export the subgraph to HDFS
  Path pt = exportSubgraphToHDFSParallel(db);

  // Serialize processor message
  ProcessorMessage message = new ProcessorMessage(pt.toString(), type, ProcessorMode.Unpartitioned);
  Gson gson = new Gson();
  String strMessage = gson.toJson(message);

  // Send message to the Spark graph processor
  Worker.sendMessage(strMessage);
}
 
Example 8  Project: hadoop-gpu  File: NativeS3FileSystem.java
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  if (!exists(f)) {
    throw new FileNotFoundException(f.toString());
  }
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeS3FsInputStream(store.retrieve(key), key), bufferSize));
}
 
Example 9  Project: phoenix  File: UserDefinedFunctionsIT.java
/**
 * Test creating functions using a dir other than hbase.dynamic.jars.dir
 * @throws Exception
 */
@Test
public void testCreateFunctionNonDynamicJarDir() throws Exception {
    Connection conn = driver.connect(url, EMPTY_PROPS);
    String tableName = "table" + name.getMethodName();

    conn.createStatement().execute("create table " + tableName + "(tenant_id varchar not null, k integer not null, "
            + "firstname varchar, lastname varchar constraint pk primary key(tenant_id,k)) MULTI_TENANT=true");
    String tenantId = "tenId" + name.getMethodName();
    Connection tenantConn = driver.connect(url + ";" + PhoenixRuntime.TENANT_ID_ATTRIB + "=" + tenantId, EMPTY_PROPS);
    Statement stmtTenant = tenantConn.createStatement();
    tenantConn.commit();

    compileTestClass(MY_REVERSE_CLASS_NAME, MY_REVERSE_PROGRAM, 8);
    Path destJarPathOnHDFS = copyJarsFromDynamicJarsDirToDummyHDFSDir("myjar8.jar");

    try {
        String sql =
                "create function myfunction(VARCHAR) returns VARCHAR as 'org.apache.phoenix.end2end."
                        + MY_REVERSE_CLASS_NAME + "' using jar '" + destJarPathOnHDFS.toString()
                        + "'";
        stmtTenant.execute(sql);
        ResultSet rs = stmtTenant.executeQuery("select myfunction(firstname) from " + tableName);
        fail("expecting java.lang.SecurityException");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.getRootCause(e) instanceof SecurityException);
    } finally {
        stmtTenant.execute("drop function myfunction");
    }
}
 
Example 10  Project: jumbune  File: JobUtil.java
/**
 * When injected into a class, this method modifies the job's output path,
 * but only if the output is on HDFS.
 * 
 * @param job
 *            Job whose output path needs to be changed
 */
public static void modifyOutputPath(Job job) throws Exception {
	Path path = FileOutputFormat.getOutputPath(job);
	if (path == null) {
		throw new IllegalArgumentException("Job output path is null, expecting a non-null path value");
	}
	StringBuilder out = new StringBuilder(path.toString());
	out.append(SEPARATOR_UNDERSCORE).append(System.currentTimeMillis());
	FileOutputFormat.setOutputPath(job, new Path(out.toString()));
}
 
Example 11  Project: systemds  File: MatrixReader.java
protected static void checkValidInputFile(FileSystem fs, Path path) 
	throws IOException
{
	//check non-existing file
	if( !fs.exists(path) )	
		throw new IOException("File "+path.toString()+" does not exist on HDFS/LFS.");

	//check for empty file
	if( HDFSTool.isFileEmpty(fs, path) )
		throw new EOFException("Empty input file "+ path.toString() +".");
	
}
 
Example 12  Project: big-c  File: TestMiniMRChildTask.java
private void configure(JobConf conf, Path inDir, Path outDir, String input,
                       Class<? extends Mapper> map, 
                       Class<? extends Reducer> reduce) 
throws IOException {
  // set up the input file system and write input text.
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    // write input into input file
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  // configure the mapred Job which creates a tempfile in map.
  conf.setJobName("testmap");
  conf.setMapperClass(map);
  conf.setReducerClass(reduce);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(0);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
                                    "/tmp")).toString().replace(' ', '+');
  conf.set("test.build.data", TEST_ROOT_DIR);
}
 
Example 13  Project: hadoop  File: WindowsSecureContainerExecutor.java
@Override
protected String[] getRunCommand(String command, String groupId,
    String userName, Path pidFile, Configuration conf) {
  File f = new File(command);
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("getRunCommand: %s exists:%b", 
        command, f.exists()));
  }
  return new String[] { Shell.WINUTILS, "task", "createAsUser", groupId, 
      userName, pidFile.toString(), "cmd /c " + command };
}
 
Example 14  Project: hadoop-gpu  File: TestMultipleOutputs.java
public void setUp() throws Exception {
  super.setUp();
  Path rootDir = getDir(ROOT_DIR);
  Path inDir = getDir(IN_DIR);

  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(rootDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
}
 
Example 15  Project: RDFS  File: HistoryViewer.java
public HistoryViewer(String outputDir, Configuration conf, boolean printAll)
throws IOException {
  this.conf = conf;
  this.printAll = printAll;
  Path output = new Path(outputDir);
  historyLogDir = new Path(output, "_logs/history");
  try {
    fs = output.getFileSystem(this.conf);
    if (!fs.exists(output)) {
      throw new IOException("History directory " + historyLogDir.toString()
                            + "does not exist");
    }
    Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir,
                                                        jobLogFileFilter));
    if (jobFiles.length == 0) {
      throw new IOException("Not a valid history directory " 
                            + historyLogDir.toString());
    }
    jobLogFile = jobFiles[0].toString();
    String[] jobDetails = 
        JobInfo.decodeJobHistoryFileName(jobFiles[0].getName()).split("_");
    trackerHostName = jobDetails[0];
    trackerStartTime = jobDetails[1];
    jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
    job = new JobHistory.JobInfo(jobId); 
    DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs);
  } catch(Exception e) {
    throw new IOException("Not able to initialize History viewer", e);
  }
}
 
Example 16  Project: big-c  File: TestMiniMRClasspath.java
static void configureWordCount(FileSystem fs, JobConf conf, String input,
    int numMaps, int numReduces, Path inDir, Path outDir) throws IOException {
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes(input);
  file.close();
  FileSystem.setDefaultUri(conf, fs.getUri());
  conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.set("mapred.mapper.class", "testjar.ClassWordCount$MapClass");        
  conf.set("mapred.combine.class", "testjar.ClassWordCount$Reduce");
  conf.set("mapred.reducer.class", "testjar.ClassWordCount$Reduce");
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  //set the tests jar file
  conf.setJarByClass(TestMiniMRClasspath.class);
}
 
Example 17  Project: big-c  File: MapFileOutputFormat.java
public RecordWriter<WritableComparable, Writable> getRecordWriter(FileSystem ignored, JobConf job,
                                    String name, Progressable progress)
  throws IOException {
  // get the path of the temporary output file 
  Path file = FileOutputFormat.getTaskOutputPath(job, name);
  
  FileSystem fs = file.getFileSystem(job);
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(job)) {
    // find the kind of compression to do
    compressionType = SequenceFileOutputFormat.getOutputCompressionType(job);

    // find the right codec
    Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, DefaultCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job);
  }
  
  // ignore the progress parameter, since MapFile is local
  final MapFile.Writer out =
    new MapFile.Writer(job, fs, file.toString(),
                       job.getOutputKeyClass().asSubclass(WritableComparable.class),
                       job.getOutputValueClass().asSubclass(Writable.class),
                       compressionType, codec,
                       progress);

  return new RecordWriter<WritableComparable, Writable>() {

      public void write(WritableComparable key, Writable value)
        throws IOException {

        out.append(key, value);
      }

      public void close(Reporter reporter) throws IOException { out.close();}
    };
}
 
Example 18  Project: RDFS  File: TestHarWithCombineFileInputFormat.java
protected void setUp() throws Exception {
  super.setUp();
  conf = new JobConf();
  dfscluster = new MiniDFSCluster(conf, 1, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(1, fs.getUri().toString(), 1);
  
  hdfsInputPath = new Path(fs.getHomeDirectory(), "test"); 
  archiveDir = new Path(fs.getHomeDirectory(), "test-archive");
  
  filea = new Path(hdfsInputPath, "a");
  fileb = new Path(hdfsInputPath, "b");
  
  // Create the following directory structure
  // ~/test/a
  // ~/test/b/
  // ~/test-archive/foo.har/a (in HAR)
  // ~/test-archive/foo.har/b (in HAR)
  
  fs.mkdirs(hdfsInputPath);
  FSDataOutputStream out = fs.create(filea); 
  out.write("a".getBytes());
  out.close();
  out = fs.create(fileb);
  out.write("b".getBytes());
  out.close();
  
  HadoopArchives har = new HadoopArchives(conf);

  String archiveName = "foo.har";
  String[] args = new String[5];
  args[0] = "-archiveName";
  args[1] = "foo.har";
  args[2] = "-p";
  args[3] = hdfsInputPath.toString();
  args[4] = archiveDir.toString();
  int ret = ToolRunner.run(har, args);
  assertTrue("Failed to create HAR", ret == 0);

  archiveInputPath = 
    new Path("har://" + archiveDir.toUri().getPath(), archiveName);
}
 
Example 19  Project: hadoop  File: WindowsSecureContainerExecutor.java
@Override
public void startLocalizer(LocalizerStartContext ctx) throws IOException,
    InterruptedException {
  Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens();
  InetSocketAddress nmAddr = ctx.getNmAddr();
  String user = ctx.getUser();
  String appId = ctx.getAppId();
  String locId = ctx.getLocId();
  LocalDirsHandlerService dirsHandler = ctx.getDirsHandler();
  List<String> localDirs = dirsHandler.getLocalDirs();
  List<String> logDirs = dirsHandler.getLogDirs();

  Path classpathJarPrivateDir = dirsHandler.getLocalPathForWrite(
      ResourceLocalizationService.NM_PRIVATE_DIR);
  createUserLocalDirs(localDirs, user);
  createUserCacheDirs(localDirs, user);
  createAppDirs(localDirs, user, appId);
  createAppLogDirs(appId, logDirs, user);

  Path appStorageDir = getWorkingDir(localDirs, user, appId);

  String tokenFn = String.format(
      ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
  Path tokenDst = new Path(appStorageDir, tokenFn);
  copyFile(nmPrivateContainerTokensPath, tokenDst, user);

  File cwdApp = new File(appStorageDir.toString());
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("cwdApp: %s", cwdApp));
  }

  List<String> command = new ArrayList<String>();

  //use same jvm as parent
  File jvm = new File(
      new File(System.getProperty("java.home"), "bin"), "java.exe");
  command.add(jvm.toString());

  Path cwdPath = new Path(cwdApp.getPath());

  // Build a temp classpath jar. See ContainerLaunch.sanitizeEnv().
  // Passing CLASSPATH explicitly is *way* too long for command line.
  String classPath = System.getProperty("java.class.path");
  Map<String, String> env = new HashMap<String, String>(System.getenv());
  String[] jarCp = FileUtil.createJarWithClassPath(classPath,
      classpathJarPrivateDir, cwdPath, env);
  String classPathJar = localizeClasspathJar(
      new Path(jarCp[0]), cwdPath, user).toString();
  command.add("-classpath");
  command.add(classPathJar + jarCp[1]);

  String javaLibPath = System.getProperty("java.library.path");
  if (javaLibPath != null) {
    command.add("-Djava.library.path=" + javaLibPath);
  }
  command.addAll(ContainerLocalizer.getJavaOpts(getConf()));

  ContainerLocalizer.buildMainArgs(command, user, appId, locId, nmAddr,
      localDirs);

  String cmdLine = StringUtils.join(command, " ");

  String localizerPid = String.format(LOCALIZER_PID_FORMAT, locId);

  WintuilsProcessStubExecutor stubExecutor = new WintuilsProcessStubExecutor(
      cwdApp.getAbsolutePath(),
      localizerPid, user, "nul:", cmdLine);
  try {
    stubExecutor.execute();
    stubExecutor.validateResult();
  } finally {
    stubExecutor.close();
    try {
      killContainer(localizerPid, Signal.KILL);
    } catch (Throwable e) {
      LOG.warn(String.format(
          "An exception occurred during the cleanup of localizer job %s:%n%s",
          localizerPid,
          org.apache.hadoop.util.StringUtils.stringifyException(e)));
    }
  }
}
 
Example 20  Project: gemfirexd-oss  File: HDFSUtil.java
private static Path getBaseHoplogPath(Path hoplogPath) {
  // Strip the trailing ".tmp" extension; assumes the hoplog path ends with ".tmp".
  String originalFilename = hoplogPath.toString();
  int tmpExtIndex = originalFilename.lastIndexOf(".tmp");
  String trimmedFilename = originalFilename.substring(0, tmpExtIndex);
  return new Path(trimmedFilename);
}