org.apache.hadoop.fs.Path#toUri() Code Examples

The following are code examples of org.apache.hadoop.fs.Path#toUri(), collected from open-source projects.
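
Before the project examples, here is a minimal standalone sketch (paths invented for illustration) of what Path#toUri() exposes for qualified and scheme-less paths:

import org.apache.hadoop.fs.Path;
import java.net.URI;

public class PathToUriDemo {
    public static void main(String[] args) {
        // A fully qualified path keeps its scheme and authority in the URI.
        URI qualified = new Path("hdfs://namenode:8020/user/data").toUri();
        System.out.println(qualified.getScheme());    // hdfs
        System.out.println(qualified.getAuthority()); // namenode:8020
        System.out.println(qualified.getPath());      // /user/data

        // A scheme-less path yields a null scheme and authority; several of the
        // examples below rely on exactly this check to choose a file system.
        URI bare = new Path("data/output").toUri();
        System.out.println(bare.getScheme());         // null
        System.out.println(bare.getPath());           // data/output
    }
}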

Example 1  Project: spork  File: PlanHelper.java
/**
 * Creates a relative path that can be used to build a temporary
 * place to store the output from a number of map-reduce tasks.
 */
public static String makeStoreTmpPath(String orig) {
    Path path = new Path(orig);
    URI uri = path.toUri().normalize(); // URI is immutable; normalize() returns a new instance

    String pathStr = uri.getPath();
    if (path.isAbsolute()) {
        // An absolute URI path already starts with '/', so no separator is needed.
        return new Path("abs" + pathStr).toString();
    } else {
        return new Path("rel/" + pathStr).toString();
    }
    }
}
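
A sketch of the two branches with invented inputs; the absolute URI path already begins with '/', which is why only the relative branch adds a separator:

// makeStoreTmpPath("/tmp/out") -> "abs/tmp/out"   ("abs" + "/tmp/out")
// makeStoreTmpPath("data/out") -> "rel/data/out"  ("rel/" + "data/out")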
 
Example 2

@BeforeMethod
public void setup() throws Exception {
    jksPath = new Path(Files.createTempDirectory("tempproviders").toString(), "test.jks");
    providerUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + jksPath.toUri();

    String baseUrl = String.format("https://localhost:%d/", securePort);

    DefaultClientConfig config = new DefaultClientConfig();
    Client client = Client.create(config);
    service = client.resource(UriBuilder.fromUri(baseUrl).build());
}
 
Example 3  Project: incubator-gobblin  File: JobConfigurationUtils.java
/**
 * Load the properties from the specified file into a {@link Properties} object.
 *
 * @param fileName the name of the file to load properties from
 * @param conf configuration object to determine the file system to be used
 * @return a new {@link Properties} instance
 */
public static Properties fileToProperties(String fileName, Configuration conf)
    throws IOException, ConfigurationException {

  PropertiesConfiguration propsConfig = new PropertiesConfiguration();
  Path filePath = new Path(fileName);
  URI fileURI = filePath.toUri();

  if (fileURI.getScheme() == null && fileURI.getAuthority() == null) {
    propsConfig.load(FileSystem.getLocal(conf).open(filePath));
  } else {
    propsConfig.load(filePath.getFileSystem(conf).open(filePath));
  }
  return ConfigurationConverter.getProperties(propsConfig);
}
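
A hedged usage sketch (file locations invented): a bare path hits the local branch, while a qualified one resolves its own file system:

// Scheme and authority are null -> FileSystem.getLocal(conf):
// Properties local = fileToProperties("/etc/gobblin/job.properties", conf);
// Scheme "hdfs" -> filePath.getFileSystem(conf):
// Properties remote = fileToProperties("hdfs://nn:8020/jobs/job.properties", conf);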
 
Example 4  Project: ignite  File: HadoopRawLocalFileSystem.java
/** {@inheritDoc} */
@Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufSize,
    short replication, long blockSize, Progressable progress) throws IOException {
    File file = convert(f);

    if (!overwrite && !file.createNewFile())
        throw new IOException("Failed to create new file: " + f.toUri());

    return out(file, false, bufSize);
}
 
Example 5  Project: hraven  File: TestFileLister.java
/**
 * Removes a conf file that has already been put in the pruned list.
 *
 * @throws IOException
 */
@Test
public void testPruneFileListRemovingConfFromPruneList() throws IOException {

  long maxFileSize = 20L;
  FileStatus[] origList = new FileStatus[2];
  FileSystem hdfs = FileSystem.get(UTIL.getConfiguration());
  Path inputPath = new Path("/inputdir_filesize_pruneList");
  boolean os = hdfs.mkdirs(inputPath);
  assertTrue(os);
  assertTrue(hdfs.exists(inputPath));

  Path relocationPath = new Path("/relocation_filesize_pruneList");
  os = hdfs.mkdirs(relocationPath);
  assertTrue(os);
  assertTrue(hdfs.exists(relocationPath));

  Path emptyConfFile = new Path(inputPath.toUri() + "/" + "job_1329348432655_0001_conf.xml");
  os = hdfs.createNewFile(emptyConfFile);
  assertTrue(os);
  assertTrue(hdfs.exists(emptyConfFile));
  origList[0] = hdfs.getFileStatus(emptyConfFile);

  final String JOB_HISTORY_FILE_NAME =
      "src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
  File jobHistoryfile = new File(JOB_HISTORY_FILE_NAME);
  Path srcPath = new Path(jobHistoryfile.toURI());
  hdfs.copyFromLocalFile(srcPath, inputPath);
  Path expPath = new Path(inputPath.toUri() + "/" + srcPath.getName());
  assertTrue(hdfs.exists(expPath));
  origList[1] = hdfs.getFileStatus(expPath);

  FileStatus [] prunedList = FileLister.pruneFileListBySize(maxFileSize, origList, hdfs, inputPath);
  assertNotNull(prunedList);
  assertTrue(prunedList.length == 0);
}
 
Example 6  Project: Hadoop-BAM  File: TestCRAMInputFormat.java
@Test
public void testMapReduceJob() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CRAMInputFormat.REFERENCE_SOURCE_PATH_PROPERTY, reference);

  FileSystem fileSystem = FileSystem.get(conf);
  Path inputPath = new Path(input);
  Path outputPath = fileSystem.makeQualified(new Path("target/out"));
  fileSystem.delete(outputPath, true);

  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, inputPath);
  job.setInputFormatClass(CRAMInputFormat.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(SAMRecordWritable.class);
  job.setNumReduceTasks(0);
  FileOutputFormat.setOutputPath(job, outputPath);

  boolean success = job.waitForCompletion(true);
  assertTrue(success);

  List<String> samStrings = new ArrayList<String>();
  SamReader samReader = SamReaderFactory.makeDefault()
      .referenceSequence(new File(URI.create(reference))).open(new File(input));
  for (SAMRecord r : samReader) {
    samStrings.add(r.getSAMString().trim());
  }

  File outputFile = new File(new File(outputPath.toUri()), "part-m-00000");
  int index = 0;
  try (BufferedReader br = new BufferedReader(new FileReader(outputFile))) {
    String line;
    while ((line = br.readLine()) != null) {
      String value = line.substring(line.indexOf("\t") + 1); // ignore the key
      assertEquals(samStrings.get(index++), value);
    }
  }
}
 
Example 7  Project: datawave  File: VisitorFunction.java
protected URI getFstHdfsQueryCacheUri(ShardQueryConfiguration config, Query settings) {
    if (config.getIvaratorFstHdfsBaseURIs() != null) {
        String[] choices = StringUtils.split(config.getIvaratorFstHdfsBaseURIs(), ',');
        int index = new Random().nextInt(choices.length);
        Path path = new Path(choices[index], settings.getId().toString());
        return path.toUri();
    }
    return null;
}
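
The base URIs are comma separated and one is picked at random before the query id is appended as a child path. A self-contained sketch with invented values:

import org.apache.hadoop.fs.Path;
import java.util.Random;

public class FstCacheUriSketch {
    public static void main(String[] args) {
        // Hypothetical base URIs and query id.
        String[] choices = "hdfs://nn/ivarators/a,hdfs://nn/ivarators/b".split(",");
        Path path = new Path(choices[new Random().nextInt(choices.length)], "q-42");
        System.out.println(path.toUri()); // e.g. hdfs://nn/ivarators/a/q-42
    }
}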
 
Example 8  Project: big-c  File: FTPFileSystem.java
/**
 * Probe for a path being a parent of another
 * @param parent parent path
 * @param child possible child path
 * @return true if the parent's path matches the start of the child's
 */
private boolean isParentOf(Path parent, Path child) {
  URI parentURI = parent.toUri();
  String parentPath = parentURI.getPath();
  if (!parentPath.endsWith("/")) {
    parentPath += "/";
  }
  URI childURI = child.toUri();
  String childPath = childURI.getPath();
  return childPath.startsWith(parentPath);
}
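
The appended slash is what keeps /foo from wrongly counting as a parent of /foobar. A standalone copy of the logic (class name invented) that can be run directly:

import org.apache.hadoop.fs.Path;

public class IsParentOfDemo {
    // Same comparison as the private method above, duplicated for illustration.
    static boolean isParentOf(Path parent, Path child) {
        String parentPath = parent.toUri().getPath();
        if (!parentPath.endsWith("/")) {
            parentPath += "/";
        }
        return child.toUri().getPath().startsWith(parentPath);
    }

    public static void main(String[] args) {
        System.out.println(isParentOf(new Path("/foo"), new Path("/foo/bar"))); // true
        System.out.println(isParentOf(new Path("/foo"), new Path("/foobar")));  // false
    }
}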
 
Example 9  Project: pentaho-hadoop-shims  File: S3NCredentialUtils.java
public static void applyS3CredentialsToHadoopConfigurationIfNecessary( String filename, Configuration conf ) {
  Path outputFile = new Path( scrubFilePathIfNecessary( filename ) );
  URI uri = outputFile.toUri();
  String scheme = uri != null ? uri.getScheme() : null;
  if ( scheme != null && scheme.equals( S3NSCHEME ) ) {
    AWSCredentials credentials = DefaultAWSCredentialsProviderChain.getInstance().getCredentials();
    conf.set( "fs.s3n.awsAccessKeyId", credentials.getAWSAccessKeyId() );
    conf.set( "fs.s3n.awsSecretAccessKey", credentials.getAWSSecretKey() );
    conf.set( "fs.s3.buffer.dir", System.getProperty( "java.io.tmpdir" ) );
  }
}
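
Assuming S3NSCHEME holds the "s3n" scheme string, the guard means only s3n paths trigger credential injection; a hedged illustration with invented paths:

// applyS3CredentialsToHadoopConfigurationIfNecessary("s3n://bucket/out", conf)
//   -> scheme is "s3n": access key, secret key and buffer dir are set on conf
// applyS3CredentialsToHadoopConfigurationIfNecessary("/local/out", conf)
//   -> scheme is null: the configuration is left untouched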
 
Example 10  Project: incubator-tajo  File: TestCatalogConstants.java
@BeforeClass
public static void setUp() throws Exception {
  conf = new TajoConf();
  Path testDir = CommonTestingUtil.getTestDir("target/test-data/TestDBSTore");
  File absolutePath = new File(testDir.toUri());
  conf.set(CatalogConstants.DEPRECATED_CATALOG_URI, "jdbc:derby:"+absolutePath.getAbsolutePath()+"/db;create=true");
  LOG.info("derby repository is set to "+conf.get(CatalogConstants.DEPRECATED_CATALOG_URI));
  store = new DerbyStore(conf);
}
 
Example 11  Project: circus-train  File: S3MapReduceCpCopier.java
private URI toURI(Path replicaDataLocation) {
  if (Boolean.parseBoolean((String) copierOptions.get(CopierOptions.COPY_DESTINATION_IS_FILE))) {
    return replicaDataLocation.toUri();
  } else {
    return toDirectoryUri(replicaDataLocation);
  }
}
 
Example 12  Project: circus-train  File: CopyMapper.java
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
  URI targetUri = targetPath.toUri();
  String bucketName = PathUtil.toBucketName(targetUri);
  String key = PathUtil.toBucketKey(targetUri);

  Path sourcePath = sourceFileStatus.getPath();

  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(sourceFileStatus.getLen());
  if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }
  return new S3UploadDescriptor(sourcePath, bucketName, key, metadata);
}
 
Example 13  Project: circus-train  File: FileSystemPathResolver.java
public Path resolveScheme(Path path) {
  try {
    URI uri = path.toUri();
    if (isEmpty(uri.getScheme())) {
      String scheme = FileSystem.get(configuration).getScheme();
      Path result = new Path(new URI(scheme, uri.getUserInfo(), uri.getHost(), uri.getPort(), uri.getPath(),
          uri.getQuery(), uri.getFragment()).toString());
      LOG.info("Added scheme {} to path {}. Resulting path is {}", scheme, path, result);
      return result;
    }
  } catch (URISyntaxException | IOException e) {
    throw new CircusTrainException(e);
  }
  return path;
}
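
In effect (default file system and paths invented):

// resolveScheme(new Path("/warehouse/t1"))     -> rebuilt with the default FS scheme
// resolveScheme(new Path("s3a://bucket/w/t1")) -> returned unchanged, scheme already set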
 
Example 14  Project: incubator-tajo  File: TestDBStore.java
@BeforeClass
public static void setUp() throws Exception {
  conf = new TajoConf();
  Path testDir = CommonTestingUtil.getTestDir("target/test-data/TestDBSTore");
  File absolutePath = new File(testDir.toUri());
  conf.set(CatalogConstants.CATALOG_URI, "jdbc:derby:"+absolutePath.getAbsolutePath()+"/db;create=true");
  LOG.info("derby repository is set to "+conf.get(CatalogConstants.CATALOG_URI));
  store = new DerbyStore(conf);
}
 
Example 15

@Override
protected void checkPath(Path path) {
  URI uri = path.toUri();
  String scheme = uri.getScheme();
  // Only check that the scheme matches. The authority and path will be
  // validated later.
  if (scheme == null || scheme.equalsIgnoreCase(getScheme())) {
    return;
  }
  String msg = String.format(
      "Wrong FS scheme: %s, in path: %s, expected scheme: %s",
      scheme, path, getScheme());
  throw new IllegalArgumentException(msg);
}
 
Example 16  Project: kite  File: FileSystemPartitionView.java
public PartitionKeyPredicate(Path root, Path location) {
  this.root = root;
  this.location = location.toUri();
}
 
Example 17  Project: hraven  File: JobFilePartitioner.java
@Override
public int run(String[] args) throws Exception {

  myConf = getConf();

  // Presume these are all HDFS paths, even when accessed as file://
  hdfs = FileSystem.get(myConf);

  // Grab input args and allow for -Dxyz style arguments
  String[] otherArgs = new GenericOptionsParser(myConf, args)
      .getRemainingArgs();

  // Grab the arguments we're looking for.
  CommandLine commandLine = parseArgs(otherArgs);

  // Grab the input path argument
  input = commandLine.getOptionValue("i");
  LOG.info("input=" + input);

  // Grab the output path argument
  String output = commandLine.getOptionValue("o");
  LOG.info("output=" + output);

  skipExisting = commandLine.hasOption("s");
  LOG.info("skipExisting=" + skipExisting);

  moveFiles = commandLine.hasOption("m");
  LOG.info("moveFiles=" + moveFiles);

  if (skipExisting && moveFiles) {
    throw new IllegalArgumentException(
        "Cannot use both options skipExisting and move simultaneously.");
  }

  if (commandLine.hasOption("x")) {
    try {
      maXretention = Integer.parseInt(commandLine.getOptionValue("x"));
    } catch (NumberFormatException nfe) {
      throw new IllegalArgumentException(
          "maXretention option -x is is not a valid number: "
              + commandLine.getOptionValue("x"), nfe);
    }
    // Additional check
    if (maXretention < 0) {
      throw new IllegalArgumentException(
          "Cannot retain less than 0 files. Specified maXretention option -x is: "
              + commandLine.getOptionValue("x"));
    }
    LOG.info("maXretention=" + maXretention);
    if (moveFiles) {
      throw new IllegalArgumentException(
          "Cannot use both options maXretention and move simultaneously.");
    }
  } else {
    maXretention = Integer.MAX_VALUE;
  }

  outputPath = new Path(output);
  FileStatus outputFileStatus = hdfs.getFileStatus(outputPath);

  if (!outputFileStatus.isDir()) {
    throw new IOException("Output is not a directory"
        + outputFileStatus.getPath().getName());
  }

  Path inputPath = new Path(input);
  URI inputURI = inputPath.toUri();
  String inputScheme = inputURI.getScheme();

  LOG.info("input scheme is: " + inputScheme);

  // If the input directory is HDFS, process it as such. Assume a missing
  // scheme means HDFS.
  if ((inputScheme == null)
      || (hdfs.getUri().getScheme().equals(inputScheme))) {
    processHDFSSources(inputPath);
  } else if (inputScheme.equals("file")) {
    if (moveFiles) {
      throw new IllegalArgumentException(
          "Cannot move files that are not already in hdfs. Input is not HDFS: "
              + input);
    }
    processPlainFileSources(inputURI);
  } else {
    throw new IllegalArgumentException(
        "Cannot process files from this URI scheme: " + inputScheme);
  }

  Statistics statistics = FileSystem.getStatistics(outputPath.toUri()
      .getScheme(), hdfs.getClass());
  if (statistics != null) {
    LOG.info("HDFS bytes read: " + statistics.getBytesRead());
    LOG.info("HDFS bytes written: " + statistics.getBytesWritten());
    LOG.info("HDFS read ops: " + statistics.getReadOps());
    LOG.info("HDFS large read ops: " + statistics.getLargeReadOps());
    LOG.info("HDFS write ops: " + statistics.getWriteOps());
  }

  return 0;
}
 
Example 18  Project: tajo  File: BenchmarkSort.java
@Setup
public void setup() throws Exception {
  this.conf = new TajoConf();
  util = new TajoTestingCluster();
  util.startCatalogCluster();
  catalog = util.getCatalogService();
  testDir = CommonTestingUtil.getTestDir(TEST_PATH);
  catalog.createTablespace(DEFAULT_TABLESPACE_NAME, testDir.toUri().toString());
  catalog.createDatabase(TajoConstants.DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
  conf.setVar(TajoConf.ConfVars.WORKER_TEMPORAL_DIR, testDir.toString());

  Schema schema = SchemaBuilder.builder().addAll(new Column[] {
      new Column("col0", Type.INT8),
      new Column("col1", Type.INT4),
      new Column("col2", Type.INT2),
      new Column("col3", Type.DATE),
      new Column("col4", Type.TIMESTAMP),
      new Column("col5", Type.TIME),
      new Column("col6", Type.FLOAT4),
      new Column("col7", Type.FLOAT8),
      new Column("col8", Type.INT8),
      new Column("col9", Type.INT8),
      new Column("col10", Type.INT8),
      new Column("col11", Type.INT8),
      new Column("col12", Type.INT8),
      new Column("col13", Type.INT8),
  }).build();

  TableMeta employeeMeta = CatalogUtil.newTableMeta(BuiltinStorages.TEXT, conf);
  Path employeePath = new Path(testDir, "employee.csv");
  Appender appender = ((FileTablespace) TablespaceManager.getLocalFs())
      .getAppender(employeeMeta, schema, employeePath);
  appender.enableStats();
  appender.init();
  VTuple tuple = new VTuple(schema.size());
  for (int i = 0; i < numTuple; i++) {
    if (rnd.nextInt(10000) == 0) {
      tuple.put(new Datum[] {
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get(),
          NullDatum.get()
      });
    } else {
      tuple.put(new Datum[]{
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt4(rnd.nextInt()),
          DatumFactory.createInt2((short) rnd.nextInt(Short.MAX_VALUE)),
          DatumFactory.createDate(Math.abs(rnd.nextInt())),
          DatumFactory.createTimestamp(Math.abs(rnd.nextLong())),
          DatumFactory.createTime(Math.abs(rnd.nextLong())),
          DatumFactory.createFloat4(rnd.nextFloat()),
          DatumFactory.createFloat8(rnd.nextDouble()),
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt8(rnd.nextLong()),
          DatumFactory.createInt8(rnd.nextLong())
      });
    }
    appender.addTuple(tuple);
  }

  appender.flush();
  appender.close();

  employee = new TableDesc("default.employee", schema, employeeMeta, employeePath.toUri());
  catalog.createTable(employee);
  analyzer = new SQLAnalyzer();
  planner = new LogicalPlanner(catalog, TablespaceManager.getInstance());
  optimizer = new LogicalOptimizer(conf, catalog, TablespaceManager.getInstance());
}
 
Example 19  Project: tajo  File: TestJoinOrderAlgorithm.java
@BeforeClass
public static void setUp() throws Exception {
  util = new TajoTestingCluster();
  util.startCatalogCluster();
  catalog = util.getCatalogService();
  catalog.createTablespace(DEFAULT_TABLESPACE_NAME, "hdfs://localhost:1234/warehouse");
  catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
  for (FunctionDesc funcDesc : FunctionLoader.findLegacyFunctions()) {
    catalog.createFunction(funcDesc);
  }

  Schema schema = SchemaBuilder.builder()
      .add("name", Type.TEXT)
      .add("empid", Type.INT4)
      .add("deptname", Type.TEXT)
      .build();

  Schema schema2 = SchemaBuilder.builder()
      .add("deptname", Type.TEXT)
      .add("manager", Type.TEXT)
      .build();

  Schema schema3 = SchemaBuilder.builder()
      .add("deptname", Type.TEXT)
      .add("score", Type.INT4)
      .add("phone", Type.INT4)
      .build();

  TableMeta meta = CatalogUtil.newTableMeta(BuiltinStorages.TEXT, util.getConfiguration());
  TableDesc people = new TableDesc(
      IdentifierUtil.buildFQName(TajoConstants.DEFAULT_DATABASE_NAME, "employee"), schema, meta,
      CommonTestingUtil.getTestDir().toUri());
  catalog.createTable(people);

  TableDesc student =
      new TableDesc(
          IdentifierUtil.buildFQName(DEFAULT_DATABASE_NAME, "dept"), schema2, "TEXT", new KeyValueSet(),
          CommonTestingUtil.getTestDir().toUri());
  catalog.createTable(student);

  TableDesc score =
      new TableDesc(
          IdentifierUtil.buildFQName(DEFAULT_DATABASE_NAME, "score"), schema3, "TEXT", new KeyValueSet(),
          CommonTestingUtil.getTestDir().toUri());
  catalog.createTable(score);

  ///////////////////////////////////////////////////////////////////////////
  // Create tables large enough to cause overflow in JoinOrderOptimizer.
  Schema schema4 = SchemaBuilder.builder()
      .add("deptname", Type.TEXT)
      .add("manager", Type.TEXT)
      .build();
  // Set store type as FAKEFILE to prevent auto update of physical information in LogicalPlanner.updatePhysicalInfo()
  TableMeta largeTableMeta = CatalogUtil.newTableMeta("FAKEFILE", util.getConfiguration());
  TableDesc largeDept;
  TableStats largeTableStats;
  FileSystem fs = FileSystem.getLocal(util.getConfiguration());
  for (int i = 0; i < 6; i++) {
    Path tablePath = new Path(CommonTestingUtil.getTestDir(), "" + (i+1));
    fs.create(tablePath).close(); // create an empty marker file and release the stream
    largeDept =
        new TableDesc(
            IdentifierUtil.buildFQName(DEFAULT_DATABASE_NAME, "large_dept"+(i+1)), schema4, largeTableMeta,
            tablePath.toUri());
    largeTableStats = new TableStats();
    largeTableStats.setNumBytes(StorageUnit.PB * (i+1));  // (i + 1) PB
    largeDept.setStats(largeTableStats);
    catalog.createTable(largeDept);
  }
  ///////////////////////////////////////////////////////////////////////////

  sqlAnalyzer = new SQLAnalyzer();
  planner = new LogicalPlanner(catalog, TablespaceManager.getInstance());
  optimizer = new LogicalOptimizer(util.getConfiguration(), catalog, TablespaceManager.getInstance());

  defaultContext = LocalTajoTestingUtility.createDummyContext(util.getConfiguration());
}
 
Example 20  Project: pentaho-hadoop-shims  File: HCPConf.java
@Override public Path mapPath( Path pvfsPath, Path realFsPath ) {
  URI uri = realFsPath.toUri();
  return new Path( pvfsPath.toUri().getScheme(),
    getConnectionName( pvfsPath ), "/" + uri.getPath() );
}