org.apache.hadoop.fs.Path#makeQualified() Code Examples

Listed below are example usages of org.apache.hadoop.fs.Path#makeQualified() collected from open-source projects; each example names the project and file where the original source can be found on GitHub.
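Before the project samples, here is a minimal self-contained sketch (not taken from any of the projects below; the local filesystem and file names are hypothetical) showing the two overloads of Path#makeQualified that appear throughout this page: the older makeQualified(FileSystem), which newer Hadoop releases deprecate, and makeQualified(URI, Path), which takes the filesystem URI and working directory explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical setup: use the local filesystem; a cluster job would use its default FS instead.
    FileSystem fs = FileSystem.getLocal(conf);

    Path relative = new Path("data/input.txt");

    // Older overload: scheme, authority and working directory come from the FileSystem.
    // Deprecated in newer Hadoop releases, but still used by several samples below.
    Path q1 = relative.makeQualified(fs);

    // Newer overload: the caller passes the default URI and working directory explicitly.
    Path q2 = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());

    // Both print a fully qualified path, e.g. file:/home/user/data/input.txt
    System.out.println(q1);
    System.out.println(q2);
  }
}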

Example 1  Project: RDFS  File: KosmosFileSystem.java
public FileStatus getFileStatus(Path path) throws IOException {
    Path absolute = makeAbsolute(path);
    String srep = absolute.toUri().getPath();
    if (!kfsImpl.exists(srep)) {
        throw new FileNotFoundException("File " + path + " does not exist.");
    }
    if (kfsImpl.isDirectory(srep)) {
        // System.out.println("Status of path: " + path + " is dir");
        return new FileStatus(0, true, 1, 0, kfsImpl.getModificationTime(srep),
                              path.makeQualified(this));
    } else {
        // System.out.println("Status of path: " + path + " is file");
        return new FileStatus(kfsImpl.filesize(srep), false,
                              kfsImpl.getReplication(srep),
                              getDefaultBlockSize(),
                              kfsImpl.getModificationTime(srep),
                              path.makeQualified(this));
    }
}
 
Example 2  Project: hbase  File: TestBulkLoadHFiles.java
@Test
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_" + i),
      FAMILY, QUALIFIER, from, to, 1000);
  }

  try {
    BulkLoadHFiles.create(util.getConfiguration())
      .bulkLoad(TableName.valueOf("mytable_testLoadTooMayHFiles"), dir);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage()
      .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
 
Example 3  Project: RDFS  File: S3FileSystem.java
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  Path absolutePath = makeAbsolute(f);
  INode inode = store.retrieveINode(absolutePath);
  if (inode == null) {
    return null;
  }
  if (inode.isFile()) {
    return new FileStatus[] {
      new S3FileStatus(f.makeQualified(this), inode)
    };
  }
  ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
  for (Path p : store.listSubPaths(absolutePath)) {
    ret.add(getFileStatus(p.makeQualified(this)));
  }
  return ret.toArray(new FileStatus[0]);
}
 
Example 4  Project: hadoop  File: S3FileSystem.java
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  Path absolutePath = makeAbsolute(f);
  INode inode = store.retrieveINode(absolutePath);
  if (inode == null) {
    throw new FileNotFoundException("File " + f + " does not exist.");
  }
  if (inode.isFile()) {
    return new FileStatus[] {
      new S3FileStatus(f.makeQualified(this), inode)
    };
  }
  ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
  for (Path p : store.listSubPaths(absolutePath)) {
    ret.add(getFileStatus(p.makeQualified(this)));
  }
  return ret.toArray(new FileStatus[0]);
}
 
Example 5  Project: crail  File: CrailHadoopFileSystem.java
@Override
public FileStatus getFileStatus(Path path) throws IOException {
	CrailNode directFile = null;
	try {
		directFile = dfs.lookup(path.toUri().getRawPath()).get();
	} catch (Exception e) {
		throw new IOException(e);
	}
	if (directFile == null) {
		throw new FileNotFoundException("File does not exist: " + path);
	}
	FsPermission permission = FsPermission.getFileDefault();
	if (directFile.getType().isDirectory()) {
		permission = FsPermission.getDirDefault();
	}
	FileStatus status = new FileStatus(directFile.getCapacity(),
			directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION,
			CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
			directFile.getModificationTime(), permission, CrailConstants.USER,
			CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
	return status;
}
 
Example 6  Project: hadoop-gpu  File: KosmosFileSystem.java
public FileStatus getFileStatus(Path path) throws IOException {
    Path absolute = makeAbsolute(path);
    String srep = absolute.toUri().getPath();
    if (!kfsImpl.exists(srep)) {
        throw new FileNotFoundException("File " + path + " does not exist.");
    }
    if (kfsImpl.isDirectory(srep)) {
        // System.out.println("Status of path: " + path + " is dir");
        return new FileStatus(0, true, 1, 0, kfsImpl.getModificationTime(srep),
                              path.makeQualified(this));
    } else {
        // System.out.println("Status of path: " + path + " is file");
        return new FileStatus(kfsImpl.filesize(srep), false,
                              kfsImpl.getReplication(srep),
                              getDefaultBlockSize(),
                              kfsImpl.getModificationTime(srep),
                              path.makeQualified(this));
    }
}
 
Example 7  Project: hadoop-book  File: TeraSort.java
public int run(String[] args) throws Exception {
  LOG.info("starting");
  JobConf job = (JobConf) getConf();
  Path inputDir = new Path(args[0]);
  inputDir = inputDir.makeQualified(inputDir.getFileSystem(job));
  Path partitionFile = new Path(inputDir, TeraInputFormat.PARTITION_FILENAME);
  URI partitionUri = new URI(partitionFile.toString() +
                             "#" + TeraInputFormat.PARTITION_FILENAME);
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraSort");
  job.setJarByClass(TeraSort.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setInputFormat(TeraInputFormat.class);
  job.setOutputFormat(TeraOutputFormat.class);
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TeraInputFormat.writePartitionFile(job, partitionFile);
  DistributedCache.addCacheFile(partitionUri, job);
  DistributedCache.createSymlink(job);
  job.setInt("dfs.replication", 1);
  TeraOutputFormat.setFinalSync(job, true);
  JobClient.runJob(job);
  LOG.info("done");
  return 0;
}
 
Example 8  Project: hadoop-gpu  File: FileOutputCommitter.java
Path getTempTaskOutputPath(TaskAttemptContext taskContext) {
  JobConf conf = taskContext.getJobConf();
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    Path p = new Path(outputPath,
                   (FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR +
                    "_" + taskContext.getTaskAttemptID().toString()));
    try {
      FileSystem fs = p.getFileSystem(conf);
      return p.makeQualified(fs);
    } catch (IOException ie) {
      LOG.warn(StringUtils.stringifyException(ie));
      return p;
    }
  }
  return null;
}
 
Example 9  Project: big-c  File: S3FileSystem.java
/**
 * FileStatus for S3 file systems. 
 */
@Override
public FileStatus getFileStatus(Path f)  throws IOException {
  INode inode = store.retrieveINode(makeAbsolute(f));
  if (inode == null) {
    throw new FileNotFoundException(f + ": No such file or directory.");
  }
  return new S3FileStatus(f.makeQualified(this), inode);
}
 
Example 10  Project: incubator-retired-blur  File: CsvBlurMapper.java
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  Configuration configuration = context.getConfiguration();
  _autoGenerateRecordIdAsHashOfData = isAutoGenerateRecordIdAsHashOfData(configuration);
  _autoGenerateRowIdAsHashOfData = isAutoGenerateRowIdAsHashOfData(configuration);
  if (_autoGenerateRecordIdAsHashOfData || _autoGenerateRowIdAsHashOfData) {
    try {
      _digest = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      throw new IOException(e);
    }
  }
  _columnNameMap = getFamilyAndColumnNameMap(configuration);
  _separator = new String(Base64.decodeBase64(configuration.get(BLUR_CSV_SEPARATOR_BASE64, _separator)), UTF_8);
  _splitter = Splitter.on(_separator);
  Path fileCurrentlyProcessing = getCurrentFile(context);
  Collection<String> families = configuration.getStringCollection(BLUR_CSV_FAMILY_PATH_MAPPINGS_FAMILIES);
  OUTER: for (String family : families) {
    Collection<String> pathStrCollection = configuration
        .getStringCollection(BLUR_CSV_FAMILY_PATH_MAPPINGS_FAMILY_PREFIX + family);
    for (String pathStr : pathStrCollection) {
      Path path = new Path(pathStr);
      FileSystem fileSystem = path.getFileSystem(configuration);
      path = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
      if (isParent(path, fileCurrentlyProcessing)) {
        _familyFromPath = family;
        _familyNotInFile = true;
        break OUTER;
      }
    }
  }
}
 
Example 11  Project: hbase  File: CommonFSUtils.java
private static boolean isValidWALRootDir(Path walDir, final Configuration c) throws IOException {
  Path rootDir = getRootDir(c);
  FileSystem fs = walDir.getFileSystem(c);
  Path qualifiedWalDir = walDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  if (!qualifiedWalDir.equals(rootDir)) {
    if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) {
      throw new IllegalStateException("Illegal WAL directory specified. " +
          "WAL directories are not permitted to be under the root directory if set.");
    }
  }
  return true;
}
 
Example 12
/** Initialize the OutputStream to the next file to write to.
 */
private void openNextFile() throws IOException {
  StringBuffer sb = new StringBuffer();
  Formatter fmt = new Formatter(sb);
  fmt.format("%05d", this.fileNum++);
  String filename = filePrefix + fmt.toString();
  if (codec != null) {
    filename = filename + codec.getDefaultExtension();
  }
  Path destFile = new Path(destDir, filename);
  FileSystem fs = destFile.getFileSystem(conf);
  LOG.debug("Opening next output file: " + destFile);
  if (fs.exists(destFile)) {
    Path canonicalDest = destFile.makeQualified(fs);
    throw new IOException("Destination file " + canonicalDest
        + " already exists");
  }

  OutputStream fsOut = fs.create(destFile);

  // Count how many actual bytes hit HDFS.
  this.countingFilterStream = new CountingOutputStream(fsOut);

  if (codec != null) {
    // Wrap that in a compressing stream.
    this.writeStream = codec.createOutputStream(this.countingFilterStream);
  } else {
    // Write to the counting stream directly.
    this.writeStream = this.countingFilterStream;
  }
}
 
Example 13  Project: hadoop  File: HftpFileSystem.java
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
  f = f.makeQualified(getUri(), getWorkingDirectory());
  String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
  String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
  URL u = getNamenodeURL(path, query);
  return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u));
}
 
Example 14  Project: hbase  File: TestExportSnapshotV1NoCluster.java
/**
 * Setup for test. Returns path to test data dir. Sets configuration into the passed
 * hctu.getConfiguration.
 */
static Path setup(FileSystem fs, HBaseCommonTestingUtility hctu) throws IOException {
  Path testDir = hctu.getDataTestDir().makeQualified(fs.getUri(), fs.getWorkingDirectory());
  hctu.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  hctu.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  hctu.getConfiguration().setInt("hbase.client.pause", 250);
  hctu.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  hctu.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  hctu.getConfiguration().setInt("mapreduce.map.maxattempts", 10);
  hctu.getConfiguration().set(HConstants.HBASE_DIR, testDir.toString());
  return testDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
 
Example 15  Project: big-c  File: NativeAzureFileSystem.java
private FileStatus newDirectory(FileMetadata meta, Path path) {
  return new FileStatus(
      0,
      true,
      1,
      blockSize,
      meta == null ? 0 : meta.getLastModified(),
      0,
      meta == null ? FsPermission.getDefault() : meta.getPermissionStatus().getPermission(),
      meta == null ? "" : meta.getPermissionStatus().getUserName(),
      meta == null ? "" : meta.getPermissionStatus().getGroupName(),
      path.makeQualified(getUri(), getWorkingDirectory()));
}
 
Example 16  Project: RDFS  File: TestDFSShell.java
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);

    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Example 17  Project: hbase  File: TestBulkLoadHFiles.java
/**
 * Test that tags survive through a bulk load that needs to split hfiles. This test depends on the
 * "hbase.client.rpc.codec" = KeyValueCodecWithTags so that the client can get tags in the
 * responses.
 */
@Test
public void testTagsSurviveBulkLoadSplit() throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tn.getMethodName());
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  // table has these split points
  byte[][] tableSplitKeys = new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"),
    Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), };

  // creating an hfile that has values that span the split points.
  byte[] from = Bytes.toBytes("ddd");
  byte[] to = Bytes.toBytes("ooo");
  HFileTestUtil.createHFileWithTags(util.getConfiguration(), fs,
    new Path(familyDir, tn.getMethodName() + "_hfile"), FAMILY, QUALIFIER, from, to, 1000);
  int expectedRows = 1000;

  TableName tableName = TableName.valueOf(tn.getMethodName());
  TableDescriptor htd = buildHTD(tableName, BloomType.NONE);
  util.getAdmin().createTable(htd, tableSplitKeys);

  BulkLoadHFiles.create(util.getConfiguration()).bulkLoad(tableName, dir);

  Table table = util.getConnection().getTable(tableName);
  try {
    assertEquals(expectedRows, countRows(table));
    HFileTestUtil.verifyTags(table);
  } finally {
    table.close();
  }

  util.deleteTable(tableName);
}
 
Example 18  Project: incubator-tajo  File: SmallBlockS3FileSystem.java
/**
 * FileStatus for S3 file systems.
 */
@Override
public FileStatus getFileStatus(Path f)  throws IOException {
  INode inode = store.retrieveINode(makeAbsolute(f));
  if (inode == null) {
    throw new FileNotFoundException(f + ": No such file or directory.");
  }
  return new S3FileStatus(f.makeQualified(this), inode);
}
 
Example 19  Project: emodb  File: FileSystemUtil.java
/**
 * Qualifies a path so it includes the schema and authority from the root path.
 */
private static Path qualified(Path rootPath, Path path) {
    URI rootUri = rootPath.toUri();
    return path.makeQualified(rootUri, new Path(rootUri.getPath()));
}
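As a quick illustration of the helper above (the namenode address and file names here are made up), an unqualified path picks up the missing scheme and authority from the root path:

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class QualifiedDemo {
  public static void main(String[] args) {
    // Hypothetical root; inlines the same makeQualified call as the private helper above.
    Path rootPath = new Path("hdfs://namenode:8020/data");
    URI rootUri = rootPath.toUri();
    Path child = new Path("/data/part-00000");
    // The missing hdfs://namenode:8020 scheme and authority are filled in from the root URI.
    Path qualified = child.makeQualified(rootUri, new Path(rootUri.getPath()));
    System.out.println(qualified); // hdfs://namenode:8020/data/part-00000
  }
}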
 
Example 20  Project: hadoop-gpu  File: NativeS3FileSystem.java
private FileStatus newDirectory(Path path) {
  return new FileStatus(0, true, 1, MAX_S3_FILE_SIZE, 0,
      path.makeQualified(this));
}