Source code examples for the class org.apache.hadoop.hbase.util.FSUtils

The following examples show how to use the org.apache.hadoop.hbase.util.FSUtils API in real projects; you can also click through to GitHub to view the original source.

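Before the individual examples, here is a minimal self-contained sketch of the FSUtils entry points that recur throughout this page (getRootDir, getCurrentFileSystem, getTableDir). Treat it as an illustration only: in recent HBase releases several of these helpers are deprecated in favor of CommonFSUtils, so the exact class to call depends on your HBase version, and the table name below is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class FSUtilsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolve the cluster root directory (hbase.rootdir) and its file system.
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    // Derive the on-disk directory of a (hypothetical) table named "demo".
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("demo"));
    System.out.println("rootDir=" + rootDir + " exists=" + fs.exists(tableDir));
  }
}
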
Example 1  Project: spliceengine   File: SplitRegionScanner.java
void createAndRegisterClientSideRegionScanner(Table table, Scan newScan, Partition partition) throws Exception {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "createAndRegisterClientSideRegionScanner with table=%s, scan=%s, tableConfiguration=%s", table, newScan, table.getConfiguration());
    Configuration conf = table.getConfiguration();
    if (System.getProperty("hbase.rootdir") != null) {
        conf.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
        jobConfig.set("hbase.rootdir", System.getProperty("hbase.rootdir"));
    }

    SkeletonClientSideRegionScanner skeletonClientSideRegionScanner =
            new HBaseClientSideRegionScanner(table,
                    jobConfig,
                    FSUtils.getCurrentFileSystem(conf),
                    FSUtils.getRootDir(conf),
                    ((HPartitionDescriptor)partition.getDescriptor()).getDescriptor(),
                    ((RangedClientPartition) partition).getRegionInfo(),
                    newScan, partition.owningServer().getHostAndPort());
    this.region = skeletonClientSideRegionScanner.getRegion();
    registerRegionScanner(skeletonClientSideRegionScanner);
}
 
Example 2  Project: spliceengine   File: BackupEndpointObserver.java
@Override
public void start(CoprocessorEnvironment e) throws IOException {
    try {
        region = (HRegion) ((RegionCoprocessorEnvironment) e).getRegion();
        String[] name = region.getTableDescriptor().getTableName().getNameAsString().split(":");
        if (name.length == 2) {
            namespace = name[0];
            tableName = name[1];
        }
        else {
            tableName = name[0];
        }
        regionName = region.getRegionInfo().getEncodedName();

        conf = HConfiguration.unwrapDelegate();
        rootDir = FSUtils.getRootDir(conf);
        fs = FSUtils.getCurrentFileSystem(conf);
        backupDir = new Path(rootDir, BackupRestoreConstants.BACKUP_DIR + "/data/splice/" + tableName + "/" + regionName);
        preparing = new AtomicBoolean(false);
        isCompacting = new AtomicBoolean(false);
        isSplitting = new AtomicBoolean(false);
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
Example 3  Project: gemfirexd-oss   File: AbstractHoplog.java
private synchronized void initHoplogSizeTimeInfo() {
  if (hoplogSize != null && hoplogModificationTime != null) {
    // time and size info is already initialized. no work needed here
    return;
  }

  try {
    FileStatus[] filesInfo = FSUtils.listStatus(fsProvider.getFS(), path, null);
    if (filesInfo != null && filesInfo.length == 1) {
      this.hoplogModificationTime = filesInfo[0].getModificationTime();
      this.hoplogSize = filesInfo[0].getLen();
    }
    // TODO: the else branch can occur if the user deletes the hoplog from the file system.
  } catch (IOException e) {
    logger.error(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE, path, e);
    throw new HDFSIOException(
        LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path),e);
  }
}
 
Example 4  Project: phoenix-tephra   File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1}, null);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example 5  Project: phoenix-tephra   File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, tableName + ".hlog");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  WAL hLog = walFactory.getWAL(info);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example 6  Project: phoenix-tephra   File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1});
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
                     new LocalRegionServerServices(conf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example 7  Project: phoenix-tephra   File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
 
Example 8  Project: hbase   File: WALSplitUtil.java
/**
 * This method will check 3 places for finding the max sequence id file. One is the expected
 * place, another is the old place under the region directory, and the last one is the wrong one
 * we introduced in HBASE-20734. See HBASE-22617 for more details.
 * <p/>
 * Notice that, you should always call this method instead of
 * {@link #getMaxRegionSequenceId(FileSystem, Path)} until 4.0.0 release.
 * @deprecated Only for compatibility, will be removed in 4.0.0.
 */
@Deprecated
public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region,
  IOExceptionSupplier<FileSystem> rootFsSupplier, IOExceptionSupplier<FileSystem> walFsSupplier)
  throws IOException {
  FileSystem rootFs = rootFsSupplier.get();
  FileSystem walFs = walFsSupplier.get();
  Path regionWALDir =
    CommonFSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName());
  // This is the old place where we store max sequence id file
  Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), region);
  // This is for HBASE-20734, where we use a wrong directory, see HBASE-22617 for more details.
  Path wrongRegionWALDir =
    CommonFSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName());
  long maxSeqId = getMaxRegionSequenceId(walFs, regionWALDir);
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(rootFs, regionDir));
  maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(walFs, wrongRegionWALDir));
  return maxSeqId;
}
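
As a usage note, here is a hedged sketch of a call site for this compatibility method. IOExceptionSupplier is a functional interface, so the two file systems can be passed as lambdas; conf and regionInfo are assumed to be in scope, and for simplicity the sketch assumes the root and WAL directories live on the same file system.

// Hypothetical call site; a single-filesystem deployment is assumed, so the
// same supplier serves both the root and the WAL file system roles.
long maxSeqId = WALSplitUtil.getMaxRegionSequenceId(
    conf, regionInfo,
    () -> FileSystem.get(conf),   // root file system supplier
    () -> FileSystem.get(conf));  // WAL file system supplier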
 
Example 9  Project: hbase   File: CompactionTool.java
/**
 * Return the top hosts of the store files; used by the Split.
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = CommonFSUtils.listStatus(fs, path);
  if (files == null) {
    return new String[] {};
  }

  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus: files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
      FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }

  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
 
Example 10  Project: hbase   File: CachedClusterId.java
/**
 * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress,
 * synchronously fetches the cluster ID and sets it. If an attempt is already in progress,
 * returns right away and the caller is expected to wait for the fetch to finish.
 * @return true if the attempt is done, false if another thread is already fetching it.
 */
private boolean attemptFetch() {
  if (fetchInProgress.compareAndSet(false, true)) {
    // A fetch is not in progress, so try fetching the cluster ID synchronously and then notify
    // the waiting threads.
    try {
      cacheMisses.incrementAndGet();
      setClusterId(FSUtils.getClusterId(fs, rootDir));
    } catch (IOException e) {
      LOG.warn("Error fetching cluster ID", e);
    } finally {
      Preconditions.checkState(fetchInProgress.compareAndSet(true, false));
      synchronized (fetchInProgress) {
        fetchInProgress.notifyAll();
      }
    }
    return true;
  }
  return false;
}
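
The javadoc above implies a caller-side wait when another thread owns the fetch. A minimal sketch of such a caller follows, assuming a clusterId field populated by setClusterId() and a bounded wait; neither detail is taken from the source.

// Hypothetical caller: if another thread is already fetching, block on the
// same fetchInProgress monitor that attemptFetch() notifies on completion.
private String waitForClusterId(long timeoutMs) throws InterruptedException {
  if (!attemptFetch()) {
    synchronized (fetchInProgress) {
      while (fetchInProgress.get()) {
        fetchInProgress.wait(timeoutMs);
      }
    }
  }
  return clusterId;  // assumed field, set by setClusterId() above
}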
 
Example 11  Project: hbase   File: RestoreSnapshotHelper.java
/**
 * @return the set of the regions contained in the table
 */
private List<RegionInfo> getTableRegions() throws IOException {
  LOG.debug("get table regions: " + tableDir);
  FileStatus[] regionDirs =
    CommonFSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
  if (regionDirs == null) {
    return null;
  }

  List<RegionInfo> regions = new ArrayList<>(regionDirs.length);
  for (int i = 0; i < regionDirs.length; ++i) {
    RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
    regions.add(hri);
  }
  LOG.debug("found " + regions.size() + " regions for table=" +
    tableDesc.getTableName().getNameAsString());
  return regions;
}
 
Example 12  Project: hbase   File: HFileCorruptionChecker.java
/**
 * Check all files in a column family dir.
 *
 * @param cfDir
 *          column family directory
 * @throws IOException
 */
protected void checkColFamDir(Path cfDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(cfDir); // use same filter as scanner.
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }

  List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (hfs.isEmpty() && !fs.exists(cfDir)) {
    LOG.warn("Colfam Directory " + cfDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(cfDir);
    return;
  }
  for (FileStatus hfFs : hfs) {
    Path hf = hfFs.getPath();
    checkHFile(hf);
  }
}
 
Example 13  Project: hbase   File: HFileCorruptionChecker.java
/**
 * Check all files in a mob column family dir.
 *
 * @param cfDir
 *          mob column family directory
 * @throws IOException
 */
protected void checkMobColFamDir(Path cfDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(cfDir); // use same filter as scanner.
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Mob colfam Directory " + cfDir +
        " does not exist.  Likely the table is deleted. Skipping.");
    missedMobFiles.add(cfDir);
    return;
  }

  List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (hfs.isEmpty() && !fs.exists(cfDir)) {
    LOG.warn("Mob colfam Directory " + cfDir +
        " does not exist.  Likely the table is deleted. Skipping.");
    missedMobFiles.add(cfDir);
    return;
  }
  for (FileStatus hfFs : hfs) {
    Path hf = hfFs.getPath();
    checkMobFile(hf);
  }
}
 
Example 14  Project: hbase   File: MetricsRegionServerWrapperImpl.java
public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
  this.regionServer = regionServer;
  initBlockCache();
  initMobFileCache();

  this.period = regionServer.getConfiguration().getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
    HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);

  this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
  this.runnable = new RegionServerMetricsWrapperRunnable();
  this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
    TimeUnit.MILLISECONDS);
  this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  this.allocator = regionServer.getRpcServer().getByteBuffAllocator();

  try {
    this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
  } catch (IOException e) {
    LOG.warn("Failed to get hedged metrics", e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
  }
}
 
Example 15  Project: hbase   File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file system as the destination store, it is moved
 * from the source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  srcPath = srcFs.resolvePath(srcPath);
  FileSystem realSrcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
}
 
Example 16  Project: hbase   File: HRegionFileSystem.java
/**
 * Clean up any split detritus that may have been left around from previous
 * split attempts.
 * Call this method on initial region deploy.
 * @throws IOException
 */
void cleanupAnySplitDetritus() throws IOException {
  Path splitdir = this.getSplitsDir();
  if (!fs.exists(splitdir)) return;
  // Look at the splitdir.  It could have the encoded names of the daughter
  // regions we tried to make.  See if the daughter regions actually got made
  // out under the tabledir.  If here under splitdir still, then the split did
  // not complete.  Try and do cleanup.  This code WILL NOT catch the case
  // where we successfully created daughter a but regionserver crashed during
  // the creation of region b.  In this case, there'll be an orphan daughter
  // dir in the filesystem.  TODO: Fix.
  FileStatus[] daughters = CommonFSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
  if (daughters != null) {
    for (FileStatus daughter: daughters) {
      Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
      if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
        throw new IOException("Failed delete of " + daughterDir);
      }
    }
  }
  cleanupSplitsDir();
  LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
 
Example 17  Project: hbase   File: StoreFileInfo.java
/**
 * Helper function to compute the HDFS block distribution of a given reference file. For a
 * reference file, we don't compute the exact value; we use an estimate instead, since it is
 * likely good enough. We assume the bottom part takes the first half of the reference file
 * and the top part takes the second half. This is only an estimate, since the midkey of the
 * region is not the midkey of the HFile, and the number and size of keys vary. If this
 * estimate isn't good enough, we can improve it later.
 * @param fs The FileSystem
 * @param reference The reference
 * @param status The reference FileStatus
 * @return HDFS blocks distribution
 */
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs,
    final Reference reference, final FileStatus status) throws IOException {
  if (status == null) {
    return null;
  }

  long start = 0;
  long length = 0;

  if (Reference.isTopFileRegion(reference.getFileRegion())) {
    start = status.getLen() / 2;
    length = status.getLen() - status.getLen() / 2;
  } else {
    start = 0;
    length = status.getLen() / 2;
  }
  return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
}
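
To make the estimate concrete: for an illustrative 128 MiB reference file, the bottom half is estimated as bytes [0, 64 MiB) and the top half as [64 MiB, 128 MiB). Note that the top part uses length = len - len / 2 rather than len / 2, so the two halves still cover the whole file when the length is odd.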
 
Example 18  Project: hbase   File: HFile.java
/**
 * Returns all HFiles belonging to the given region directory. Could return an
 * empty list.
 *
 * @param fs  The file system reference.
 * @param regionDir  The region directory to scan.
 * @return The list of files found.
 * @throws IOException When scanning the files fails.
 */
public static List<Path> getStoreFiles(FileSystem fs, Path regionDir)
    throws IOException {
  List<Path> regionHFiles = new ArrayList<>();
  PathFilter dirFilter = new FSUtils.DirFilter(fs);
  FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter);
  for(FileStatus dir : familyDirs) {
    FileStatus[] files = fs.listStatus(dir.getPath());
    for (FileStatus file : files) {
      if (!file.isDirectory() &&
          (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) &&
          (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) {
        regionHFiles.add(file.getPath());
      }
    }
  }
  return regionHFiles;
}
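
A short hedged usage sketch of this helper; building the region directory from a table directory and an encoded region name is an assumption for illustration, not taken from the source.

// Hypothetical caller: list every HFile under one region of a table.
Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
for (Path hfile : HFile.getStoreFiles(fs, regionDir)) {
  System.out.println("store file: " + hfile);
}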
 
Example 19  Project: hbase   File: TestHbckChore.java
@Test
public void testOrphanRegionsOnFS() throws Exception {
  TableName tableName = TableName.valueOf("testOrphanRegionsOnFS");
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
  Configuration conf = util.getConfiguration();

  hbckChore.choreForTesting();
  assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());

  HRegion.createRegionDir(conf, regionInfo, CommonFSUtils.getRootDir(conf));
  hbckChore.choreForTesting();
  assertEquals(1, hbckChore.getOrphanRegionsOnFS().size());
  assertTrue(hbckChore.getOrphanRegionsOnFS().containsKey(regionInfo.getEncodedName()));

  FSUtils.deleteRegionDir(conf, regionInfo);
  hbckChore.choreForTesting();
  assertEquals(0, hbckChore.getOrphanRegionsOnFS().size());
}
 
Example 20  Project: hbase   File: TestCreateTableProcedure.java
@Override
protected Flow executeFromState(MasterProcedureEnv env,
  MasterProcedureProtos.CreateTableState state) throws InterruptedException {

  if (!failOnce &&
    state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT) {
    try {
      // To emulate an HDFS failure, create only the first region directory
      RegionInfo regionInfo = getFirstRegionInfo();
      Configuration conf = env.getMasterConfiguration();
      MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      Path tempdir = mfs.getTempDir();
      Path tableDir = CommonFSUtils.getTableDir(tempdir, regionInfo.getTable());
      Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(regionDir);

      failOnce = true;
      return Flow.HAS_MORE_STATE;
    } catch (IOException e) {
      fail("failed to create a region directory: " + e);
    }
  }

  return super.executeFromState(env, state);
}
 
Example 21  Project: hbase   File: MasterProcedureTestingUtility.java
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
    final String family) throws IOException {
  // verify htd
  TableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue(htd != null);
  assertFalse(htd.hasColumnFamily(Bytes.toBytes(family)));

  // verify fs
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path tableDir =
    CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
  for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
    final Path familyDir = new Path(regionDir, family);
    assertFalse(family + " family dir should not exist", fs.exists(familyDir));
  }
}
 
Example 22  Project: hbase   File: TestTruncateTableProcedure.java
@Override
protected Flow executeFromState(MasterProcedureEnv env,
  MasterProcedureProtos.TruncateTableState state) throws InterruptedException {

  if (!failOnce &&
    state == MasterProcedureProtos.TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT) {
    try {
      // To emulate an HDFS failure, create only the first region directory
      RegionInfo regionInfo = getFirstRegionInfo();
      Configuration conf = env.getMasterConfiguration();
      MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      Path tempdir = mfs.getTempDir();
      Path tableDir = CommonFSUtils.getTableDir(tempdir, regionInfo.getTable());
      Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(regionDir);

      failOnce = true;
      return Flow.HAS_MORE_STATE;
    } catch (IOException e) {
      fail("failed to create a region directory: " + e);
    }
  }

  return super.executeFromState(env, state);
}
 
Example 23  Project: hbase   File: HFileCorruptionChecker.java
/**
 * Check all column families in a region dir.
 */
protected void checkRegionDir(Path regionDir) throws IOException {
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(regionDir);
  } catch (FileNotFoundException fnfe) {
    // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist.
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
  // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
  if (cfs.isEmpty() && !fs.exists(regionDir)) {
    LOG.warn("Region Directory " + regionDir +
        " does not exist.  Likely due to concurrent split/compaction. Skipping.");
    missing.add(regionDir);
    return;
  }

  for (FileStatus cfFs : cfs) {
    Path cfDir = cfFs.getPath();
    checkColFamDir(cfDir);
  }
}
 
Example 24  Project: mizo   File: MizoRegionFamilyCellsIterator.java
/**
 * Given a path to HFiles, gets a list of the HFiles residing in the directory,
 * creates a Cell iterator for each HFile, and returns a collection of these
 * iterators, dropping any iterators that have no items
 * @param regionEdgesFamilyPath Path to HFiles
 * @return Collection of non-empty iterators of the given HFiles
 */
protected Iterable<Iterator<Cell>> createHFilesIterators(String regionEdgesFamilyPath) throws IOException {
    Path path = new Path(regionEdgesFamilyPath);
    FileSystem fs = path.getFileSystem(new Configuration());

    return Arrays.stream(fs.listStatus(path, new FSUtils.HFileFilter(fs)))
            .map(FileStatus::getPath)
            .map(hfilePath -> MizoHFileIterator.createIterator(fs, hfilePath))
            .filter(Iterator::hasNext)
            .collect(Collectors.toList());
}
 
Example 25  Project: mizo   File: MizoRDD.java
/**
 * Given a path with wildcards under which regions are located, gets the paths
 * of the regions that satisfy these wildcards
 * @param regionDirectoryPaths Paths to get regions from, with wildcards
 * @return Collection of regions paths
 */
protected static List<String> getRegionsPaths(String regionDirectoryPaths) {
    try {
        Path regionDirectory = new Path(regionDirectoryPaths);
        FileSystem fs = regionDirectory.getFileSystem(new Configuration());

        return Arrays.stream(fs.globStatus(regionDirectory, new FSUtils.RegionDirFilter(fs)))
                .map(file -> file.getPath().toString())
                .collect(Collectors.toList());
    } catch (IOException e) {
        log.error("Failed to get partitions due to inner exception: {}", e);

        return Collections.emptyList();
    }
}
 
Example 26  Project: gemfirexd-oss   File: HdfsSortedOplogOrganizer.java
protected FileStatus[] getExpiryMarkers() throws IOException {
  FileSystem fs = store.getFileSystem();
  if (hoplogReadersController.hoplogs == null
      || hoplogReadersController.hoplogs.size() == 0) {
    // There are no hoplogs in the system; maybe the bucket does not exist
    // at all.
    if (!fs.exists(bucketPath)) {
      logger.fine("This bucket is unused, skipping expired hoplog check");
      return null;
    }
  }
  
  FileStatus[] files = FSUtils.listStatus(fs, bucketPath, new PathFilter() {
    @Override
    public boolean accept(Path file) {
      // All expired hoplogs end with the expiry extension and must match the valid file regex
      String fileName = file.getName();
      if (! fileName.endsWith(EXPIRED_HOPLOG_EXTENSION)) {
        return false;
      }
      fileName = truncateExpiryExtension(fileName);
      Matcher matcher = SORTED_HOPLOG_PATTERN.matcher(fileName);
      return matcher.find();
    }

  });
  return files;
}
 