org.apache.hadoop.hbase.HConstants#OLDEST_TIMESTAMP Source Code Examples

Listed below are usage examples of org.apache.hadoop.hbase.HConstants#OLDEST_TIMESTAMP drawn from open-source projects; the full source files can be viewed on GitHub.
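
For context, HConstants.OLDEST_TIMESTAMP is HBase's sentinel for the oldest possible cell timestamp. In HBase 2.x it is declared as Long.MIN_VALUE, alongside its counterpart LATEST_TIMESTAMP (verify against the exact release you use):

// From org.apache.hadoop.hbase.HConstants (HBase 2.x):
public static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
public static final long OLDEST_TIMESTAMP = Long.MIN_VALUE;

Because no real cell can carry an older timestamp, the constant serves both as an unbounded lower limit and as a marker for synthetic cells, as the examples below show.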

Example 1  Project: hbase  File: MemStoreCompactorSegmentsIterator.java
/**
 * Creates the scanner for compacting the pipeline.
 * @return the scanner
 */
private InternalScanner createScanner(HStore store, List<KeyValueScanner> scanners)
    throws IOException {
  InternalScanner scanner = null;
  boolean success = false;
  try {
    RegionCoprocessorHost cpHost = store.getCoprocessorHost();
    ScanInfo scanInfo;
    if (cpHost != null) {
      scanInfo = cpHost.preMemStoreCompactionCompactScannerOpen(store);
    } else {
      scanInfo = store.getScanInfo();
    }
    scanner = new StoreScanner(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES,
        store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    if (cpHost != null) {
      InternalScanner scannerFromCp = cpHost.preMemStoreCompactionCompact(store, scanner);
      if (scannerFromCp == null) {
        throw new CoprocessorException("Got a null InternalScanner when calling" +
            " preMemStoreCompactionCompact which is not acceptable");
      }
      success = true;
      return scannerFromCp;
    } else {
      success = true;
      return scanner;
    }
  } finally {
    if (!success) {
      Closeables.close(scanner, true);
      scanners.forEach(KeyValueScanner::close);
    }
  }
}
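
In this example, OLDEST_TIMESTAMP is passed as the earliestPutTs argument of the StoreScanner constructor. Every real put timestamp compares greater than or equal to it, so this is the most conservative cut-off: no delete marker can be purged on the grounds that it predates all puts. A minimal, self-contained sketch of that ordering property (the class name is hypothetical):

import org.apache.hadoop.hbase.HConstants;

public class EarliestPutTsDemo {
  public static void main(String[] args) {
    long anyRealPutTs = System.currentTimeMillis();
    // Any real timestamp is >= OLDEST_TIMESTAMP, so a cut-off of
    // OLDEST_TIMESTAMP never classifies a put as "old enough" to act on.
    System.out.println(anyRealPutTs >= HConstants.OLDEST_TIMESTAMP); // true
  }
}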
 
Example 2  Project: hbase  File: ScanQueryMatcher.java
/**
 * Check before the delete logic.
 * @return null means continue.
 */
protected final MatchCode preCheck(Cell cell) {
  if (currentRow == null) {
    // currentRow being null means we are already sure that we have moved over
    // to the next row
    return MatchCode.DONE;
  }
  // if row key is changed, then we know that we have moved over to the next row
  if (rowComparator.compareRows(currentRow, cell) != 0) {
    return MatchCode.DONE;
  }

  if (this.columns.done()) {
    return MatchCode.SEEK_NEXT_ROW;
  }

  long timestamp = cell.getTimestamp();
  // Check whether this is a fake cell. A fake cell is an optimization hint that
  // tells the scanner to seek to the next column or next row; see
  // StoreFileScanner.requestSeek for details. This also allows an early out
  // based on the timestamp alone.
  if (timestamp == HConstants.OLDEST_TIMESTAMP || columns.isDone(timestamp)) {
    return columns.getNextRowOrNextColumn(cell);
  }
  // check if the cell is expired by cell TTL
  if (isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) {
    return MatchCode.SKIP;
  }
  return null;
}
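
The timestamp == HConstants.OLDEST_TIMESTAMP test detects synthetic "fake" cells: HBase sorts cells within a column by descending timestamp, so a cell built with the oldest possible timestamp sorts after every real cell, and seek optimizations use it as a "last on row/column" marker. A small demo of the sentinel check itself (row, family, and qualifier are made up):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class FakeCellSentinelDemo {
  public static void main(String[] args) {
    // Build a cell carrying the sentinel timestamp, similar in spirit to the
    // synthetic cells produced during seeks (see StoreFileScanner.requestSeek).
    KeyValue fake = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), HConstants.OLDEST_TIMESTAMP, Bytes.toBytes(""));
    // This is exactly the early-out comparison in preCheck above.
    System.out.println(fake.getTimestamp() == HConstants.OLDEST_TIMESTAMP); // true
  }
}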
 
Example 3  Project: hbase  File: Compactor.java
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
private FileDetails getFileDetails(
    Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimestampToKeepMVCC = System.currentTimeMillis() -
    (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (HStoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemStoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFileReader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries; otherwise
    // under-sized blooms, or a user switching bloom type (e.g. from ROW to
    // ROWCOL), can cause compaction progress to be miscalculated
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();

    // calculate the total size of the compacted files
    fd.totalCompactedFilesSize += r.length();

    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(HFileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(EARLIEST_PUT_TS);
      if (tmp == null) {
        // A file without this metadata must be an old one;
        // assume it contains very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    tmp = fileInfo.get(TIMERANGE_KEY);
    fd.latestPutTs =
        tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax();
    LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
            + "encoding={}, compression={}, seqNum={}{}",
        (file.getPath() == null? null: file.getPath().getName()),
        keyCount,
        r.getBloomFilterType().toString(),
        TraditionalBinaryPrefix.long2String(r.length(), "", 1),
        r.getHFileReader().getDataBlockEncoding(),
        compactionCompression,
        seqNum,
        (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
  }
  return fd;
}
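
Note how getFileDetails falls back to OLDEST_TIMESTAMP when a store file has no EARLIEST_PUT_TS metadata: because OLDEST_TIMESTAMP is the smallest possible long, a single legacy file forces the most conservative bound. A hedged sketch of that fold (the timestamps are made up, and FileDetails is assumed to initialize earliestPutTs to LATEST_TIMESTAMP):

import org.apache.hadoop.hbase.HConstants;

public class EarliestPutTsFoldDemo {
  public static void main(String[] args) {
    // null stands in for a file whose EARLIEST_PUT_TS metadata is missing.
    Long[] perFileTs = { 1690000000000L, null, 1700000000000L };
    long earliestPutTs = HConstants.LATEST_TIMESTAMP; // assumed initial value
    for (Long ts : perFileTs) {
      long effective = (ts == null) ? HConstants.OLDEST_TIMESTAMP : ts;
      earliestPutTs = Math.min(earliestPutTs, effective);
    }
    // One file without metadata drags the result down to OLDEST_TIMESTAMP.
    System.out.println(earliestPutTs == HConstants.OLDEST_TIMESTAMP); // true
  }
}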
 
Example 4  Project: phoenix  File: UpgradeRequiredException.java
public UpgradeRequiredException() {
    this(HConstants.OLDEST_TIMESTAMP);
}
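
Phoenix uses OLDEST_TIMESTAMP here as a "timestamp unknown" default: the no-argument constructor delegates to the timestamp-taking overload. The same sentinel-default pattern, sketched with purely illustrative names (this is not Phoenix API):

import org.apache.hadoop.hbase.HConstants;

public class VersionMismatchException extends Exception {
  private final long catalogTimestamp;

  public VersionMismatchException() {
    this(HConstants.OLDEST_TIMESTAMP); // no catalog timestamp known yet
  }

  public VersionMismatchException(long catalogTimestamp) {
    this.catalogTimestamp = catalogTimestamp;
  }

  public boolean hasKnownTimestamp() {
    return catalogTimestamp != HConstants.OLDEST_TIMESTAMP;
  }
}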