下面列出了 org.apache.hadoop.hbase.regionserver.Store API 的实例代码及用法示例，也可以点击链接到 GitHub 查看完整源代码。
/**
 * Creates a mocked Region with one mocked Store per element of {@code storeSizes}; each Store
 * reports the corresponding store-file size and HFile size from the two collections.
 *
 * @param storeSizes per-store values returned by {@code Store#getStorefilesSize()}
 * @param hfileSizes per-store values returned by {@code Store#getHFilesSize()}; must have the
 *                   same number of elements as {@code storeSizes}
 * @return a mocked Region exposing the mocked stores via {@code getStores()}
 */
private Region mockRegionWithHFileLinks(Collection<Long> storeSizes, Collection<Long> hfileSizes) {
  final Region r = mock(Region.class);
  final RegionInfo info = mock(RegionInfo.class);
  when(r.getRegionInfo()).thenReturn(info);
  List<Store> stores = new ArrayList<>();
  // Raw cast mirrors the declared element type of Region.getStores().
  when(r.getStores()).thenReturn((List) stores);
  // Fixed message: it previously referred to "linkSizes", which is not a parameter here.
  assertEquals(
      "Logic error, storeSizes and hfileSizes must be equal in size", storeSizes.size(),
      hfileSizes.size());
  Iterator<Long> storeSizeIter = storeSizes.iterator();
  Iterator<Long> hfileSizeIter = hfileSizes.iterator();
  while (storeSizeIter.hasNext() && hfileSizeIter.hasNext()) {
    final long storeSize = storeSizeIter.next();
    final long hfileSize = hfileSizeIter.next();
    final Store s = mock(Store.class);
    stores.add(s);
    when(s.getStorefilesSize()).thenReturn(storeSize);
    when(s.getHFilesSize()).thenReturn(hfileSize);
  }
  return r;
}
/**
 * Coprocessor hook invoked before a compaction scanner is opened. Captures the latest
 * transaction visibility snapshot, records it for later persistence, and replaces the
 * supplied scanner with the one built by {@code createStoreScanner} so the whole
 * compaction sees a single consistent snapshot.
 *
 * @return the scanner produced by {@code createStoreScanner} for this compaction
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
CompactionRequest request)
throws IOException {
// Get the latest tx snapshot state for the compaction
TransactionVisibilityState snapshot = cache.getLatestState();
// Record tx state before the compaction
// compactionState may be null when state recording is disabled, hence the guard.
if (compactionState != null) {
compactionState.record(request, snapshot);
}
// silently close the passed scanner as we're returning a brand-new one
try (InternalScanner temp = s) { }
// Also make sure to use the same snapshot for the compaction
return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
/**
 * Builds a transaction-aware {@code StoreScanner} over the given scanners, or returns
 * {@code null} when no transaction snapshot is available (callers then fall back to the
 * default scanner).
 *
 * @param env coprocessor environment of the region being flushed/compacted
 * @param action label used only in the debug log message (e.g. "flush" or "compaction")
 * @param snapshot latest transaction visibility state; may be null
 * @param store store whose data is being scanned
 * @param scanners underlying scanners to merge
 * @param type scan type passed through to the StoreScanner and the transaction filter
 * @param earliestPutTs earliest put timestamp passed through to the StoreScanner
 * @return a filtered scanner, or null to use the default scanner
 * @throws IOException if the store scanner cannot be created
 */
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionInfo().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
// null signals the caller to keep the normal (non-transactional) scanner.
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
// Filter chain: keep in-progress cells visible up to the snapshot's visibility upper bound,
// drop invalid transactions, then delegate to the regular transaction filter.
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
/**
 * Pre-compaction hook: swaps the default compaction scanner for a transaction-aware one
 * built from the latest visibility snapshot, recording that snapshot beforehand so it can
 * be persisted once the compaction completes.
 *
 * @return the replacement scanner for this compaction
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
    CompactionRequest request)
    throws IOException {
  // One snapshot is captured up front so the entire compaction observes a single tx state.
  final TransactionVisibilityState txState = cache.getLatestState();
  if (compactionState != null) {
    // Record (not persist) the pre-compaction transaction state.
    compactionState.record(request, txState);
  }
  // The scanner we were handed is discarded; try-with-resources closes it quietly.
  try (InternalScanner toClose = s) {
    // intentionally empty
  }
  return createStoreScanner(c.getEnvironment(), "compaction", txState, store, scanners, scanType,
      earliestPutTs);
}
/**
 * Releases the per-store parallel-put counters for the column families touched by this batch.
 * NOTE(review): assumes a matching "prepare" step incremented these counters earlier —
 * confirm against the caller.
 *
 * @param familyMaps cells of the completed operation, grouped by column family
 */
public void finish(Map<byte[], List<Cell>> familyMaps) {
// Fast exit when the parallel-put limiting feature is disabled.
if (!isEnable()) {
return;
}
for (Map.Entry<byte[], List<Cell>> e : familyMaps.entrySet()) {
Store store = this.region.getStore(e.getKey());
// Skip families with no backing store or no cells.
if (store == null || e.getValue() == null) {
continue;
}
// Only batches large enough to have been counted in the prepare phase are decremented.
if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) {
AtomicInteger counter = preparePutToStoreMap.get(e.getKey());
// preparePutToStoreMap will be cleared when changing the configuration, so it may turn
// into a negative value. It will be not accuracy in a short time, it's a trade-off for
// performance.
if (counter != null && counter.decrementAndGet() < 0) {
counter.incrementAndGet();
}
}
}
}
/**
 * Creates a transaction-filtered {@code StoreScanner} from the given snapshot, or returns
 * {@code null} (meaning "use the default scanner") when no snapshot is available.
 *
 * @param env coprocessor environment of the region
 * @param action label used only for logging
 * @param snapshot latest transaction visibility state; may be null
 * @param store store being scanned
 * @param scanners underlying scanners to merge
 * @param type scan type forwarded to the scanner and transaction filter
 * @param earliestPutTs earliest put timestamp forwarded to the scanner
 * @return a filtered scanner, or null for the default one
 * @throws IOException if the scanner cannot be created
 */
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
    TransactionVisibilityState snapshot, Store store,
    List<? extends KeyValueScanner> scanners, ScanType type,
    long earliestPutTs) throws IOException {
  if (snapshot == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Region " + env.getRegion().getRegionNameAsString()
          + ", no current transaction state found, defaulting to normal " + action + " scanner");
    }
    return null;
  }
  // A placeholder transaction carrying the snapshot's visibility bounds.
  final Transaction fakeTx = TxUtils.createDummyTransaction(snapshot);
  // All versions are needed because excluded cells are filtered out here, and applications
  // may rely on older versions.
  final Scan scan = new Scan();
  scan.setMaxVersions();
  final IncludeInProgressFilter txFilter = new IncludeInProgressFilter(
      fakeTx.getVisibilityUpperBound(), snapshot.getInvalid(),
      getTransactionFilter(fakeTx, type, null));
  scan.setFilter(txFilter);
  return new StoreScanner(store, store.getScanInfo(), scan, scanners, type,
      store.getSmallestReadPoint(), earliestPutTs);
}
/**
 * Coprocessor hook invoked before a compaction scanner is opened. Records the latest
 * transaction snapshot and returns a replacement scanner built from that same snapshot,
 * after silently closing the scanner supplied by the framework.
 *
 * @return the scanner produced by {@code createStoreScanner} for this compaction
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
CompactionRequest request)
throws IOException {
// Get the latest tx snapshot state for the compaction
TransactionVisibilityState snapshot = cache.getLatestState();
// Record tx state before the compaction
// Guarded: compactionState may be null when recording is disabled.
if (compactionState != null) {
compactionState.record(request, snapshot);
}
// silently close the passed scanner as we're returning a brand-new one
try (InternalScanner temp = s) { }
// Also make sure to use the same snapshot for the compaction
return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
/**
 * Builds a transaction-aware {@code StoreScanner}, or returns {@code null} when no
 * transaction snapshot is available so callers keep the default scanner.
 *
 * @param env coprocessor environment of the region
 * @param action label used only in the debug log message
 * @param snapshot latest transaction visibility state; may be null
 * @param store store being scanned
 * @param scanners underlying scanners to merge
 * @param type scan type forwarded to the scanner and filter
 * @param earliestPutTs earliest put timestamp forwarded to the scanner
 * @return a filtered scanner, or null to use the default scanner
 * @throws IOException if the scanner cannot be created
 */
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
// null tells the caller to fall back to the standard scanner.
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
// Keep in-progress cells up to the visibility upper bound, drop invalid transactions,
// then delegate to the regular transaction filter.
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
/**
 * Returns a transaction-filtered {@code StoreScanner} built from {@code snapshot}, or
 * {@code null} when no snapshot exists (the caller then uses the default scanner).
 *
 * @param env coprocessor environment of the region
 * @param action label used only for logging
 * @param snapshot latest transaction visibility state; may be null
 * @param store store being scanned
 * @param scanners underlying scanners to merge
 * @param type scan type forwarded to the scanner and filter
 * @param earliestPutTs earliest put timestamp forwarded to the scanner
 * @return a filtered scanner, or null for the default one
 * @throws IOException if the scanner cannot be created
 */
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
    TransactionVisibilityState snapshot, Store store,
    List<? extends KeyValueScanner> scanners, ScanType type,
    long earliestPutTs) throws IOException {
  if (snapshot == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Region " + env.getRegion().getRegionNameAsString()
          + ", no current transaction state found, defaulting to normal " + action + " scanner");
    }
    return null;
  }
  // Stand-in transaction representing the snapshot's visibility bounds.
  final Transaction placeholderTx = TxUtils.createDummyTransaction(snapshot);
  // Scan every version: exclusion filtering happens here, and clients may read old versions.
  final Scan allVersionsScan = new Scan();
  allVersionsScan.setMaxVersions();
  allVersionsScan.setFilter(new IncludeInProgressFilter(
      placeholderTx.getVisibilityUpperBound(),
      snapshot.getInvalid(),
      getTransactionFilter(placeholderTx, type, null)));
  return new StoreScanner(store, store.getScanInfo(), allVersionsScan, scanners, type,
      store.getSmallestReadPoint(), earliestPutTs);
}
/**
 * Pre-compaction hook: records the newest transaction snapshot and returns a replacement
 * scanner driven by that same snapshot, closing the framework-provided scanner first.
 *
 * @return the replacement scanner for this compaction
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
    CompactionRequest request)
    throws IOException {
  // Capture one snapshot so the whole compaction runs against a single consistent state.
  final TransactionVisibilityState latestSnapshot = cache.getLatestState();
  if (compactionState != null) {
    // Record now; persistence happens after the compaction completes.
    compactionState.record(request, latestSnapshot);
  }
  // Silently dispose of the provided scanner via try-with-resources.
  try (InternalScanner unused = s) {
    // intentionally empty
  }
  return createStoreScanner(c.getEnvironment(), "compaction", latestSnapshot, store, scanners,
      scanType, earliestPutTs);
}
/**
 * Builds a transaction-aware {@code StoreScanner} from the given snapshot, or returns
 * {@code null} when no snapshot is available so the caller uses the default scanner.
 *
 * @param env coprocessor environment of the region
 * @param action label used only in the debug log message
 * @param snapshot latest transaction visibility state; may be null
 * @param store store being scanned
 * @param scanners underlying scanners to merge
 * @param type scan type forwarded to the scanner and filter
 * @param earliestPutTs earliest put timestamp forwarded to the scanner
 * @return a filtered scanner, or null to use the default scanner
 * @throws IOException if the scanner cannot be created
 */
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
// null signals the caller to keep the normal scanner.
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
// Keep in-progress cells within the visibility upper bound, drop invalid transactions,
// then delegate to the regular transaction filter.
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
/**
 * Checks whether Omid compaction support is enabled for the column family served by the
 * given store, by reading the family-level attribute named {@code cfFlagValue} from the
 * table descriptor and interpreting it as a boolean.
 *
 * @param env coprocessor environment providing access to the region's table descriptor
 * @param store store identifying the column family to inspect
 * @param cfFlagValue name of the column-family attribute holding the flag
 * @return true only when the attribute is present and equals "true" (case-insensitive)
 */
public static boolean OmidCompactionEnabled(ObserverContext<RegionCoprocessorEnvironment> env,
                                            Store store,
                                            String cfFlagValue) {
  HTableDescriptor desc = env.getEnvironment().getRegion().getTableDesc();
  HColumnDescriptor famDesc
      = desc.getFamily(Bytes.toBytes(store.getColumnFamilyName()));
  // Guard against a missing family descriptor instead of risking an NPE; treat as disabled.
  if (famDesc == null) {
    return false;
  }
  // parseBoolean avoids the needless boxing of Boolean.valueOf and maps null to false.
  return Boolean.parseBoolean(famDesc.getValue(cfFlagValue));
}
/**
 * Coprocessor hook invoked before a flush scanner is opened. Replaces the supplied scanner
 * with a transaction-aware one built from the latest visibility snapshot; the
 * COMPACT_RETAIN_DELETES scan type is used so deletes are kept through the flush
 * (presumably until a later compaction removes them — confirm against HBase semantics).
 *
 * @return the scanner produced by {@code createStoreScanner} for this flush
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
KeyValueScanner memstoreScanner, InternalScanner scanner)
throws IOException {
// silently close the passed scanner as we're returning a brand-new one
try (InternalScanner temp = scanner) { }
return createStoreScanner(c.getEnvironment(), "flush", cache.getLatestState(), store,
Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
HConstants.OLDEST_TIMESTAMP);
}
/**
 * Counts the store files across every store of the region in the given context.
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
  return c.getEnvironment().getRegion().getStores().stream()
      .mapToLong(store -> store.getStorefiles().size())
      .sum();
}
/**
 * Counts the store files across every store of the region in the given context.
 * (Older API: getStores() returns a map, so the values are iterated.)
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
  long total = 0L;
  for (final Store regionStore : c.getEnvironment().getRegion().getStores().values()) {
    total += regionStore.getStorefiles().size();
  }
  return total;
}
/**
 * Pre-flush hook: discards the scanner supplied by the framework and returns a
 * transaction-aware replacement built from the latest visibility snapshot, scanning the
 * memstore with COMPACT_RETAIN_DELETES semantics from the oldest timestamp.
 *
 * @return the replacement scanner for this flush
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    KeyValueScanner memstoreScanner, InternalScanner scanner)
    throws IOException {
  // Dispose of the provided scanner; try-with-resources closes it quietly.
  try (InternalScanner discarded = scanner) {
    // intentionally empty
  }
  final TransactionVisibilityState latestState = cache.getLatestState();
  return createStoreScanner(c.getEnvironment(), "flush", latestState, store,
      Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
      HConstants.OLDEST_TIMESTAMP);
}
/**
 * Test hook run after a store flush: increments the postFlush invocation counter and,
 * when the {@code throwOnPostFlush} flag is set, fails the flush with an IOException.
 *
 * @throws IOException when {@code throwOnPostFlush} is true (deliberate test failure)
 */
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
Store store, StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException {
// Count every invocation so tests can assert how many flushes completed.
ctPostFlush.incrementAndGet();
if (throwOnPostFlush.get()){
throw new IOException("throwOnPostFlush is true in postFlush");
}
}
/**
 * Coprocessor hook run after a successful compaction: persists the transaction state
 * that was recorded before the compaction started.
 *
 * @throws IOException declared by the hook signature
 */
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store, StoreFile resultFile,
CompactionRequest request) throws IOException {
// Persist the compaction state after a successful compaction
// Guarded: compactionState is null when state tracking is disabled.
if (compactionState != null) {
compactionState.persist();
}
}
/**
 * Counts the store files across every store of the region in the given context.
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
long numStoreFiles = 0;
for (Store store : c.getEnvironment().getRegion().getStores()) {
numStoreFiles += store.getStorefiles().size();
}
return numStoreFiles;
}
/**
 * Coprocessor hook run after a successful compaction (tracker-aware API variant):
 * persists the transaction state that was recorded before the compaction started.
 *
 * @throws IOException declared by the hook signature
 */
@Override
public void postCompact(
org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
Store store, StoreFile resultFile,
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker tracker,
CompactionRequest request) throws IOException {
// Persist the compaction state after a successful compaction
// Guarded: compactionState is null when state tracking is disabled.
if (compactionState != null) {
compactionState.persist();
}
}
/**
 * Counts the store files across every store of the region in the given context.
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
  long total = 0L;
  for (final Store regionStore : c.getEnvironment().getRegion().getStores()) {
    total += regionStore.getStorefiles().size();
  }
  return total;
}
/**
 * Post-compaction-selection hook: adds the number of files selected for compaction to the
 * {@code filesCompactedCounter} metric.
 */
@Override
public void postCompactSelection(
    ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> selected, CompactionLifeCycleTracker tracker,
    CompactionRequest request) {
  // Nothing to record when no files were selected.
  if (selected == null) {
    return;
  }
  filesCompactedCounter.increment(selected.size());
}
/**
 * Creates a region with a number of Stores equal to the length of {@code storeSizes}. Each
 * {@link Store} will have a reported size corresponding to the element in {@code storeSizes}.
 *
 * @param storeSizes A list of sizes for each Store.
 * @return A mocked Region.
 */
private Region mockRegionWithSize(Collection<Long> storeSizes) {
  final Region region = mock(Region.class);
  final RegionInfo regionInfo = mock(RegionInfo.class);
  when(region.getRegionInfo()).thenReturn(regionInfo);
  final List<Store> mockedStores = new ArrayList<>();
  // Raw cast mirrors the declared element type of Region.getStores().
  when(region.getStores()).thenReturn((List) mockedStores);
  for (final Long size : storeSizes) {
    final Store mockedStore = mock(Store.class);
    mockedStores.add(mockedStore);
    when(mockedStore.getHFilesSize()).thenReturn(size);
  }
  return region;
}
/**
 * Post-compaction hook: persists the transaction state recorded before the compaction,
 * now that the compaction has completed successfully.
 *
 * @throws IOException declared by the hook signature
 */
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store, StoreFile resultFile,
    CompactionRequest request) throws IOException {
  // Nothing to persist when compaction-state tracking is disabled.
  if (compactionState == null) {
    return;
  }
  compactionState.persist();
}
/**
 * Counts the store files across every store of the region in the given context.
 * (Older API: getStores() returns a map, so the values are iterated.)
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
long numStoreFiles = 0;
for (Store store : c.getEnvironment().getRegion().getStores().values()) {
numStoreFiles += store.getStorefiles().size();
}
return numStoreFiles;
}
/**
 * Coprocessor hook invoked before a flush scanner is opened. Closes the supplied scanner
 * and returns a transaction-aware replacement built from the latest visibility snapshot,
 * scanning the memstore with COMPACT_RETAIN_DELETES semantics from the oldest timestamp.
 *
 * @return the scanner produced by {@code createStoreScanner} for this flush
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
KeyValueScanner memstoreScanner, InternalScanner scanner)
throws IOException {
// silently close the passed scanner as we're returning a brand-new one
try (InternalScanner temp = scanner) { }
return createStoreScanner(c.getEnvironment(), "flush", cache.getLatestState(), store,
Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
HConstants.OLDEST_TIMESTAMP);
}
/**
 * Coprocessor hook run after a successful compaction: persists the transaction state
 * recorded before the compaction started.
 *
 * @throws IOException declared by the hook signature
 */
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store, StoreFile resultFile,
CompactionRequest request) throws IOException {
// Persist the compaction state after a successful compaction
// Guarded: compactionState is null when state tracking is disabled.
if (compactionState != null) {
compactionState.persist();
}
}
/**
 * Counts the store files across every store of the region in the given context.
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
  return c.getEnvironment().getRegion().getStores().stream()
      .mapToLong(s -> s.getStorefiles().size())
      .sum();
}
/**
 * Pre-flush hook: closes the framework-provided scanner and substitutes a
 * transaction-aware one built from the latest visibility snapshot, reading the memstore
 * with COMPACT_RETAIN_DELETES semantics from the oldest timestamp.
 *
 * @return the replacement scanner for this flush
 * @throws IOException if the replacement scanner cannot be created
 */
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    KeyValueScanner memstoreScanner, InternalScanner scanner)
    throws IOException {
  // The incoming scanner is unused; close it quietly via try-with-resources.
  try (InternalScanner unused = scanner) {
    // intentionally empty
  }
  final TransactionVisibilityState snapshotState = cache.getLatestState();
  return createStoreScanner(c.getEnvironment(), "flush", snapshotState, store,
      Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
      HConstants.OLDEST_TIMESTAMP);
}
/**
 * Counts the store files across every store of the region in the given context.
 * (Older API: getStores() returns a map, so the values are iterated.)
 *
 * @param c coprocessor context for the region
 * @return total number of store files in the region
 */
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> c) {
long numStoreFiles = 0;
for (Store store : c.getEnvironment().getRegion().getStores().values()) {
numStoreFiles += store.getStorefiles().size();
}
return numStoreFiles;
}