org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest#isMajor ( )源码实例Demo

下面列出了org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest#isMajor ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码2 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码3 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码4 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码5 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码6 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码7 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
 
源代码8 项目: phoenix-tephra   文件: CompactionState.java
/**
 * Captures the transaction state in effect for a compaction; invoked when the compaction begins.
 *
 * @param request the {@link CompactionRequest} describing this compaction
 * @param snapshot transaction visibility state the compaction will use, or {@code null} if unknown
 */
public void record(CompactionRequest request, @Nullable TransactionVisibilityState snapshot) {
  // Only a major compaction with a known transaction snapshot yields a usable prune upper bound.
  if (!request.isMajor() || snapshot == null) {
    pruneUpperBound = -1;
    return;
  }
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  pruneUpperBound = TxUtils.getPruneUpperBound(dummyTx);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      String.format("Computed prune upper bound %s for compaction request %s using transaction state from time %s",
                    pruneUpperBound, request, snapshot.getTimestamp()));
  }
}
源代码9 项目: phoenix   文件: IndexHalfStoreFileReaderGenerator.java(推测;原页面此处缺少标题行,请以 github 源码为准)
/**
 * Before a compaction of a local-index store, optionally substitutes a repair scanner
 * when the store's local-index files are found to be inconsistent.
 *
 * @return the repair scanner when an automatic repair is triggered, otherwise {@code s} unchanged
 * @throws IOException if closing the original scanner fails
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
        InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker,
        CompactionRequest request) throws IOException {

    // Pass through: non-local-index stores, and stores that still hold reference files.
    if (!IndexUtil.isLocalIndexStore(store) || store.hasReferences()) {
        return s;
    }
    // Repair is only attempted on major compactions whose store files are inconsistent.
    if (!request.isMajor()
            || RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store)) {
        return s;
    }
    LOGGER.info("we have found inconsistent data for local index for region:"
            + c.getEnvironment().getRegion().getRegionInfo());
    // Automatic repair can be disabled via configuration; default is enabled.
    if (!c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
        return s;
    }
    LOGGER.info("Starting automatic repair of local Index for region:"
            + c.getEnvironment().getRegion().getRegionInfo());
    InternalScanner repairScanner = getRepairScanner(c.getEnvironment(), store);
    if (repairScanner == null) {
        return s;
    }
    // Replace the original scanner with the repair scanner, releasing the old one first.
    if (s != null) {
        s.close();
    }
    return repairScanner;
}
 
源代码10 项目: spliceengine   文件: SIObserver.java
/**
 * Wraps the compaction scanner with an SI-aware scanner that resolves transaction state
 * and (when configured) purges deleted rows during the compaction.
 *
 * @return the wrapping {@link SICompactionScanner} when this table participates in SI,
 *         otherwise the incoming scanner (never {@code null})
 * @throws IOException any failure, translated via {@code CoprocessorUtils.getIOException}
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request) throws IOException {
    try {
        // We can't return null, there's a check in org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preCompact
        // return a dummy implementation instead
        if (scanner == null || scanner == DummyScanner.INSTANCE) {
            return DummyScanner.INSTANCE;
        }
        if (!tableEnvMatch) {
            return scanner;
        }
        SIDriver siDriver = SIDriver.driver();
        SimpleCompactionContext compactionContext = new SimpleCompactionContext();
        SICompactionState compactionState = new SICompactionState(siDriver.getTxnSupplier(),
                siDriver.getConfiguration().getActiveTransactionMaxCacheSize(), compactionContext,
                siDriver.getRejectingExecutorService());
        SConfiguration config = siDriver.getConfiguration();
        // Purge policy: disabled entirely, or chosen by compaction kind (major vs minor).
        final PurgeConfig purgeConfig;
        if (!config.getOlapCompactionAutomaticallyPurgeDeletedRows()) {
            purgeConfig = PurgeConfig.noPurgeConfig();
        } else if (request.isMajor()) {
            purgeConfig = PurgeConfig.purgeDuringMajorCompactionConfig();
        } else {
            purgeConfig = PurgeConfig.purgeDuringMinorCompactionConfig();
        }
        SICompactionScanner siScanner = new SICompactionScanner(
                compactionState, scanner, purgeConfig,
                config.getOlapCompactionResolutionShare(),
                config.getLocalCompactionResolutionBufferSize(), compactionContext);
        siScanner.start();
        return siScanner;
    } catch (Throwable t) {
        throw CoprocessorUtils.getIOException(t);
    }
}
 
源代码11 项目: phoenix   文件: UngroupedAggregateRegionObserver.java
/**
 * Before a major compaction, adjusts the scan options to retain deleted cells when the
 * table has a disabled index, so a later index rebuild does not see corrupted data.
 *
 * @throws IOException propagated from the privileged action
 */
@Override
public void preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
        ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
        final CompactionRequest request) throws IOException {
    final String fullTableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
    // This path queries SYSTEM.CATALOG, so bail out for minor compactions and when
    // SYSTEM.CATALOG itself is the table being compacted (avoids a self-referential call).
    if (!request.isMajor() || PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName)) {
        return;
    }
    // Compaction and split upcalls run with the effective user context of the requesting
    // user; cross-cluster RPC would fail unless we switch to the login user's context.
    User.runAsLoginUser((PrivilegedExceptionAction<Void>) () -> {
        // If the index is disabled, keep the deleted cells so the rebuild doesn't corrupt the index
        try (PhoenixConnection conn =
                QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class)) {
            PTable table = PhoenixRuntime.getTableNoCache(conn, fullTableName);
            List<PTable> indexes = PTableType.INDEX.equals(table.getType())
                    ? Lists.newArrayList(table) : table.getIndexes();
            // FIXME need to handle views and indexes on views as well
            for (PTable index : indexes) {
                if (index.getIndexDisableTimestamp() != 0) {
                    LOGGER.info(
                        "Modifying major compaction scanner to retain deleted cells for a table with disabled index: "
                                + fullTableName);
                    options.setKeepDeletedCells(KeepDeletedCells.TRUE);
                    options.readAllVersions();
                    options.setTTL(Long.MAX_VALUE);
                }
            }
        } catch (Exception e) {
            if (e instanceof TableNotFoundException) {
                // non-Phoenix HBase tables won't be found, do nothing
                LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
            } else {
                LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
                        + fullTableName,
                        e);
            }
        }
        return null;
    });
}