Below are example usages of org.apache.hadoop.hbase.client.HTableInterface (as obtained from org.apache.hadoop.hbase.client.HTablePool). Follow the links to view the source code on GitHub, or leave a comment in the panel on the right.
@Override
public void initialize(Configuration conf) throws IOException {
    // Keep the configuration and open the shared HBase admin/connection handles.
    this.conf = conf;
    this.hBaseAdmin = new HBaseAdmin(conf);
    this.connection = HConnectionManager.createConnection(conf);

    // Resolve the prune-state table name, falling back to the default when unset.
    String configuredName = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
            TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
    final TableName stateTable = TableName.valueOf(configuredName);
    LOG.info("Initializing plugin with state table {}:{}", stateTable.getNamespaceAsString(),
            stateTable.getNameAsString());
    createPruneTable(stateTable);

    // Hand out fresh table handles on demand; each one goes through the shared connection.
    this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
        @Override
        public HTableInterface get() throws IOException {
            return connection.getTable(stateTable);
        }
    });
}
/**
 * Removes prune-upper-bound entries whose value is below {@code deletionPruneUpperBound},
 * skipping any region present in {@code excludeRegions}.
 * Once the invalid list has been pruned up to deletionPruneUpperBound, entries below that
 * bound are no longer needed; deletion is restricted to regions not in the exclude set
 * (i.e. regions no longer in existence) to avoid update/delete race conditions.
 *
 * @param deletionPruneUpperBound prune upper bound below which region entries are removed
 * @param excludeRegions set of regions that must be retained
 * @throws IOException when not able to delete data in HBase
 */
public void deletePruneUpperBounds(long deletionPruneUpperBound, SortedSet<byte[]> excludeRegions)
        throws IOException {
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        Scan regionScan = new Scan(makeRegionKey(EMPTY_BYTE_ARRAY), REGION_KEY_PREFIX_STOP);
        regionScan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);
        try (ResultScanner scanner = stateTable.getScanner(regionScan)) {
            for (Result row = scanner.next(); row != null; row = scanner.next()) {
                byte[] regionId = getRegionFromKey(row.getRow());
                if (excludeRegions.contains(regionId)) {
                    continue; // region still live, keep its entry
                }
                byte[] boundBytes = row.getValue(FAMILY, PRUNE_UPPER_BOUND_COL);
                if (boundBytes != null && Bytes.toLong(boundBytes) < deletionPruneUpperBound) {
                    stateTable.delete(new Delete(row.getRow()));
                }
            }
        }
    }
}
/**
 * Writes an oversized cell value out to HDFS at the redirect path derived from the
 * resource path, replacing any previous file at that location.
 *
 * @param resPath resource path the large cell belongs to; used to derive the HDFS path
 * @param largeColumn raw bytes of the large column value
 * @param table table handle (unused here; kept for interface compatibility with callers)
 * @return the HDFS path the bytes were written to
 * @throws IOException when the filesystem cannot be reached or the write/close fails
 */
private Path writeLargeCellToHdfs(String resPath, byte[] largeColumn, HTableInterface table) throws IOException {
    Path redirectPath = bigCellHDFSPath(resPath);
    Configuration hconf = HadoopUtil.getCurrentConfiguration();
    FileSystem fileSystem = FileSystem.get(hconf);
    if (fileSystem.exists(redirectPath)) {
        fileSystem.delete(redirectPath, true);
    }
    // try-with-resources instead of closeQuietly: a failed close() can mean the data
    // never reached HDFS, and that must surface as an IOException rather than be swallowed.
    try (FSDataOutputStream out = fileSystem.create(redirectPath)) {
        out.write(largeColumn);
    }
    return redirectPath;
}
/**
 * Return the set of regions saved for the time at or before the given time. This method finds
 * the greatest time that is less than or equal to the given time, and then returns all regions
 * with that exact time, but none that are older than that.
 * Snapshots whose region count does not match the recorded count are treated as incomplete
 * and skipped by stepping back one millisecond before the snapshot's time.
 *
 * @param time timestamp in milliseconds
 * @return set of regions and time at which they were recorded, or null if no regions found
 * @throws IOException when not able to read the data from HBase
 */
@Nullable
public TimeRegions getRegionsOnOrBeforeTime(long time) throws IOException {
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        TimeRegions timeRegions;
        while ((timeRegions = getNextSetOfTimeRegions(stateTable, time)) != null) {
            int count = getRegionCountForTime(stateTable, timeRegions.getTime());
            if (count != -1 && count == timeRegions.getRegions().size()) {
                return timeRegions;
            }
            // Parameterized logging instead of String.format: no formatting cost when
            // WARN is disabled, and consistent with the rest of the file's log calls.
            LOG.warn("Got incorrect count for regions saved at time {}, expected = {} but actual = {}",
                     timeRegions.getTime(), count, timeRegions.getRegions().size());
            time = timeRegions.getTime() - 1;
        }
        return null;
    }
}
/**
 * Fetches a single row by key and deserializes it into the given KVSerializable type.
 *
 * @param tableName name of the table to read from
 * @param clazz concrete KVSerializable class; must expose a usable no-arg constructor first
 * @param key row key to look up
 * @return the deserialized instance, or null when the row is absent or an error occurs
 */
protected KVSerializable getRow(String tableName, Class clazz, byte[] key) {
    // Acquire the table handle exactly once. The original obtained two handles
    // (`table` and `htable`) and only ever closed the first, leaking a handle per call.
    HTableInterface table = getHTableInterface(tableName);
    try {
        Get get = new Get(key);
        Result result = table.get(get);
        if (result != null) {
            KVSerializable kvInst = (KVSerializable) clazz.getConstructors()[0].newInstance();
            kvInst.fromKV(key, result.getValue(CF, V_DATA));
            return kvInst;
        }
    } catch (Exception ex) {
        logger.error("Scan metric meta error, class:{}", clazz.getSimpleName(), ex);
    } finally {
        closeTable(table);
    }
    return null;
}
/**
 * Return regions that were recorded as empty after the given time.
 *
 * @param time time in milliseconds
 * @param includeRegions If not null, the returned set will be an intersection of the
 *                       includeRegions set and the empty regions after the given time
 */
public SortedSet<byte[]> getEmptyRegionsAfterTime(long time, @Nullable SortedSet<byte[]> includeRegions)
        throws IOException {
    SortedSet<byte[]> found = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        // Start strictly after `time` by scanning from time + 1.
        byte[] startKey = makeEmptyRegionTimeKey(Bytes.toBytes(time + 1), EMPTY_BYTE_ARRAY);
        Scan scan = new Scan(startKey, EMPTY_REGION_TIME_KEY_PREFIX_STOP);
        scan.addColumn(FAMILY, EMPTY_REGION_TIME_COL);
        try (ResultScanner scanner = stateTable.getScanner(scan)) {
            for (Result row = scanner.next(); row != null; row = scanner.next()) {
                byte[] candidate = getEmptyRegionFromKey(row.getRow());
                boolean keep = includeRegions == null || includeRegions.contains(candidate);
                if (keep) {
                    found.add(candidate);
                }
            }
        }
    }
    return Collections.unmodifiableSortedSet(found);
}
@Override
protected void putResourceImpl(String resPath, InputStream content, long ts) throws IOException {
    // Buffer the full payload in memory before writing the row.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    IOUtils.copy(content, buffer);
    buffer.close();
    HTableInterface table = getConnection().getTable(getAllInOneTableName());
    try {
        byte[] rowKey = Bytes.toBytes(resPath);
        Put put = buildPut(resPath, ts, rowKey, buffer.toByteArray(), table);
        table.put(put);
        // Force buffered mutations out before releasing the table handle.
        table.flushCommits();
    } finally {
        IOUtils.closeQuietly(table);
    }
}
@Test
public void testCorrectlyCleansUpResources() throws Exception{
    ExecutorService pool = Executors.newFixedThreadPool(1);
    FakeTableFactory tableFactory = new FakeTableFactory(
            Collections.<ImmutableBytesPtr, HTableInterface> emptyMap());
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    Abortable abortable = Mockito.mock(Abortable.class);
    Stoppable stoppable = Mockito.mock(Stoppable.class);
    // wire up a minimal writer, then immediately stop it
    committer.setup(tableFactory, pool, abortable, stoppable, 1);
    committer.stop(this.test.getTableNameString() + " finished");
    // stopping the writer must tear down both the table factory and the executor,
    // and must not touch the abortable/stoppable collaborators
    assertTrue("Factory didn't get shutdown after writer#stop!", tableFactory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", pool.isShutdown());
    Mockito.verifyZeroInteractions(abortable, stoppable);
}
/**
 * Creates a new sequence with the given parameters, registering it in the shared
 * sequence map and persisting its initial state via an HBase Append.
 *
 * @return the sequence value produced from the server's Append result
 * @throws SQLException when the underlying HBase append fails (server exception is parsed)
 */
@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, int cacheSize, long timestamp)
        throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName);
    Sequence newSequences = new Sequence(sequenceKey);
    // First caller wins; subsequent callers reuse the already-registered Sequence.
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    // Acquire the lock *before* the try block: if lock() itself throws, the original
    // code would still run unlock() in finally on a lock it never held, masking the
    // real failure with an IllegalMonitorStateException.
    sequence.getLock().lock();
    try {
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp);
        // NOTE(review): the table handle is never closed here; confirm getTable()
        // returns a cached/shared handle whose lifecycle is managed elsewhere.
        HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME_BYTES);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
/**
 * Opens a scanner over the cuboid data, pushing aggregation down to the region-server
 * coprocessor when enabled; otherwise returns a plain scanner.
 *
 * @return a ResultScanner over (possibly pre-aggregated) rows
 * @throws IOException when the scanner cannot be opened
 */
public static ResultScanner scanWithCoprocessorIfBeneficial(CubeSegment segment, Cuboid cuboid, TupleFilter tupleFiler, //
        Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context, HTableInterface table, Scan scan) throws IOException {
    // Idiomatic negation instead of '== false'.
    if (!context.isCoprocessorEnabled()) {
        return table.getScanner(scan);
    }
    CoprocessorRowType type = CoprocessorRowType.fromCuboid(segment, cuboid);
    CoprocessorFilter filter = CoprocessorFilter.fromFilter(segment, tupleFiler);
    CoprocessorProjector projector = CoprocessorProjector.makeForObserver(segment, cuboid, groupBy);
    ObserverAggregators aggrs = ObserverAggregators.fromValueDecoders(rowValueDecoders);
    if (DEBUG_LOCAL_COPROCESSOR) {
        // Debug path: run the aggregation in-process instead of on the region server.
        RegionScanner innerScanner = new RegionScannerAdapter(table.getScanner(scan));
        AggregationScanner aggrScanner = new AggregationScanner(type, filter, projector, aggrs, innerScanner);
        return new ResultScannerAdapter(aggrScanner);
    } else {
        // Ship the serialized coprocessor parameters along with the scan attributes.
        scan.setAttribute(AggregateRegionObserver.COPROCESSOR_ENABLE, new byte[] { 0x01 });
        scan.setAttribute(AggregateRegionObserver.TYPE, CoprocessorRowType.serialize(type));
        scan.setAttribute(AggregateRegionObserver.PROJECTOR, CoprocessorProjector.serialize(projector));
        scan.setAttribute(AggregateRegionObserver.AGGREGATORS, ObserverAggregators.serialize(aggrs));
        scan.setAttribute(AggregateRegionObserver.FILTER, CoprocessorFilter.serialize(filter));
        return table.getScanner(scan);
    }
}
/**
 * Asserts that the row fetched by {@code get} carries exactly the given cell values
 * (in order); a null expectation asserts the row is absent.
 */
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
    Result result = table.get(get);
    if (expectedValues == null) {
        // null expectation means the row must not exist at all
        assertTrue(result.isEmpty());
        return;
    }
    assertFalse(result.isEmpty());
    // Default to the shared test family/qualifier unless the Get narrowed them down.
    byte[] family = TestBytes.family;
    byte[] col = TestBytes.qualifier;
    if (get.hasFamilies()) {
        family = get.getFamilyMap().keySet().iterator().next();
        col = get.getFamilyMap().get(family).first();
    }
    Iterator<Cell> cells = result.getColumnCells(family, col).iterator();
    for (byte[] expected : expectedValues) {
        Assert.assertTrue(cells.hasNext());
        assertArrayEquals(expected, CellUtil.cloneValue(cells.next()));
    }
}
/**
 * Checks the fetched row against the expected cell values; null means "row absent".
 */
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
    Result fetched = table.get(get);
    if (expectedValues == null) {
        assertTrue(fetched.isEmpty());
    } else {
        assertFalse(fetched.isEmpty());
        // Resolve family/qualifier from the Get when present, else use the test defaults.
        byte[] targetFamily = TestBytes.family;
        byte[] targetQualifier = TestBytes.qualifier;
        if (get.hasFamilies()) {
            targetFamily = get.getFamilyMap().keySet().iterator().next();
            targetQualifier = get.getFamilyMap().get(targetFamily).first();
        }
        Iterator<Cell> actual = fetched.getColumnCells(targetFamily, targetQualifier).iterator();
        for (int i = 0; i < expectedValues.size(); i++) {
            Assert.assertTrue(actual.hasNext());
            assertArrayEquals(expectedValues.get(i), CellUtil.cloneValue(actual.next()));
        }
    }
}
@Test
public void testCorrectlyCleansUpResources() throws Exception{
    ExecutorService executor = Executors.newFixedThreadPool(1);
    FakeTableFactory fakeFactory = new FakeTableFactory(
            Collections.<ImmutableBytesPtr, HTableInterface> emptyMap());
    ParallelWriterIndexCommitter indexWriter = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    Abortable abort = Mockito.mock(Abortable.class);
    Stoppable stop = Mockito.mock(Stoppable.class);
    // create a simple writer and then stop it right away
    indexWriter.setup(fakeFactory, executor, abort, stop, 1);
    indexWriter.stop(this.test.getTableNameString() + " finished");
    // stop() must cascade shutdown to the factory and executor without
    // interacting with abort/stop
    assertTrue("Factory didn't get shutdown after writer#stop!", fakeFactory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", executor.isShutdown());
    Mockito.verifyZeroInteractions(abort, stop);
}
@Test
public void testCorrectlyCleansUpResources() throws Exception{
    ExecutorService workPool = Executors.newFixedThreadPool(1);
    FakeTableFactory stubFactory = new FakeTableFactory(
            Collections.<ImmutableBytesPtr, HTableInterface> emptyMap());
    ParallelWriterIndexCommitter parallelWriter = new ParallelWriterIndexCommitter();
    Abortable abortMock = Mockito.mock(Abortable.class);
    Stoppable stopMock = Mockito.mock(Stoppable.class);
    // set up a bare-bones writer, then stop it immediately
    parallelWriter.setup(stubFactory, workPool, abortMock, stopMock, 1);
    parallelWriter.stop(this.test.getTableNameString() + " finished");
    // verify the stop call propagated shutdown to both managed resources
    assertTrue("Factory didn't get shutdown after writer#stop!", stubFactory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", workPool.isShutdown());
    Mockito.verifyZeroInteractions(abortMock, stopMock);
}
/**
 * Gets a list of {@link RegionPruneInfo} for given regions. Returns all regions if the
 * given regions set is null.
 *
 * @param regions a set of regions, or null for every region on record
 * @return an unmodifiable list of {@link RegionPruneInfo}s
 * @throws IOException when not able to read the data from HBase
 */
public List<RegionPruneInfo> getPruneInfoForRegions(@Nullable SortedSet<byte[]> regions) throws IOException {
    List<RegionPruneInfo> pruneInfos = new ArrayList<>();
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        Scan scan = new Scan(makeRegionKey(EMPTY_BYTE_ARRAY), REGION_KEY_PREFIX_STOP);
        scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);
        try (ResultScanner scanner = stateTable.getScanner(scan)) {
            for (Result row = scanner.next(); row != null; row = scanner.next()) {
                byte[] region = getRegionFromKey(row.getRow());
                boolean wanted = (regions == null) || regions.contains(region);
                if (!wanted) {
                    continue;
                }
                Cell latest = row.getColumnLatestCell(FAMILY, PRUNE_UPPER_BOUND_COL);
                if (latest != null) {
                    // Record both the prune upper bound and the cell's write timestamp.
                    pruneInfos.add(new RegionPruneInfo(region, Bytes.toStringBinary(region),
                            Bytes.toLong(CellUtil.cloneValue(latest)), latest.getTimestamp()));
                }
            }
        }
    }
    return Collections.unmodifiableList(pruneInfos);
}
/**
 * Deletes region prune-upper-bound entries whose value is smaller than the supplied
 * bound, except for regions listed in {@code excludeRegions}.
 * After the invalid list is pruned up to deletionPruneUpperBound, entries below that
 * bound are only retained for regions still in existence (the exclude set), which
 * avoids update/delete race conditions.
 *
 * @param deletionPruneUpperBound prune upper bound below which regions will be deleted
 * @param excludeRegions set of regions that should not be deleted
 * @throws IOException when not able to delete data in HBase
 */
public void deletePruneUpperBounds(long deletionPruneUpperBound, SortedSet<byte[]> excludeRegions)
        throws IOException {
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        byte[] firstRegionRow = makeRegionKey(EMPTY_BYTE_ARRAY);
        Scan scan = new Scan(firstRegionRow, REGION_KEY_PREFIX_STOP);
        scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);
        try (ResultScanner rows = stateTable.getScanner(scan)) {
            Result current;
            while ((current = rows.next()) != null) {
                byte[] regionName = getRegionFromKey(current.getRow());
                byte[] boundValue = current.getValue(FAMILY, PRUNE_UPPER_BOUND_COL);
                // Delete only when the region is not excluded, has a recorded bound,
                // and that bound is strictly below the deletion threshold.
                boolean deletable = !excludeRegions.contains(regionName)
                        && boundValue != null
                        && Bytes.toLong(boundValue) < deletionPruneUpperBound;
                if (deletable) {
                    stateTable.delete(new Delete(current.getRow()));
                }
            }
        }
    }
}
@Override
public void updateUser(UserDetails user) {
    HTableInterface htable = null;
    try {
        // Serialize the authority set once, then overwrite the user's row.
        byte[] serializedAuthorities = serialize(user.getAuthorities());
        htable = HBaseConnection.get(hbaseUrl).getTable(userTableName);
        Put row = new Put(Bytes.toBytes(user.getUsername()));
        row.add(Bytes.toBytes(USER_AUTHORITY_FAMILY), Bytes.toBytes(USER_AUTHORITY_COLUMN), serializedAuthorities);
        htable.put(row);
        // Push any buffered mutation out before the handle is released.
        htable.flushCommits();
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    } finally {
        IOUtils.closeQuietly(htable);
    }
}
/**
 * Verifies that the result of {@code get} matches the expected cell values exactly;
 * passing null asserts the row does not exist.
 */
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
    Result rowResult = table.get(get);
    if (expectedValues == null) {
        assertTrue(rowResult.isEmpty());
    } else {
        assertFalse(rowResult.isEmpty());
        byte[] fam = TestBytes.family;
        byte[] qual = TestBytes.qualifier;
        // When the Get restricts family/qualifier, verify against that restriction.
        if (get.hasFamilies()) {
            fam = get.getFamilyMap().keySet().iterator().next();
            qual = get.getFamilyMap().get(fam).first();
        }
        Iterator<Cell> versions = rowResult.getColumnCells(fam, qual).iterator();
        for (byte[] want : expectedValues) {
            Assert.assertTrue(versions.hasNext());
            assertArrayEquals(want, CellUtil.cloneValue(versions.next()));
        }
    }
}
// Persists one search-click event as a new row in the "searchclicks" table.
@Override
public void insertEventData(final byte[] body) {
LOG.debug("Inserting searchclicks table row content event!");
hbaseTemplate.execute("searchclicks", new TableCallback<Object>() {
@Override
public Object doInTable(HTableInterface table) throws Throwable {
// Random UUID row key: spreads writes, but rows carry no time ordering.
String rowId = UUID.randomUUID().toString();
Put p = new Put(Bytes.toBytes(rowId));
LOG.debug("Inserting searchclicks table row id: {}", rowId);
// Raw event body stored under the client column family, qualifier "eventid".
p.add(HbaseJsonEventSerializer.COLUMFAMILY_CLIENT_BYTES, Bytes.toBytes("eventid"), body);
table.put(p);
// NOTE(review): closing the table inside the callback looks suspicious -- many
// template implementations manage the table lifecycle themselves; confirm this
// does not double-close the handle or break table pooling.
table.close();
return null;
}
});
LOG.debug("Inserting searchclicks table row content event done!");
}
@Test
public void testMappingHbaseTableToPhoenixTable() throws Exception {
    final TableName tableName = TableName.valueOf("MTEST");
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    HBaseAdmin admin = conn.getQueryServices().getAdmin();
    try {
        // Create table then get the single region for our new table.
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(Bytes.toBytes("cf"));
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor);
        HTableInterface t = conn.getQueryServices().getTable(Bytes.toBytes("MTEST"));
        try {
            insertData(tableName.getName(), admin, t);
        } finally {
            // Close the table even when insertData throws; the original leaked it on failure.
            t.close();
        }
        try {
            testCreateTableMismatchedType();
            fail();
        } catch (SQLException e) {
            assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(),e.getErrorCode());
        }
    } finally {
        admin.close();
        // The original never closed the Phoenix connection, leaking it on every run.
        conn.close();
    }
}
/**
 * Scans the table with the given filter, evaluates the tenant-id expression on each row,
 * and asserts the collected tenant ids (sorted, since salting scrambles order) match
 * the expected set.
 */
private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    Scan scan = new Scan();
    scan.setFilter(filter);
    List<String> actualTenantIds = Lists.newArrayListWithExpectedSize(tenantIds.length);
    List<String> expectedTenantIds = Arrays.asList(tenantIds);
    // try-with-resources: the original never closed the scanner, leaking
    // server-side scanner resources on every call.
    try (ResultScanner scanner = htable.getScanner(scan)) {
        ResultTuple tuple = new ResultTuple();
        Result result;
        while ((result = scanner.next()) != null) {
            tuple.setResult(result);
            e.evaluate(tuple, ptr);
            String tenantId = (String) PVarchar.INSTANCE.toObject(ptr);
            // Normalize a null evaluation result to the empty string.
            actualTenantIds.add(tenantId == null ? "" : tenantId);
        }
    }
    // Need to sort because of salting
    Collections.sort(actualTenantIds);
    assertEquals(expectedTenantIds, actualTenantIds);
}
/**
 * Returns a per-thread cached table handle for the given table name, creating and
 * caching one on first use.
 *
 * @param tableName name of the table to open
 * @return a table handle private to the calling thread
 * @throws IOException when the table cannot be opened
 */
public HTableInterface getTable(String tableName) throws IOException {
    long threadId = Thread.currentThread().getId();
    // computeIfAbsent: the original putIfAbsent(new HashMap<>()) allocated a
    // throwaway map on every call even when the thread's map already existed.
    HashMap<String, HTableInterface> tableMap =
            tables.computeIfAbsent(threadId, id -> new HashMap<>());
    HTableInterface table = tableMap.get(tableName);
    if (table == null) {
        table = connection.getTable(tableName);
        table.setAutoFlushTo(true);
        tableMap.put(tableName, table);
    }
    return table;
}
/**
 * Logs the column families of the named table; failures are logged, not thrown.
 */
public void printTableDesc(String tableName) {
    try {
        HTableDescriptor descriptor = getTable(tableName).getTableDescriptor();
        LOGGER.info(">>>> Print Table {} Desc", tableName);
        for (HColumnDescriptor family : descriptor.getColumnFamilies()) {
            LOGGER.info(">>>> family column: {}", family.getNameAsString());
        }
    } catch (Exception ex) {
        LOGGER.error(">>>> Print table desc error:", ex);
    }
}
// Applies the job's buffered HBase mutations against the connection mapped to the
// job's topic. Returns FINISH on success; FAIL when no connection or no commands
// exist for the job, or when an IOException occurs mid-batch.
@Override
public Status act(UpstreamJob job, byte[] bytes) {
HBaseConnection connection = connectionMap.get(job.getTopic());
if (connection == null) {
LogUtils.logErrorInfo("HBASE_error", "no hbase connection for topic=" + job.getTopic());
return FAIL;
}
if (CollectionUtils.isNotEmpty(job.getHbaseCommands())) {
try {
for (HbaseCommand hbaseCommand : job.getHbaseCommands()) {
// NOTE(review): table handles obtained here are never closed; presumably
// connection.getTable() returns a cached/pooled handle -- confirm, otherwise
// this leaks one handle per command.
HTableInterface table = connection.getTable(hbaseCommand.getTableName());
Mutation mutation = hbaseCommand.getMutation();
// Dispatch on the concrete mutation type; any other Mutation subtype is
// silently skipped.
if (mutation instanceof Put) {
table.put((Put) mutation);
} else if (mutation instanceof Delete) {
table.delete((Delete) mutation);
} else if (mutation instanceof Append) {
table.append((Append) mutation);
} else if (mutation instanceof Increment) {
table.increment((Increment) mutation);
}
}
MetricUtils.qpsAndFilterMetric(job, MetricUtils.ConsumeResult.SUCCESS);
return FINISH;
} catch (IOException e) {
// A failure partway through leaves earlier mutations applied; the whole job
// is reported as FAIL regardless.
LogUtils.logErrorInfo("HBASE_error", "job=" + job, e);
return FAIL;
}
} else {
LogUtils.logErrorInfo("HBASE_error", "no hbase command found, group:{}, topic:{}", group, job.getTopic());
return FAIL;
}
}
/**
 * Per-region compaction prune state. Captures the region identity from the coprocessor
 * environment and wires up a DataJanitorState backed by the given state table, plus a
 * prune-upper-bound writer obtained from its supplier.
 *
 * @param env coprocessor environment of the region being compacted
 * @param stateTable name of the transaction prune state table
 * @param pruneFlushInterval flush interval passed to the writer supplier
 *        (units not visible here -- see PruneUpperBoundWriterSupplier)
 */
public CompactionState(final RegionCoprocessorEnvironment env, final TableName stateTable, long pruneFlushInterval) {
this.regionName = env.getRegion().getRegionName();
this.regionNameAsString = env.getRegion().getRegionNameAsString();
// Table handles are created lazily, on demand, through the coprocessor environment.
DataJanitorState dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
@Override
public HTableInterface get() throws IOException {
return env.getTable(stateTable);
}
});
this.pruneUpperBoundWriterSupplier = new PruneUpperBoundWriterSupplier(stateTable, dataJanitorState,
pruneFlushInterval);
this.pruneUpperBoundWriter = pruneUpperBoundWriterSupplier.get();
}
/**
 * Initialize the Invalid List Debug Tool.
 * @param conf {@link Configuration}
 * @throws IOException when not able to create an HBase connection
 */
@Override
@SuppressWarnings("WeakerAccess")
public void initialize(final Configuration conf) throws IOException {
    LOG.debug("InvalidListPruningDebugMain : initialize method called");
    connection = HConnectionManager.createConnection(conf);
    // Resolve the prune-state table name, defaulting when not configured.
    String pruneTableName = conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
            TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE);
    tableName = TableName.valueOf(pruneTableName);
    // Table handles are created lazily, one per call, off the shared connection.
    dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
        @Override
        public HTableInterface get() throws IOException {
            return connection.getTable(tableName);
        }
    });
}
// Reads the number of regions recorded for the given time, or -1 when no count is stored.
@VisibleForTesting
int getRegionCountForTime(HTableInterface stateTable, long time) throws IOException {
    // Times are stored inverted so newer entries sort first.
    byte[] countKey = makeTimeRegionCountKey(Bytes.toBytes(getInvertedTime(time)));
    Get get = new Get(countKey);
    get.addColumn(FAMILY, REGION_TIME_COL);
    byte[] countBytes = stateTable.get(get).getValue(FAMILY, REGION_TIME_COL);
    if (countBytes == null) {
        return -1;
    }
    return Bytes.toInt(countBytes);
}
/**
 * Delete all inactive transaction bounds recorded for a time less than the given time.
 *
 * @param time time in milliseconds
 * @throws IOException when not able to delete data in HBase
 */
public void deleteInactiveTransactionBoundsOnOrBeforeTime(long time) throws IOException {
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        // Inverted-time keys mean "on or before `time`" is everything from this key forward.
        byte[] startKey = makeInactiveTransactionBoundTimeKey(Bytes.toBytes(getInvertedTime(time)));
        Scan scan = new Scan(startKey, INACTIVE_TRANSACTION_BOUND_TIME_KEY_PREFIX_STOP);
        scan.addColumn(FAMILY, INACTIVE_TRANSACTION_BOUND_TIME_COL);
        deleteFromScan(stateTable, scan);
    }
}
/**
 * Save the given region as empty as of the given time.
 *
 * @param time time in milliseconds
 * @param regionId region id
 */
public void saveEmptyRegionForTime(long time, byte[] regionId) throws IOException {
    try (HTableInterface stateTable = stateTableSupplier.get()) {
        // Marker cell only; presence of the row is what matters.
        Put marker = new Put(makeEmptyRegionTimeKey(Bytes.toBytes(time), regionId));
        marker.add(FAMILY, EMPTY_REGION_TIME_COL, COL_VAL);
        stateTable.put(marker);
    }
}
/**
 * Scans all rows whose key starts with the given prefix and parses them into entities.
 *
 * @param prefix row-key prefix to match
 * @return parsed entities for every matching row
 * @throws RuntimeException wrapping any failure during the scan or parse
 */
public final List<E> scanByRowPrefix(String prefix) {
    HTableInterface hTableInterface = getHTableInterface();
    try {
        Scan scan = new Scan();
        scan.setFilter(new PrefixFilter(Bytes.toBytes(prefix)));
        // Close the scanner deterministically; the original relied on the table
        // close to reap it, leaking server-side scanner resources in the meantime.
        try (ResultScanner resultScanner = hTableInterface.getScanner(scan)) {
            return parse(resultScanner);
        }
    } catch (Exception cause) {
        throw new RuntimeException(cause);
    } finally {
        closeHTableInterface(hTableInterface);
    }
}