The following lists example code for org.apache.hadoop.hbase.HConstants#EMPTY_END_ROW. Click the link to view the source code on GitHub, or leave a comment on the right.
/**
 * Builds a limited scan over the vertex index rows for the given label/key.
 * The scan starts at {@code from} (or at the index prefix when {@code from}
 * is null) and is capped at {@code limit} rows via a PageFilter.
 */
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) {
    byte[] prefix = serializeForRead(label, isUnique, key, null);
    byte[] startRow = (from == null)
        ? prefix
        : serializeForRead(label, isUnique, key, from);
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    boolean isBigtable =
        graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE;
    if (isBigtable && reversed) {
        throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
    }
    if (isBigtable) {
        // PrefixFilter in Bigtable does not automatically stop
        // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
        stopRow = HBaseGraphUtils.incrementBytes(prefix);
    }
    if (reversed) {
        // Reverse scans begin just past the requested start row.
        startRow = HBaseGraphUtils.incrementBytes(startRow);
    }
    Scan scan = new Scan(startRow, stopRow);
    FilterList filters = new FilterList();
    filters.addFilter(new PrefixFilter(prefix));
    filters.addFilter(new PageFilter(limit));
    scan.setFilter(filters);
    scan.setReversed(reversed);
    return scan;
}
/**
 * <p>When scanning for a prefix, the scan should stop immediately after the last row that
 * has the specified prefix. This method calculates the closest next rowKey immediately following
 * the given rowKeyPrefix.</p>
 * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
 * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
 * simply increment the last byte of the array.
 * But if your application uses real binary rowids you may run into the scenario that your
 * prefix is something like:</p>
 * <b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
 * Then this stopRow needs to be fed into the actual scan<br/>
 * <b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
 * This method calculates the correct stop row value for this usecase.
 *
 * @param rowKeyPrefix the rowKey<u>Prefix</u>.
 * @return the closest next rowKey immediately following the given rowKeyPrefix.
 */
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Treat the prefix as one big unsigned big-endian number and add one by hand.
    // Locate the end of the portion that precedes any trailing 0xFF run.
    int end = rowKeyPrefix.length;
    while (end > 0 && rowKeyPrefix[end - 1] == (byte) 0xFF) {
        end--;
    }
    if (end == 0) {
        // The prefix is all 0xFF bytes — no key can sort after it, so the
        // scan must run to the end of the table.
        return HConstants.EMPTY_END_ROW;
    }
    // Keep everything before the trailing 0xFF run, then bump the final byte.
    byte[] stopRow = Arrays.copyOf(rowKeyPrefix, end);
    stopRow[end - 1]++;
    return stopRow;
}
/**
 * Returns the smallest rowKey that sorts strictly after every row carrying
 * {@code rowKeyPrefix}, i.e. the exclusive stop row for a prefix scan.
 * An all-0xFF prefix has no successor, so the empty end row is returned.
 */
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Incrementing the prefix as one unsigned number: find the rightmost
    // byte that can be incremented without carrying (i.e. is not 0xFF).
    int lastIncrementable = -1;
    for (int i = rowKeyPrefix.length - 1; i >= 0; i--) {
        if (rowKeyPrefix[i] != (byte) 0xFF) {
            lastIncrementable = i;
            break;
        }
    }
    if (lastIncrementable < 0) {
        // Every byte is 0xFF — the prefix is the last possible one, so the
        // scan should continue to the end of the table.
        return HConstants.EMPTY_END_ROW;
    }
    // Drop the trailing 0xFF run and increment the last remaining byte.
    byte[] nextRowKey = Arrays.copyOfRange(rowKeyPrefix, 0, lastIncrementable + 1);
    nextRowKey[lastIncrementable] = (byte) (nextRowKey[lastIncrementable] + 1);
    return nextRowKey;
}
/**
 * Resets per-request state: fresh tuple description, no splits discovered
 * yet, and a scan range that defaults to the whole table.
 */
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);
    // Default to an unbounded scan until a filter narrows the range.
    scanStartKey = HConstants.EMPTY_START_ROW;
    scanEndKey = HConstants.EMPTY_END_ROW;
    currentRegionIndex = 0;
    splits = new ArrayList<>();
    tupleDescription = new HBaseTupleDescription(requestContext);
}
/**
 * Initializes HBaseAccessor based on GPDB table description and
 * initializes the scan start and end keys of the HBase table to default values.
 *
 * @param requestContext data provided in the request
 */
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);
    // Consistency fix: the sibling initialize() overload builds the tuple
    // description from the requestContext parameter; this one referenced the
    // inherited 'context' field instead. After super.initialize(requestContext)
    // the two should be the same object — TODO confirm against BasePlugin.
    tupleDescription = new HBaseTupleDescription(requestContext);
    split = null;
    // Default to scanning the entire table until a filter narrows the range.
    scanStartKey = HConstants.EMPTY_START_ROW;
    scanEndKey = HConstants.EMPTY_END_ROW;
}
/**
 * Builds a limited scan over the edge index rows for the given vertex,
 * direction, key and label. The scan starts at {@code fromValue} (or at the
 * index prefix when null) and is capped at {@code limit} rows via a PageFilter.
 */
private Scan getEdgesScanWithLimit(Vertex vertex, Direction direction, boolean isUnique, String key, String label,
                                   Object fromValue, int limit, boolean reversed) {
    LOGGER.trace("Executing Scan, type: {}, id: {}", "key-limit", vertex.id());
    byte[] prefix = serializeForRead(vertex, direction, isUnique, key, label, null);
    byte[] startRow = (fromValue == null)
        ? prefix
        : serializeForRead(vertex, direction, isUnique, key, label, fromValue);
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    boolean isBigtable =
        graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE;
    if (isBigtable && reversed) {
        throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
    }
    if (isBigtable) {
        // PrefixFilter in Bigtable does not automatically stop
        // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
        stopRow = HBaseGraphUtils.incrementBytes(prefix);
    }
    if (reversed) {
        // Reverse scans begin just past the requested start row.
        startRow = HBaseGraphUtils.incrementBytes(startRow);
    }
    Scan scan = new Scan(startRow, stopRow);
    FilterList filters = new FilterList();
    filters.addFilter(new PrefixFilter(prefix));
    filters.addFilter(new PageFilter(limit));
    scan.setFilter(filters);
    scan.setReversed(reversed);
    return scan;
}
/**
 * Returns the region start/end key arrays. A mock table has no real
 * regions, so it reports a single region covering the whole key space.
 */
@Override
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
    if (!isMock()) {
        return super.getStartEndKeys();
    }
    byte[][] startKeys = { HConstants.EMPTY_START_ROW };
    byte[][] endKeys = { HConstants.EMPTY_END_ROW };
    return new Pair<>(startKeys, endKeys);
}
/**
 * Asserts that a scan over [start, end] returns exactly the expected files.
 * Null boundaries mean "open ended" and are replaced by the table-wide
 * sentinel rows.
 */
private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
    Collection<HStoreFile> results) throws Exception {
    byte[] scanStart = (start == null) ? HConstants.EMPTY_START_ROW : start;
    byte[] scanEnd = (end == null) ? HConstants.EMPTY_END_ROW : end;
    Collection<HStoreFile> candidates = manager.getFilesForScan(scanStart, true, scanEnd, false);
    // Equal sizes plus membership of every expected file implies equal sets.
    assertEquals(results.size(), candidates.size());
    for (HStoreFile expected : results) {
        assertTrue(candidates.contains(expected));
    }
}
/**
 * Returns the start keys of all regions intersecting [start, end].
 * Null bounds default to the table-wide sentinel rows.
 */
private List<byte[]> getStartKeysInRange(byte[] start, byte[] end) throws IOException {
    byte[] rangeStart = (start == null) ? HConstants.EMPTY_START_ROW : start;
    byte[] rangeEnd = (end == null) ? HConstants.EMPTY_END_ROW : end;
    return getKeysAndRegionsInRange(rangeStart, rangeEnd, true).getFirst();
}
/**
 * Creates a builder with an empty filter queue and an unbounded scan
 * range; parsed filters may later tighten the start/end keys.
 */
public HBaseFilterBuilder(HBaseTupleDescription tupleDescription) {
    this.startKey = HConstants.EMPTY_START_ROW;
    this.endKey = HConstants.EMPTY_END_ROW;
    this.tupleDescription = tupleDescription;
    this.filterQueue = new LinkedList<>();
}
/**
 * Returns the end key of the region at {@code index}: the next split key,
 * or the empty end row for the last region (which has no upper bound).
 */
private byte[] getEndKey(int index) {
    if (index == SPLIT_KEYS.length) {
        return HConstants.EMPTY_END_ROW;
    }
    return SPLIT_KEYS[index];
}
/**
 * Normalizes a possibly-null end key: null means "scan to the end of the
 * table" and is replaced by the empty end-row sentinel.
 */
private static byte[] checkEndKey(byte[] endKey) {
    if (endKey == null) {
        return HConstants.EMPTY_END_ROW;
    }
    return endKey;
}
/**
 * Private constructor used when constructing a MutableRegionInfo for the
 * first meta regions.
 */
private MutableRegionInfo(long regionId, TableName tableName, int replicaId) {
// Meta regions cover the entire key space: empty start and end rows, and the
// two boolean flags passed to the delegate constructor are both false.
// NOTE(review): the flags' meanings (split/offline?) aren't visible here —
// confirm against the delegated constructor's signature.
this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId,
replicaId, false);
}
/**
 * Computes the (start, stop) scan intervals for a region, expanding the
 * original key range across every bucket prefix in {@code prefixList}.
 * When the original range is open on either side, the interval boundaries
 * are chained so each interval's stop key is the next interval's start key.
 *
 * @param originalStartKey user-requested scan start key
 * @param originalStopKey  user-requested scan stop key
 * @param regionStartKey   start key of the region being scanned
 * @param regionStopKey    stop key of the region being scanned
 * @param prefixList       the set of single-byte bucket prefixes
 * @return one (start, stop) pair per generated interval
 * @throws IOException if the generated start/stop key counts differ
 */
public static Pair<byte[], byte[]>[] getDistributedIntervals(
byte[] originalStartKey, byte[] originalStopKey,
byte[] regionStartKey, byte[] regionStopKey,
String prefixList) throws IOException {
if (LOG.isDebugEnabled()) {
// Fixed: String.format is static — the original called it on the "" instance
// ("".format(...)), which obscures intent and is flagged by Error Prone.
// Also only build the message when debug logging is enabled.
LOG.debug(String.format("OSRT: (%s) OSTP: (%s) RSRT: (%s) RSTP: (%s) PRFX: (%s)",
Bytes.toString(originalStartKey),
Bytes.toString(originalStopKey),
Bytes.toString(regionStartKey),
Bytes.toString(regionStopKey),
prefixList
));
}
byte[][] startKeys;
byte[][] stopKeys;
// Choose the key generator based on which side(s) of the region are unbounded.
if(Arrays.equals(regionStartKey, HConstants.EMPTY_START_ROW)
&& Arrays.equals(regionStopKey, HConstants.EMPTY_END_ROW) ) {
startKeys = getAllKeys(originalStartKey, prefixList);
stopKeys = getAllKeys(originalStopKey, prefixList);
} else if(Arrays.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
startKeys = getAllKeysWithStop(originalStartKey, prefixList, regionStopKey[0]);
stopKeys = getAllKeysWithStop(originalStopKey, prefixList, regionStopKey[0]);
} else if(Arrays.equals(regionStopKey, HConstants.EMPTY_END_ROW)) {
startKeys = getAllKeysWithStart(originalStartKey, prefixList, regionStartKey[0]);
stopKeys = getAllKeysWithStart(originalStopKey, prefixList, regionStartKey[0]);
} else {
startKeys = getAllKeysInRange(originalStartKey, prefixList, regionStartKey[0], regionStopKey[0]);
stopKeys = getAllKeysInRange(originalStopKey, prefixList, regionStartKey[0], regionStopKey[0]);
}
if( startKeys.length != stopKeys.length) {
throw new IOException("LENGTH of START Keys and STOP Keys DO NOT match");
}
// If the original range is open on a side, chain the intervals: shift the
// keys so each interval's stop equals the next interval's start, and pin
// the outermost boundaries to the table-wide sentinels.
if( Arrays.equals(originalStartKey, HConstants.EMPTY_START_ROW)
&& Arrays.equals(originalStopKey, HConstants.EMPTY_END_ROW) ) {
Arrays.sort(stopKeys, Bytes.BYTES_RAWCOMPARATOR);
// stop keys are the start key of the next interval
for (int i = startKeys.length - 1; i >= 1; i--) {
startKeys[i] = startKeys[i - 1];
}
startKeys[0] = HConstants.EMPTY_START_ROW;
stopKeys[stopKeys.length - 1] = HConstants.EMPTY_END_ROW;
} else if (Arrays.equals(originalStartKey, HConstants.EMPTY_START_ROW)) {
Arrays.sort(stopKeys, Bytes.BYTES_RAWCOMPARATOR);
// stop keys are the start key of the next interval
for (int i = startKeys.length - 1; i >= 1; i--) {
startKeys[i] = startKeys[i - 1];
}
startKeys[0] = HConstants.EMPTY_START_ROW;
} else if (Arrays.equals(originalStopKey, HConstants.EMPTY_END_ROW)) {
Arrays.sort(startKeys, Bytes.BYTES_RAWCOMPARATOR);
// stop keys are the start key of the next interval
for (int i = 0; i < stopKeys.length - 1; i++) {
stopKeys[i] = stopKeys[i + 1];
}
stopKeys[stopKeys.length - 1] = HConstants.EMPTY_END_ROW;
}
// Generic array creation is unavoidable here; the cast is safe because only
// Pair<byte[], byte[]> elements are ever stored.
@SuppressWarnings("unchecked")
Pair<byte[], byte[]>[] intervals = new Pair[startKeys.length];
for (int i = 0; i < startKeys.length; i++) {
intervals[i] = new Pair<byte[], byte[]>(startKeys[i], stopKeys[i]);
}
return intervals;
}
// Normalizes the sentinel "unbounded" rows to null for internal storage.
// NOTE(review): these are deliberate reference (==) comparisons against the
// shared HConstants sentinel arrays — they match only the sentinel objects
// themselves, not arbitrary empty byte arrays. Confirm all callers pass the
// HConstants instances (not a fresh byte[0]) when they mean "unbounded".
public ScanRange(final byte[] startRow, final byte[] endRow) {
this.startRow = startRow == HConstants.EMPTY_START_ROW ? null
: startRow;
this.endRow = endRow == HConstants.EMPTY_END_ROW ? null : endRow;
}
/** Sentinel end key: this region extends to the end of the table. */
@Override
public byte[] getEndKey() {
    return HConstants.EMPTY_END_ROW;
}