下面列出了 java.util.NavigableSet#first() 的实例代码；可以点击链接到 GitHub 查看源代码，也可以在右侧发表评论。
/**
 * If the proposer of the previous block is missing, the validator with an Address above the
 * previous will become the next validator for the first round of the next block.
 *
 * <p>And validators will change from there.
 */
private Address handleMissingProposer(
    final Address prevBlockProposer,
    final Collection<Address> validatorsForRound,
    final ConsensusRoundIdentifier roundIdentifier) {
  final NavigableSet<Address> validatorSet = new TreeSet<>(validatorsForRound);
  // higher() yields the least address strictly greater than the dropped proposer,
  // or null when the proposer was the last entry — in which case we wrap around
  // to the smallest address in the set.
  final Address successor = validatorSet.higher(prevBlockProposer);
  final Address nextProposer = (successor != null) ? successor : validatorSet.first();
  return calculateRoundSpecificValidator(
      nextProposer, validatorsForRound, roundIdentifier.getRoundNumber());
}
@Test
public void testBasics() throws Exception {
  BadQueryHistory history =
      BadQueryHistoryManager.getInstance(getTestConfig()).getBadQueriesForProject("default");
  System.out.println(JsonUtil.writeValueAsIndentString(history));

  NavigableSet<BadQueryEntry> entries = history.getEntries();
  assertEquals(3, entries.size());

  // Entries are ordered, so first() is the oldest bad query.
  BadQueryEntry oldest = entries.first();
  assertEquals("Pushdown", oldest.getAdj());
  assertEquals("sandbox.hortonworks.com", oldest.getServer());
  assertEquals("select * from test_kylin_fact limit 10", oldest.getSql());

  // Dropping the head exposes the next-oldest entry, which must start later.
  entries.pollFirst();
  BadQueryEntry nextOldest = entries.first();
  assertTrue(nextOldest.getStartTime() > oldest.getStartTime());
}
@Test
public void testBasics() throws Exception {
  BadQueryHistory history =
      BadQueryHistoryManager.getInstance(getTestConfig()).getBadQueriesForProject("default");
  System.out.println(JsonUtil.writeValueAsIndentString(history));

  NavigableSet<BadQueryEntry> entries = history.getEntries();
  assertEquals(3, entries.size());

  // Entries are ordered, so first() is the oldest bad query.
  BadQueryEntry oldest = entries.first();
  assertEquals("Pushdown", oldest.getAdj());
  assertEquals("sandbox.hortonworks.com", oldest.getServer());
  assertEquals("select * from test_kylin_fact limit 10", oldest.getSql());

  // Dropping the head exposes the next-oldest entry, which must start later.
  entries.pollFirst();
  BadQueryEntry nextOldest = entries.first();
  assertTrue(nextOldest.getStartTime() > oldest.getStartTime());
}
/**
 * Builds the appropriate {@link UserScanQueryMatcher} (raw or normal) for a user scan.
 */
public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo,
    NavigableSet<byte[]> columns, long oldestUnexpiredTS, long now,
    RegionCoprocessorHost regionCoprocessorHost) throws IOException {
  // The scan has a "null column" when no explicit qualifiers were requested, or
  // when the smallest requested qualifier is the empty byte array.
  boolean hasNullColumn =
      columns == null || columns.isEmpty() || columns.first().length == 0;
  Pair<DeleteTracker, ColumnTracker> trackers = getTrackers(regionCoprocessorHost, columns,
      scanInfo, oldestUnexpiredTS, scan);
  DeleteTracker deleteTracker = trackers.getFirst();
  ColumnTracker columnTracker = trackers.getSecond();
  // Raw scans skip delete tracking entirely; normal scans honor delete markers.
  if (scan.isRaw()) {
    return RawScanQueryMatcher.create(scan, scanInfo, columnTracker, hasNullColumn,
        oldestUnexpiredTS, now);
  }
  return NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, deleteTracker,
      hasNullColumn, oldestUnexpiredTS, now);
}
/**
 * Verifies {@link IndexSegment#find(StoreKey)}: every key in {@code referenceIndex} must
 * return its reference values (in order), and a key not in the segment must return null.
 * @param referenceIndex the index entries to be used as reference.
 * @param segment the {@link IndexSegment} to test
 * @throws StoreException
 */
private void verifyFind(NavigableMap<MockId, NavigableSet<IndexValue>> referenceIndex, IndexSegment segment)
    throws StoreException {
  for (Map.Entry<MockId, NavigableSet<IndexValue>> refEntry : referenceIndex.entrySet()) {
    NavigableSet<IndexValue> expectedValues = refEntry.getValue();
    NavigableSet<IndexValue> actualValues = segment.find(refEntry.getKey());
    assertNotNull("Values obtained from segment is null", actualValues);
    // Walk the segment's values in sorted order alongside the reference values.
    IndexValue actual = actualValues.first();
    for (IndexValue expected : expectedValues) {
      assertEquals("Offset is not equal", expected.getOffset(), actual.getOffset());
      assertEquals("Value is not equal", expected.getBytes(), actual.getBytes());
      actual = actualValues.higher(actual);
    }
    // After consuming all reference values, the segment must have none left over.
    assertNull("There should be no more values in the segment", actual);
  }
  // try to find a key that does not exist.
  MockId absentId = generateIds(referenceIndex, 1).get(0);
  assertNull("Should have failed to find non existent key", segment.find(absentId));
}
/**
 * Adds delete entries to {@code segment.}
 * @param idsToDelete the {@link Set} of IDs to create delete entries for.
 * @param segment the {@link IndexSegment} to add the entries to.
 * @param referenceIndex the {@link NavigableMap} to add all the entries to. This represents the source of truth for
 *                       all checks.
 * @throws StoreException
 */
private void addDeleteEntries(Set<MockId> idsToDelete, IndexSegment segment,
    NavigableMap<MockId, NavigableSet<IndexValue>> referenceIndex) throws StoreException {
  for (MockId id : idsToDelete) {
    Offset endOffset = segment.getEndOffset();
    NavigableSet<IndexValue> existing = segment.find(id);
    IndexValue base;
    if (existing == null) {
      // ID is unknown to the segment: fabricate a base value with a random log segment name.
      base = IndexValueTest.getIndexValue(1, new Offset(TestUtils.getRandomString(1), 0), Utils.Infinite_Time,
          time.milliseconds(), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM),
          (short) 0, formatVersion);
    } else if (existing.last().isDelete()) {
      throw new IllegalArgumentException(id + " is already deleted");
    } else {
      base = existing.first();
      // If a later value carries a different expiration, fold it into the base
      // along with the TTL-update flag.
      if (existing.size() > 1 && base.getExpiresAtMs() != existing.last().getExpiresAtMs()) {
        base.setFlag(IndexValue.Flags.Ttl_Update_Index);
        base.setExpiresAtMs(existing.last().getExpiresAtMs());
      }
    }
    // Derive the delete record from the base value and append it at the segment's end.
    IndexValue deleteValue = IndexValueTest.getIndexValue(base, formatVersion);
    deleteValue.setFlag(IndexValue.Flags.Delete_Index);
    deleteValue.setNewOffset(endOffset);
    deleteValue.setNewSize(DELETE_FILE_SPAN_SIZE);
    segment.addEntry(new IndexEntry(id, deleteValue),
        new Offset(endOffset.getName(), endOffset.getOffset() + DELETE_FILE_SPAN_SIZE));
    // Mirror the new record into the source-of-truth map.
    referenceIndex.computeIfAbsent(id, k -> new TreeSet<>()).add(deleteValue);
  }
}
ParentChildFilteredTermsEnum(TermsEnum tenum, NavigableSet<BytesRef> parentTypes) {
  super(tenum, true);
  this.parentTypes = parentTypes;
  // Seed the seek position with the smallest parent type, if any exist.
  if (parentTypes.isEmpty()) {
    this.seekTerm = null;
  } else {
    this.seekTerm = parentTypes.first();
  }
}
/**
 * Returns the single payload size when all observed sizes are identical
 * (i.e. the set holds exactly one element); otherwise returns 0.
 */
private int getPayloadSizeForResultIfConstantOrZeroOtherwise(NavigableSet<Integer> allPayloadSizes)
{
    if (allPayloadSizes.size() != 1) {
        return 0;
    }
    return allPayloadSizes.first();
}
/**
 * Finds the minimum record for {@code dataId}: the smallest record overall when
 * {@code from} is null, otherwise the smallest record >= {@code from}.
 * Returns null when no matching record exists.
 */
@Nullable
@Override
public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) {
    NavigableSet<ByteBuffer> records = _records.get(dataId);
    // Guard against an unknown dataId: the original dereferenced the map result
    // directly and would throw NPE here; treat "no records" as "no match".
    if (records == null) {
        return null;
    }
    if (from == null) {
        return records.isEmpty() ? null : records.first();
    }
    // ceiling() returns null when every record is below 'from'.
    return records.ceiling(from);
}
/**
 * Create the list of arrows to follow the current thread on a CPU.
 *
 * @param ss
 *            state system to query for intervals adjacent to the given set
 * @param intervals
 *            sorted collection of the current thread intervals for a CPU
 * @return the list of arrows to follow the current thread on a CPU
 * @throws StateSystemDisposedException
 *             If the query is sent after the state system has been disposed
 */
private List<@NonNull TimeGraphArrow> createCpuArrows(ITmfStateSystem ss, NavigableSet<ITmfStateInterval> intervals)
throws StateSystemDisposedException {
if (intervals.isEmpty()) {
return Collections.emptyList();
}
/*
 * Add the previous interval if it is the first query iteration and the first
 * interval has currentThread=0. Add the following interval if the last interval
 * has currentThread=0. These are diagonal arrows crossing the query iteration
 * range.
 */
// NOTE(review): the extra intervals are inserted into the caller-supplied set —
// this assumes the set is mutable and the caller tolerates the mutation; confirm.
ITmfStateInterval first = intervals.first();
long start = first.getStartTime() - 1;
if (start >= ss.getStartTime() && Objects.equals(first.getValue(), 0)) {
intervals.add(ss.querySingleState(start, first.getAttribute()));
}
ITmfStateInterval last = intervals.last();
long end = last.getEndTime() + 1;
if (end <= ss.getCurrentEndTime() && Objects.equals(last.getValue(), 0)) {
intervals.add(ss.querySingleState(end, last.getAttribute()));
}
List<@NonNull TimeGraphArrow> linkList = new ArrayList<>();
// prevEnd: end (+1) of the last interval that had a positive tid — arrow start time.
// lastEnd: end (+1) of the interval seen in the previous loop iteration — gap detection.
// prevEntry: entry id the previous arrow segment ended on; -1 means "no source yet".
long prevEnd = 0;
long lastEnd = 0;
long prevEntry = -1;
for (ITmfStateInterval currentThreadInterval : intervals) {
long time = currentThreadInterval.getStartTime();
if (time != lastEnd) {
/*
 * Don't create links where there are gaps in intervals due to the resolution
 */
prevEntry = -1;
prevEnd = 0;
}
Integer tid = (Integer) currentThreadInterval.getValue();
lastEnd = currentThreadInterval.getEndTime() + 1;
long nextEntry = -1;
// Only intervals with a real thread (tid > 0) participate in arrows; tid == 0
// (idle) and null intervals break the chain implicitly by not updating prevEntry.
if (tid != null && tid > 0) {
nextEntry = findEntry(tid, time);
if (prevEntry >= 0 && nextEntry >= 0) {
// Arrow spans from the end of the previous thread interval to the start of this one.
TimeGraphArrow arrow = new TimeGraphArrow(prevEntry, nextEntry, prevEnd, time - prevEnd, getElementStyle(LINK_VALUE));
linkList.add(arrow);
}
prevEntry = nextEntry;
prevEnd = lastEnd;
}
}
return linkList;
}
/**
 * Completes {@code future} with the weighted-median value: finds the region where the
 * running sum of weights first exceeds half the total, starts a scan there, and walks
 * rows until the cumulative weight crosses the halfway point, completing the future with
 * the value seen just before the crossing.
 *
 * <p>Completes exceptionally with {@link NoSuchElementException} when the crossing row
 * has no preceding value, and with any {@link IOException} raised while decoding cells.
 */
private static <R, S, P extends Message, Q extends Message, T extends Message> void findMedian(
CompletableFuture<R> future, AsyncTable<AdvancedScanResultConsumer> table,
ColumnInterpreter<R, S, P, Q, T> ci, Scan scan, NavigableMap<byte[], S> sumByRegion) {
// Half of the grand total of all per-region sums — the median threshold.
double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L);
S movingSum = null;
byte[] startRow = null;
// Locate the first region whose cumulative sum crosses halfSum; the scan will
// start at that region's start row, carrying the sum accumulated before it.
for (Map.Entry<byte[], S> entry : sumByRegion.entrySet()) {
startRow = entry.getKey();
S newMovingSum = ci.add(movingSum, entry.getValue());
if (ci.divideForAvg(newMovingSum, 1L) > halfSum) {
break;
}
movingSum = newMovingSum;
}
if (startRow != null) {
scan.withStartRow(startRow);
}
// we can not pass movingSum directly to an anonymous class as it is not final.
S baseSum = movingSum;
byte[] family = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(family);
// NOTE(review): assumes the scan selects qualifiers such that the largest is the
// weight column and the smallest is the value column — confirm the caller's contract.
byte[] weightQualifier = qualifiers.last();
byte[] valueQualifier = qualifiers.first();
table.scan(scan, new AdvancedScanResultConsumer() {
// Running weight sum, seeded with the pre-scan accumulation.
private S sum = baseSum;
// Value of the row seen just before the halfway crossing — the median candidate.
private R value = null;
@Override
public void onNext(Result[] results, ScanController controller) {
try {
for (Result result : results) {
Cell weightCell = result.getColumnLatestCell(family, weightQualifier);
R weight = ci.getValue(family, weightQualifier, weightCell);
sum = ci.add(sum, ci.castToReturnType(weight));
if (ci.divideForAvg(sum, 1L) > halfSum) {
// Crossed the threshold: the previously recorded value is the median.
if (value != null) {
future.complete(value);
} else {
future.completeExceptionally(new NoSuchElementException());
}
controller.terminate();
return;
}
Cell valueCell = result.getColumnLatestCell(family, valueQualifier);
value = ci.getValue(family, valueQualifier, valueCell);
}
} catch (IOException e) {
future.completeExceptionally(e);
controller.terminate();
}
}
@Override
public void onError(Throwable error) {
future.completeExceptionally(error);
}
@Override
public void onComplete() {
if (!future.isDone()) {
// we should not reach here as the future should be completed in onNext.
future.completeExceptionally(new NoSuchElementException());
}
}
});
}
/**
 * Verifies that the state in {@link PersistentIndex} is the same as the one in {@link #referenceIndex}.
 * Walks the real index segments and the reference segments in parallel, comparing every
 * value field, then checks that the journal matches the reference log order for the last segment.
 * @param isLogSegmented {@code true} if segmented. {@code false} otherwise.
 * @throws StoreException
 */
private void verifyState(boolean isLogSegmented) throws StoreException {
verifyRealIndexSanity();
assertEquals("Incorrect log segment count", isLogSegmented ? 3 : 1, index.getLogSegmentCount());
NavigableMap<Offset, IndexSegment> realIndex = index.getIndexSegments();
assertEquals("Number of index segments does not match expected", referenceIndex.size(), realIndex.size());
// Iterate both maps in ascending Offset order; realIndexEntry advances via higherEntry().
Map.Entry<Offset, IndexSegment> realIndexEntry = realIndex.firstEntry();
for (Map.Entry<Offset, TreeMap<MockId, TreeSet<IndexValue>>> referenceIndexEntry : referenceIndex.entrySet()) {
assertEquals("Offset of index segment does not match expected", referenceIndexEntry.getKey(),
realIndexEntry.getKey());
TreeMap<MockId, TreeSet<IndexValue>> referenceIndexSegment = referenceIndexEntry.getValue();
IndexSegment realIndexSegment = realIndexEntry.getValue();
for (Map.Entry<MockId, TreeSet<IndexValue>> referenceIndexSegmentEntry : referenceIndexSegment.entrySet()) {
MockId id = referenceIndexSegmentEntry.getKey();
NavigableSet<IndexValue> referenceValues = referenceIndexSegmentEntry.getValue();
NavigableSet<IndexValue> values = realIndexSegment.find(id);
assertNotNull("No values returned from real index segment for " + id, values);
assertTrue("No values returned from real index segment for " + id, values.size() > 0);
// Walk the real values in sorted order alongside the reference values.
IndexValue value = values.first();
for (IndexValue referenceValue : referenceValues) {
assertEquals("Offset does not match", referenceValue.getOffset(), value.getOffset());
assertEquals("ExpiresAtMs does not match", referenceValue.getExpiresAtMs(), value.getExpiresAtMs());
assertEquals("Size does not match", referenceValue.getSize(), value.getSize());
assertEquals("Account ID does not match", referenceValue.getAccountId(), value.getAccountId());
assertEquals("Container ID does not match", referenceValue.getContainerId(), value.getContainerId());
assertEquals(
"Original message offset does not match " + id.toString() + " " + referenceValue + " " + value.toString(),
referenceValue.getOriginalMessageOffset(), value.getOriginalMessageOffset());
assertEquals("Flags do not match " + id.toString() + " " + referenceValue.toString() + " " + value.toString(),
referenceValue.getFlags(), value.getFlags());
// Operation time and raw bytes are skipped for hard-deleted keys — presumably the
// hard deleter rewrites those fields; confirm against the hard-delete implementation.
if (index.hardDeleter.enabled.get() && !deletedKeys.contains(referenceIndexSegmentEntry.getKey())) {
assertEquals("Operation time does not match", referenceValue.getOperationTimeInMs(),
value.getOperationTimeInMs());
assertEquals("Value from IndexSegment does not match expected", referenceValue.getBytes(),
value.getBytes());
}
value = values.higher(value);
}
// After consuming all reference values, the real segment must have none left over.
assertNull("There are more values in the real index", value);
}
realIndexEntry = realIndex.higherEntry(realIndexEntry.getKey());
}
assertNull("There should no more index segments left", realIndexEntry);
// all the elements in the last segment should be in the journal
assertNotNull("There is no offset in the log that corresponds to the last index segment start offset",
logOrder.get(referenceIndex.lastKey()));
// Walk journal entries since the last segment's start offset alongside the reference log order.
Map.Entry<Offset, Pair<MockId, LogEntry>> logEntry = logOrder.floorEntry(referenceIndex.lastKey());
List<JournalEntry> entries = index.journal.getEntriesSince(referenceIndex.lastKey(), true);
for (JournalEntry entry : entries) {
assertNotNull("There are no more entries in the reference log but there are entries in the journal", logEntry);
assertEquals("Offset in journal not as expected", logEntry.getKey(), entry.getOffset());
assertEquals("Key in journal not as expected", logEntry.getValue().getFirst(), entry.getKey());
logEntry = logOrder.higherEntry(logEntry.getKey());
}
assertNull("There should be no more entries in the reference log", logEntry);
}