Listed below are example usages of java.util.PriorityQueue#addAll(); follow the link to GitHub to view each snippet's full source, or leave a comment on the right.
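Before the real-world snippets, here is a minimal, self-contained sketch of what addAll does on a PriorityQueue: each element of the collection is offered to the heap individually, the queue's iterator (and toString) does not report elements in priority order, and poll() is what returns them smallest-first, or per the supplied Comparator. Class name and output comments are illustrative only.
import java.util.Arrays;
import java.util.PriorityQueue;
public class AddAllDemo {
    public static void main(String[] args) {
        PriorityQueue<Integer> pq = new PriorityQueue<>();
        pq.addAll(Arrays.asList(5, 1, 4, 2, 3)); // each element is offered individually
        System.out.println(pq); // heap layout, e.g. [1, 2, 4, 5, 3] -- not sorted
        while (!pq.isEmpty()) {
            System.out.print(pq.poll() + " "); // prints 1 2 3 4 5
        }
    }
}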
/**
 * Sorts the assistant nodes by their calculated priority, highest first.
 * @param assistantNodes the nodes to sort
 * @return the nodes in descending priority order
 */
public static AssistantNode[] sortAssistantNodes(List<AssistantNode> assistantNodes) {
if (assistantNodes == null || assistantNodes.size() == 0) {
return new AssistantNode[0];
}
PriorityQueue<AssistantNode> sorted = new PriorityQueue<>(assistantNodes.size(), new Comparator<AssistantNode>() {
    @Override
    public int compare(AssistantNode lhs, AssistantNode rhs) {
        // higher priority first; Integer.compare avoids the overflow risk of rhsP - lhsP
        return Integer.compare(rhs.calculatePriority(), lhs.calculatePriority());
    }
});
sorted.addAll(assistantNodes);
// PriorityQueue#toArray returns heap order, not sorted order, so poll the queue to build the sorted array
AssistantNode[] result = new AssistantNode[assistantNodes.size()];
for (int i = 0; i < result.length; i++) {
    result[i] = sorted.poll();
}
return result;
}
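If the goal is a one-shot sorted array rather than incremental retrieval by priority, sorting a stream of the list with the same ordering is a simpler alternative to building a heap. A minimal sketch with plain integers (AssistantNode is specific to the project above):
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
public class OneShotSortSketch {
    public static void main(String[] args) {
        List<Integer> priorities = Arrays.asList(3, 10, 7, 1);
        Integer[] sortedDesc = priorities.stream()
                .sorted(Comparator.reverseOrder()) // highest priority first
                .toArray(Integer[]::new);
        System.out.println(Arrays.toString(sortedDesc)); // [10, 7, 3, 1]
    }
}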
public void deleteWMAssertedTraitProxies( InternalFactHandle handle, RuleImpl rule, TerminalNode terminalNode ) {
TraitableBean traitableBean = (TraitableBean) handle.getObject();
if( traitableBean.hasTraits() ){
PriorityQueue<TraitProxy> removedTypes =
new PriorityQueue<TraitProxy>( traitableBean._getTraitMap().values().size() );
removedTypes.addAll( traitableBean._getTraitMap().values() );
while ( ! removedTypes.isEmpty() ) {
TraitProxy proxy = removedTypes.poll();
if ( ! proxy._isVirtual() ) {
InternalFactHandle proxyHandle = (InternalFactHandle) getFactHandle( proxy );
if ( proxyHandle.getEqualityKey() == null || proxyHandle.getEqualityKey().getLogicalFactHandle() != proxyHandle ) {
entryPoint.delete( proxyHandle,
rule,
terminalNode );
}
}
}
}
}
private PriorityQueue<ChunkMetadata> sortUnseqChunkMetadatasByEndtime() throws IOException {
PriorityQueue<ChunkMetadata> chunkMetadataList =
new PriorityQueue<>(
(o1, o2) -> {
long endTime1 = o1.getEndTime();
long endTime2 = o2.getEndTime();
if (endTime1 < endTime2) {
return 1;
} else if (endTime1 > endTime2) {
return -1;
}
return Long.compare(o2.getVersion(), o1.getVersion());
});
for (TimeseriesMetadata timeseriesMetadata : unseqTimeseriesMetadataList) {
if (timeseriesMetadata != null) {
chunkMetadataList.addAll(timeseriesMetadata.loadChunkMetadataList());
}
}
return chunkMetadataList;
}
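The hand-written lambda above orders chunk metadata by end time descending and breaks ties by version descending. Built-in comparator combinators express the same ordering more declaratively; here is a minimal sketch using a hypothetical Chunk record in place of ChunkMetadata:
import java.util.*;
public class EndTimeOrderSketch {
    record Chunk(long endTime, long version) {} // hypothetical stand-in for ChunkMetadata
    public static void main(String[] args) {
        Comparator<Chunk> byEndTimeThenVersionDesc =
                Comparator.comparingLong(Chunk::endTime)
                          .thenComparingLong(Chunk::version)
                          .reversed(); // descending on both keys
        PriorityQueue<Chunk> queue = new PriorityQueue<>(byEndTimeThenVersionDesc);
        queue.addAll(List.of(new Chunk(10, 1), new Chunk(10, 2), new Chunk(7, 5)));
        System.out.println(queue.poll()); // Chunk[endTime=10, version=2]
    }
}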
public List<MatchResult> find(String inputText, List<String> libraries){
List<MatchResult> resList = new LinkedList<>();
for (IDictionary dict: this.dictionaries){
resList.addAll(dict.find(inputText,libraries));
}
List<MatchResult> deDupledList = new LinkedList<>();
PriorityQueue<MatchResult> pqMatches = new PriorityQueue<>(Collections.reverseOrder());
pqMatches.addAll(resList);
List<int[]> trackedRanges = new LinkedList<>();
while(!pqMatches.isEmpty()) {
MatchResult en = pqMatches.poll();
boolean entityOverlaped = false;
for (int[] range: trackedRanges){
if (en.start >= range[0] && en.end <= range[1]) {
entityOverlaped = true;
break;
}
}
if (!entityOverlaped){
deDupledList.add(en);
int[] newRange ={en.start,en.end};
trackedRanges.add(newRange);
}
}
return deDupledList;
}
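The method above de-duplicates overlapping matches greedily: a reverse-ordered PriorityQueue hands back the highest-ranked MatchResult first, and any later match whose [start, end] range is contained in an already accepted range is dropped. A minimal sketch of the same idea on plain ranges (names, scores and ranges are invented for illustration):
import java.util.*;
public class GreedyRangeDedupSketch {
    record Range(int start, int end, int score) {} // hypothetical match: a span plus a priority score
    public static void main(String[] args) {
        PriorityQueue<Range> best = new PriorityQueue<>(
                Comparator.comparingInt(Range::score).reversed()); // highest score first
        best.addAll(List.of(new Range(0, 10, 5), new Range(2, 6, 3), new Range(12, 15, 4)));
        List<Range> kept = new ArrayList<>();
        while (!best.isEmpty()) {
            Range r = best.poll();
            boolean contained = kept.stream()
                    .anyMatch(k -> r.start() >= k.start() && r.end() <= k.end());
            if (!contained) {
                kept.add(r); // keeps (0,10,5) and (12,15,4); drops (2,6,3)
            }
        }
        System.out.println(kept);
    }
}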
@Test
public void fixIteratorOrderOnPriorityQueue() {
// comparable
PriorityQueue<String> queueComparable = new PriorityQueue<>();
queueComparable.addAll(Arrays.asList("1", "333", "22", "55555", "4444"));
List<String> sortedComparable = asStream(queueComparable).collect(Collectors.toList());
assertThat(sortedComparable).containsExactly("1", "22", "333", "4444", "55555");
assertThat(queueComparable).isNotEmpty();
// comparator
PriorityQueue<String> queueComparator = new PriorityQueue<>(new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return o1.length() - o2.length();
}
});
queueComparator.addAll(Arrays.asList("1", "333", "22", "55555", "4444"));
List<String> sortedComparator = asStream(queueComparator).collect(Collectors.toList());
assertThat(sortedComparator).containsExactly("1", "22", "333", "4444", "55555");
assertThat(queueComparator).isNotEmpty();
// draining the queue
PriorityQueue<String> queueToBeDrained = new PriorityQueue<>();
queueToBeDrained.addAll(Arrays.asList("1", "333", "22", "55555", "4444"));
List<String> sortedDrainedQueue = Stream.generate(queueToBeDrained::poll)
.limit(queueToBeDrained.size())
.collect(Collectors.toList());
assertThat(sortedDrainedQueue)
.containsExactly("1", "22", "333", "4444", "55555");
assertThat(queueToBeDrained).isEmpty();
}
/**
* addAll(null) throws NPE
*/
public void testAddAll1() {
PriorityQueue q = new PriorityQueue(1);
try {
q.addAll(null);
shouldThrow();
} catch (NullPointerException success) {}
}
@Test
void solution_1() throws Exception {
PriorityQueue<String> queue = new PriorityQueue<>(Comparator.comparing(String::length));
queue.addAll(Arrays.asList("1", "333", "22", "55555", "4444"));
List<String> result = Stream.generate(queue::poll)
.limit(queue.size())
.collect(Collectors.toList());
assertThat(result).containsExactly("1", "22", "333", "4444", "55555");
assertThat(queue).isEmpty();
}
@Test
void solution_2() throws Exception {
PriorityQueue<String> queue = new PriorityQueue<>(Comparator.comparing(String::length));
queue.addAll(Arrays.asList("1", "333", "22", "55555", "4444"));
List<String> result = queue.stream()
.sorted(queue.comparator())
.collect(Collectors.toList());
assertThat(result).containsExactly("1", "22", "333", "4444", "55555");
assertThat(queue).isNotEmpty();
}
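One caveat with solution_2: PriorityQueue.comparator() returns null when the queue relies on natural ordering, and Stream.sorted(Comparator) is not documented to treat a null comparator as natural ordering the way List.sort(null) is, so the stream-and-sort variant only works for queues built with an explicit Comparator. A small sketch of a variant that handles both cases (class and method names are illustrative):
import java.util.*;
public class SortedViewSketch {
    // works for natural ordering (comparator() == null) and explicit comparators alike,
    // because List.sort(null) is specified to fall back to natural ordering
    static <E> List<E> sortedView(PriorityQueue<E> queue) {
        List<E> copy = new ArrayList<>(queue);
        copy.sort(queue.comparator());
        return copy;
    }
    public static void main(String[] args) {
        PriorityQueue<String> natural = new PriorityQueue<>();
        natural.addAll(Arrays.asList("b", "a", "c"));
        System.out.println(sortedView(natural)); // [a, b, c]
    }
}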
/**
* java.util.PriorityQueue#PriorityQueue(int, Comparator<? super E>)
*/
public void test_ConstructorILjava_util_Comparator_cast() {
MockComparatorCast<Object> objectComparator = new MockComparatorCast<Object>();
PriorityQueue<Integer> integerQueue = new PriorityQueue<Integer>(100,
objectComparator);
assertNotNull(integerQueue);
assertEquals(0, integerQueue.size());
assertEquals(objectComparator, integerQueue.comparator());
Integer[] array = { 2, 45, 7, -12, 9 };
List<Integer> list = Arrays.asList(array);
integerQueue.addAll(list);
assertEquals(list.size(), integerQueue.size());
// just verify that no ClassCastException is raised here.
}
public void render() {
SortByRender comparator = new SortByRender();
PriorityQueue<Entity> renderQueue = new PriorityQueue<Entity>(entities.size()+1, comparator);
renderQueue.addAll(entities);
while(!renderQueue.isEmpty()) {
renderQueue.poll().render();
}
}
/**
* addAll of a collection with null elements throws NPE
*/
public void testAddAll2() {
PriorityQueue q = new PriorityQueue(SIZE);
try {
q.addAll(Arrays.asList(new Integer[SIZE]));
shouldThrow();
} catch (NullPointerException success) {}
}
/**
 * Get the broker Id that has the resource. Here we need to apply the proper placement policy.
 *
 * @param brokerQueue the brokers, sorted by resource usage
 * @param oosReplica the out-of-sync replica
 * @param inBoundReq inbound traffic
 * @param outBoundReq outbound traffic
 * @param preferredBroker preferred broker id
 * @return a BrokerId to KafkaBroker mapping
 */
public Map<Integer, KafkaBroker> getAlternativeBrokers(
PriorityQueue<KafkaBroker> brokerQueue,
OutOfSyncReplica oosReplica,
double inBoundReq,
double outBoundReq,
int preferredBroker
) {
boolean success = true;
Map<Integer, KafkaBroker> result = new HashMap<>();
Set<KafkaBroker> unusableBrokers = new HashSet<>();
for (int oosBrokerId : oosReplica.outOfSyncBrokers) {
// we will get the broker with the least network usage
success = findNextBrokerForOosReplica(
brokerQueue,
unusableBrokers,
oosReplica.replicaBrokers,
result,
oosBrokerId,
oosReplica.topicPartition,
inBoundReq,
outBoundReq,
preferredBroker
);
// short circuit if failed to find available broker
if (!success) {
break;
}
}
// push the brokers back to brokerQueue to keep invariant true
brokerQueue.addAll(unusableBrokers);
return success ? result : null;
}
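Note the pattern at the end: brokers that were polled off brokerQueue but turned out to be unusable are collected in a Set and pushed back with addAll, so the queue's sorted-by-resource-usage invariant is restored for the caller. A minimal sketch of that poll-and-restore idiom (illustrative only; integers stand in for KafkaBroker and an even-number check stands in for the placement policy):
import java.util.*;
public class PollAndRestoreSketch {
    public static void main(String[] args) {
        PriorityQueue<Integer> queue = new PriorityQueue<>(List.of(3, 1, 4, 1, 5));
        Set<Integer> rejected = new HashSet<>();
        Integer chosen = null;
        while (!queue.isEmpty()) {
            Integer candidate = queue.poll();
            if (candidate % 2 == 0) { // stand-in for a placement-policy check
                chosen = candidate;
                break;
            }
            rejected.add(candidate); // keep aside, do not re-offer yet
        }
        queue.addAll(rejected); // restore the heap invariant for later callers
        System.out.println("chosen=" + chosen + ", queue=" + queue);
    }
}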
/**
* addAll of a collection with any null elements throws NPE after
* possibly adding some elements
*/
public void testAddAll3() {
PriorityQueue q = new PriorityQueue(SIZE);
Integer[] ints = new Integer[SIZE];
for (int i = 0; i < SIZE - 1; ++i)
ints[i] = new Integer(i);
try {
q.addAll(Arrays.asList(ints));
shouldThrow();
} catch (NullPointerException success) {}
}
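The three JSR-166 style tests above pin down the null contract of PriorityQueue.addAll: a null collection, a collection whose elements are all null, and a collection with a trailing null all end in NullPointerException, the last one possibly after some elements were already added. Roughly the same checks in JUnit 5 form, as a sketch assuming JUnit 5 is on the classpath:
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Arrays;
import java.util.PriorityQueue;
import org.junit.jupiter.api.Test;
class AddAllNullContractSketch {
    @Test
    void addAllRejectsNulls() {
        PriorityQueue<Integer> q = new PriorityQueue<>();
        assertThrows(NullPointerException.class, () -> q.addAll(null));
        assertThrows(NullPointerException.class,
                () -> q.addAll(Arrays.asList(new Integer[3]))); // all-null elements
        assertThrows(NullPointerException.class,
                () -> q.addAll(Arrays.asList(1, 2, null))); // fails after 1 and 2 were added
    }
}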
private Collection<Map<String, List<T>>> doProcess(
final SharedBufferAccessor<T> sharedBufferAccessor,
final NFAState nfaState,
final EventWrapper event,
final AfterMatchSkipStrategy afterMatchSkipStrategy,
final TimerService timerService) throws Exception {
final PriorityQueue<ComputationState> newPartialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
final PriorityQueue<ComputationState> potentialMatches = new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
// iterate over all current computations
for (ComputationState computationState : nfaState.getPartialMatches()) {
final Collection<ComputationState> newComputationStates = computeNextStates(
sharedBufferAccessor,
computationState,
event,
timerService);
if (newComputationStates.size() != 1) {
nfaState.setStateChanged();
} else if (!newComputationStates.iterator().next().equals(computationState)) {
nfaState.setStateChanged();
}
//delay adding new computation states in case a stop state is reached and we discard the path.
final Collection<ComputationState> statesToRetain = new ArrayList<>();
//if stop state reached in this path
boolean shouldDiscardPath = false;
for (final ComputationState newComputationState : newComputationStates) {
if (isFinalState(newComputationState)) {
potentialMatches.add(newComputationState);
} else if (isStopState(newComputationState)) {
//reached stop state. release entry for the stop state
shouldDiscardPath = true;
sharedBufferAccessor.releaseNode(newComputationState.getPreviousBufferEntry());
} else {
// add new computation state; it will be processed once the next event arrives
statesToRetain.add(newComputationState);
}
}
if (shouldDiscardPath) {
// a stop state was reached in this branch. release branch which results in removing previous event from
// the buffer
for (final ComputationState state : statesToRetain) {
sharedBufferAccessor.releaseNode(state.getPreviousBufferEntry());
}
} else {
newPartialMatches.addAll(statesToRetain);
}
}
if (!potentialMatches.isEmpty()) {
nfaState.setStateChanged();
}
List<Map<String, List<T>>> result = new ArrayList<>();
if (afterMatchSkipStrategy.isSkipStrategy()) {
processMatchesAccordingToSkipStrategy(sharedBufferAccessor,
nfaState,
afterMatchSkipStrategy,
potentialMatches,
newPartialMatches,
result);
} else {
for (ComputationState match : potentialMatches) {
Map<String, List<T>> materializedMatch =
sharedBufferAccessor.materializeMatch(
sharedBufferAccessor.extractPatterns(
match.getPreviousBufferEntry(),
match.getVersion()).get(0)
);
result.add(materializedMatch);
sharedBufferAccessor.releaseNode(match.getPreviousBufferEntry());
}
}
nfaState.setNewPartialMatches(newPartialMatches);
return result;
}
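Stripped of the Flink CEP specifics, the control flow above is: compute the successor states, route final states to potentialMatches, mark the path discarded when a stop state is seen, and otherwise buffer the survivors and bulk-add them into the priority-ordered newPartialMatches. A schematic, non-Flink sketch of that routing (state names, kinds and the priority rule are invented for illustration):
import java.util.*;
public class StateRoutingSketch {
    enum Kind { FINAL, STOP, ONGOING }
    record State(String name, Kind kind, int priority) {}
    public static void main(String[] args) {
        PriorityQueue<State> newPartialMatches =
                new PriorityQueue<>(Comparator.comparingInt(State::priority));
        List<State> potentialMatches = new ArrayList<>();
        List<State> successors = List.of(
                new State("a", Kind.ONGOING, 2),
                new State("b", Kind.FINAL, 1),
                new State("c", Kind.ONGOING, 3));
        List<State> statesToRetain = new ArrayList<>();
        boolean shouldDiscardPath = false;
        for (State s : successors) {
            if (s.kind() == Kind.FINAL) {
                potentialMatches.add(s);
            } else if (s.kind() == Kind.STOP) {
                shouldDiscardPath = true; // the whole branch will be dropped
            } else {
                statesToRetain.add(s); // kept only if no stop state shows up
            }
        }
        if (!shouldDiscardPath) {
            newPartialMatches.addAll(statesToRetain);
        }
        System.out.println(potentialMatches + " / " + newPartialMatches);
    }
}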
/**
* If there are FlowFiles waiting on the swap queue, move them to the active
* queue until we meet our threshold. This prevents us from having to swap
* them to disk & then back out.
*
* This method MUST be called with the writeLock held.
*/
private void migrateSwapToActive() {
// Migrate as many FlowFiles as we can from the Swap Queue to the Active Queue, so that we don't
// have to swap them out & then swap them back in.
// If we don't do this, we could get into a situation where we have potentially thousands of FlowFiles
// sitting on the Swap Queue but not getting processed because there aren't enough to be swapped out.
// In particular, this can happen if the queue is typically filled with surges.
// For example, if the queue has 25,000 FlowFiles come in, it may process 20,000 of them and leave
// 5,000 sitting on the Swap Queue. If it then takes an hour for an additional 5,000 FlowFiles to come in,
// those FlowFiles sitting on the Swap Queue will sit there for an hour, waiting to be swapped out and
// swapped back in again.
// Calling this method when records are polled prevents this condition by migrating FlowFiles from the
// Swap Queue to the Active Queue. However, we don't do this if there are FlowFiles already swapped out
// to disk, because we want them to be swapped back in in the same order that they were swapped out.
if (!activeQueue.isEmpty()) {
return;
}
// If there are swap files waiting to be swapped in, swap those in first. We do this in order to ensure that those that
// were swapped out first are then swapped back in first. If we instead just immediately migrated the FlowFiles from the
// swap queue to the active queue, and we never run out of FlowFiles in the active queue (because destination cannot
// keep up with queue), we will end up always processing the new FlowFiles first instead of the FlowFiles that arrived
// first.
if (!swapLocations.isEmpty()) {
swapIn();
return;
}
// this is the most common condition (nothing is swapped out), so do the check first and avoid the expense
// of other checks for 99.999% of the cases.
final FlowFileQueueSize size = getFlowFileQueueSize();
if (size.getSwappedCount() == 0 && swapQueue.isEmpty()) {
return;
}
if (size.getSwappedCount() > swapQueue.size()) {
// we already have FlowFiles swapped out, so we won't migrate the queue; we will wait for
// the files to be swapped back in first
return;
}
// Swap Queue is not currently ordered. We want to migrate the highest priority FlowFiles to the Active Queue, then re-queue the lowest priority items.
final PriorityQueue<FlowFileRecord> tempQueue = new PriorityQueue<>(swapQueue.size(), new QueuePrioritizer(getPriorities()));
tempQueue.addAll(swapQueue);
int recordsMigrated = 0;
long bytesMigrated = 0L;
while (activeQueue.size() < swapThreshold) {
final FlowFileRecord toMigrate = tempQueue.poll();
if (toMigrate == null) {
break;
}
activeQueue.add(toMigrate);
bytesMigrated += toMigrate.getSize();
recordsMigrated++;
}
swapQueue.clear();
FlowFileRecord toRequeue;
while ((toRequeue = tempQueue.poll()) != null) {
swapQueue.add(toRequeue);
}
if (recordsMigrated > 0) {
incrementActiveQueueSize(recordsMigrated, bytesMigrated);
incrementSwapQueueSize(-recordsMigrated, -bytesMigrated, 0);
logger.debug("Migrated {} FlowFiles from swap queue to active queue for {}", recordsMigrated, this);
}
if (size.getSwappedCount() == 0) {
swapMode = false;
}
}
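The core of the migration is the temporary PriorityQueue: the unordered swap queue is dumped into it with addAll, the highest-priority records are moved to the active queue until the threshold is reached, and whatever remains is written back to the swap queue. A minimal sketch of that shape, where integers stand in for FlowFileRecord, natural ordering stands in for QueuePrioritizer, and a threshold of 3 plays the role of swapThreshold:
import java.util.*;
public class SwapMigrationSketch {
    public static void main(String[] args) {
        List<Integer> swapQueue = new ArrayList<>(List.of(9, 2, 7, 4, 6));
        Deque<Integer> activeQueue = new ArrayDeque<>();
        int threshold = 3;
        PriorityQueue<Integer> temp = new PriorityQueue<>(swapQueue.size());
        temp.addAll(swapQueue); // order the swap queue by priority
        while (activeQueue.size() < threshold) {
            Integer toMigrate = temp.poll();
            if (toMigrate == null) {
                break;
            }
            activeQueue.add(toMigrate); // 2, 4, 6 migrate
        }
        swapQueue.clear();
        Integer toRequeue;
        while ((toRequeue = temp.poll()) != null) {
            swapQueue.add(toRequeue); // 7, 9 stay on the swap queue
        }
        System.out.println("active=" + activeQueue + ", swap=" + swapQueue);
    }
}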
public void render() {
Renderer.pushMatrix();
Renderer.scale(2, 2);
Renderer.render(bg, 0, 0, 1, 1, 0, 0, 240, 160, 1);
Renderer.drawRectangle(0, 0, 240, 160, 1, new Color(0,0,0,darkness));
if (shakeTimer > 0) {
shakeTimer -= Game.getDeltaSeconds();
if (prevShakeTimer - shakeTimer > SHAKE_INTERVAL) {
float factor = Math.min(shakeTimer * 1.2f, 1.0f);
shakeX *= -factor;
shakeY *= -factor;
prevShakeTimer = shakeTimer;
}
if (shakeTimer < 0) {
shakeTimer = 0;
prevShakeTimer = 0;
shakeX = 0;
shakeY = 0;
}
}
// Shake
Renderer.translate((int) shakeX, (int) shakeY);
SortByRender comparator = new SortByRender();
PriorityQueue<Entity> renderQueue = new PriorityQueue<Entity>(entities.size()+1, comparator);
renderQueue.addAll(entities);
while(!renderQueue.isEmpty()) {
Entity e = renderQueue.poll();
Renderer.pushMatrix();
if(e.renderDepth > HUD_DEPTH && !(e instanceof BackgroundEffect)) {
Renderer.translate(cameraOffset, 0);
}
e.render();
Renderer.popMatrix();
}
// Undo shake translation
Renderer.popMatrix();
Renderer.removeClip();
}
private static void realMain(String[] args) throws Throwable {
final PriorityQueue<Integer> q = new PriorityQueue<>();
Iterator<Integer> it;
//----------------------------------------------------------------
// Empty
//----------------------------------------------------------------
checkQ(q);
check(q.isEmpty());
check(! q.contains(1));
it = q.iterator();
removeIsCurrentlyIllegal(it);
noMoreElements(it);
q.clear();
check(q.isEmpty());
//----------------------------------------------------------------
// Singleton
//----------------------------------------------------------------
q.add(1);
checkQ(q, 1);
check(! q.isEmpty());
check(q.contains(1));
it = q.iterator();
removeIsCurrentlyIllegal(it);
check(it.hasNext());
equal(it.next(), 1);
noMoreElements(it);
remove(it, q);
check(q.isEmpty());
noMoreElements(it);
checkQ(q);
q.clear();
//----------------------------------------------------------------
// @see PriorityQueue.forgetMeNot
//----------------------------------------------------------------
final Integer[] a = {0, 4, 1, 6, 7, 2, 3}; // Carefully chosen!
q.addAll(Arrays.asList(a));
checkQ(q, a);
it = q.iterator();
checkQ(q, a);
removeIsCurrentlyIllegal(it);
checkQ(q, a);
check(it.hasNext());
removeIsCurrentlyIllegal(it);
checkQ(q, a);
check(it.hasNext());
equal(it.next(), 0);
equal(it.next(), 4);
equal(it.next(), 1);
equal(it.next(), 6);
check(it.hasNext());
checkQ(q, a);
remove(it, q);
checkQ(q, 0, 3, 1, 4, 7, 2);
check(it.hasNext());
removeIsCurrentlyIllegal(it);
equal(it.next(), 7);
remove(it, q);
checkQ(q, 0, 2, 1, 4, 3);
check(it.hasNext());
removeIsCurrentlyIllegal(it);
check(it.hasNext());
equal(it.next(), 3);
equal(it.next(), 2);
check(! it.hasNext());
remove(it, q);
checkQ(q, 0, 3, 1, 4);
check(! it.hasNext());
noMoreElements(it);
removeIsCurrentlyIllegal(it);
}
/**
 * Search for the actual nearest neighbours of a query vector using an
 * exhaustive linear search. A single priority queue is created, and the
 * distance between the query and each vector in the data set is used to
 * order it. The closest k neighbours show up at the head of the
 * priority queue.
*
* @param dataset
* The data set with a bunch of vectors.
* @param query
* The query vector.
* @param resultSize
* The k nearest neighbours to find. Returns k vectors if the
* data set size is larger than k.
* @param measure
* The distance measure used to sort the priority queue with.
* @return The list of k nearest neighbours to the query vector, according
* to the given distance measure.
*/
public static List<Vector> linearSearch(List<Vector> dataset, final Vector query, int resultSize, DistanceMeasure measure) {
    DistanceComparator dc = new DistanceComparator(query, measure);
    // the initial capacity must be at least 1, even for an empty data set
    PriorityQueue<Vector> pq = new PriorityQueue<Vector>(Math.max(1, dataset.size()), dc);
    pq.addAll(dataset);
    List<Vector> vectors = new ArrayList<Vector>();
    // stop early when the data set holds fewer than resultSize vectors, so no nulls are returned
    for (int i = 0; i < resultSize && !pq.isEmpty(); i++) {
        vectors.add(pq.poll());
    }
    return vectors;
}
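A small, self-contained way to exercise the same linear-search idea without the project's Vector and DistanceMeasure types: double[] points stand in for vectors, a hand-rolled Euclidean distance stands in for the measure, and the queue is ordered by distance to the query (helper names are made up for the sketch):
import java.util.*;
public class LinearKnnSketch {
    static double distance(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            double d = a[i] - b[i];
            sum += d * d;
        }
        return Math.sqrt(sum);
    }
    public static void main(String[] args) {
        double[] query = {0, 0};
        List<double[]> dataset = Arrays.asList(
                new double[]{3, 4}, new double[]{1, 1}, new double[]{-2, 0});
        Comparator<double[]> byDistance = Comparator.comparingDouble(p -> distance(p, query)); // closest first
        PriorityQueue<double[]> pq = new PriorityQueue<>(byDistance);
        pq.addAll(dataset);
        for (int k = 0; k < 2 && !pq.isEmpty(); k++) {
            System.out.println(Arrays.toString(pq.poll())); // [1.0, 1.0] then [-2.0, 0.0]
        }
    }
}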