The following are Java code examples of java.util.Queue#addAll(), drawn from open-source projects.
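Queue inherits addAll from Collection; for a LinkedList-backed queue it appends every element of the argument collection at the tail, in the argument's iteration order. Before the project samples, a minimal self-contained sketch (class name and values are illustrative):
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

public class QueueAddAllDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.add("a");
        // addAll appends "b" and "c" behind "a"
        queue.addAll(Arrays.asList("b", "c"));
        System.out.println(queue);        // [a, b, c]
        System.out.println(queue.poll()); // a, since FIFO order is preserved
    }
}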
@Test
public void shouldCloseWithoutWaiting() throws Exception {
Payload payload1 = mock(Payload.class);
Payload payload2 = mock(Payload.class);
Queue<Payload> queue = new ConcurrentLinkedQueue<>();
queue.addAll(asList(payload1, payload2));
sut = new BufferedSender(new BufferedSender.Builder()
.queue(queue)
.sender(sender),
executorService);
sut.close(false);
assertThat(queue.size(), is(2));
verify(executorService).shutdown();
verify(sender, never()).send(payload1);
verify(sender, never()).send(payload2);
verify(sender).close();
}
/**
 * Retrieve the parent of the node carrying the given task id.
 *
 * @param root the node at which to start the search
 * @param taskId the id of the task whose parent is sought
 * @return the parent node, or null if not found
 */
public static Node searchParent(Node root, int taskId) {
Queue<Node> queue = new LinkedList<>();
queue.add(root);
while (queue.size() > 0) {
Node current = queue.poll();
if (current.getAllChildrenIds().contains(taskId)) {
return current;
} else {
queue.addAll(current.getChildren());
}
}
return null;
}
public boolean canVisitAllRooms(List<List<Integer>> rooms) {
Queue<Integer> keys = new LinkedList<>(rooms.get(0));
boolean[] visited = new boolean[rooms.size()];
visited[0] = true;
while (!keys.isEmpty()) {
int key = keys.poll();
if (!visited[key]) keys.addAll(rooms.get(key));
visited[key] = true;
}
for (boolean cell : visited) {
if (!cell) return false;
}
return true;
}
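This method matches the classic "keys and rooms" reachability puzzle: rooms.get(i) lists the keys found in room i, room 0 starts unlocked, and addAll feeds each newly opened room's keys into the BFS queue. A quick usage sketch, assuming the method above is in scope:
import java.util.Arrays;
import java.util.List;

List<List<Integer>> rooms = Arrays.asList(
        Arrays.asList(1),           // room 0 holds the key to room 1
        Arrays.asList(2),           // room 1 holds the key to room 2
        Arrays.asList(3),           // room 2 holds the key to room 3
        Arrays.<Integer>asList());  // room 3 holds no keys
System.out.println(canVisitAllRooms(rooms)); // true: every room is reachable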
/**
* @hide
*/
public static Stream<SliceItem> stream(SliceItem slice) {
Queue<SliceItem> items = new LinkedList<>();
items.add(slice);
Iterator<SliceItem> iterator = new Iterator<SliceItem>() {
@Override
public boolean hasNext() {
return items.size() != 0;
}
@Override
public SliceItem next() {
SliceItem item = items.poll();
if (compareTypes(item, SliceItem.FORMAT_SLICE)
|| compareTypes(item, SliceItem.FORMAT_ACTION)) {
items.addAll(item.getSlice().getItems());
}
return item;
}
};
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, 0), false);
}
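The Spliterators/StreamSupport pair on the last line is the standard JDK recipe for exposing a hand-written Iterator as a lazy, sequential Stream. A generic sketch of just that pattern (the method name is illustrative):
import java.util.Iterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

static <T> Stream<T> toStream(Iterator<T> iterator) {
    // 0 = no Spliterator characteristics reported; false = sequential, not parallel
    return StreamSupport.stream(
            Spliterators.spliteratorUnknownSize(iterator, 0), false);
}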
/**
 * If the number of ClusteringTreeNodes exceeds the maximum bound, the
 * global threshold T will be doubled and the tree will be rebuilt with the
 * new threshold.
 */
protected void rebuild() {
// Checks if the number of nodes in the tree exceeds the maximum number
while (this.rootCount > this.maxNumClusterFeatures) {
// Doubles the global threshold
this.T *= 2.0;
this.root.setThreshold(calcRSquared(1));
// Adds all nodes to the ClusteringFeature tree again
Queue<ClusteringTreeNode> Q = new LinkedList<ClusteringTreeNode>();
Q.addAll(this.root.getChildren());
this.root.clearChildren();
this.rootCount = 0;
while (!Q.isEmpty()) {
ClusteringTreeNode x = Q.element();
Q.addAll(x.getChildren());
x.clearChildren();
bicoCFUpdate(x);
Q.remove();
}
}
}
/**
 * Check if the table contains homogeneous files that can be read by Drill, e.g. Parquet, JSON, CSV, etc.
 * However, if it contains more than one of these formats, or a totally different file format that Drill cannot
 * understand, then we will raise an exception.
 * @param tableName name of the table to be checked for the homogeneous property
 * @return true if the table contains homogeneous files, false otherwise
 * @throws IOException in case of problems accessing the table files
 */
private boolean isHomogeneous(String tableName) throws IOException {
FileSelection fileSelection = FileSelection.create(getFS(), config.getLocation(), tableName, config.allowAccessOutsideWorkspace());
if (fileSelection == null) {
throw UserException
.validationError()
.message(String.format("Table [%s] not found", tableName))
.build(logger);
}
FormatMatcher matcher = null;
Queue<FileStatus> listOfFiles = new LinkedList<>(fileSelection.getStatuses(getFS()));
while (!listOfFiles.isEmpty()) {
FileStatus currentFile = listOfFiles.poll();
if (currentFile.isDirectory()) {
listOfFiles.addAll(DrillFileSystemUtil.listFiles(getFS(), currentFile.getPath(), true));
} else {
if (matcher != null) {
if (!matcher.isFileReadable(getFS(), currentFile)) {
return false;
}
} else {
matcher = findMatcher(currentFile);
// Did not match any of the file patterns, exit
if (matcher == null) {
return false;
}
}
}
}
return true;
}
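The loop above is a queue-driven directory walk: directories are expanded back into the queue via addAll, while plain files fall through to the format matcher. The same traversal pattern in plain JDK terms, with a hypothetical starting path:
import java.io.File;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Queue;

Queue<File> pending = new ArrayDeque<>();
pending.add(new File("/path/to/table")); // hypothetical root directory
while (!pending.isEmpty()) {
    File current = pending.poll();
    if (current.isDirectory()) {
        File[] children = current.listFiles();
        if (children != null) {
            pending.addAll(Arrays.asList(children)); // expand directories in place
        }
    } else {
        System.out.println(current); // a regular file: matching would happen here
    }
}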
/**
 * @param collection the open collection to set
 */
public void setOpen(final Queue<BackPointerPath<N, A, V>> collection) {
this.openLock.lock();
try {
collection.clear();
collection.addAll(this.open);
this.open = collection;
} finally {
this.openLock.unlock();
}
}
private CrawlerResult crawl( ModuleRecord mainRecord )
throws ParserException, IOException, ModuleException {
CrawlerResult result = new CrawlerResult();
// start with main module record
Queue< ModuleSource > dependencies = new LinkedList<>();
result.addModuleRecord( mainRecord );
dependencies.addAll( this.crawlModule( mainRecord ) );
// walk through dependencies
while( dependencies.peek() != null ) {
ModuleSource module = dependencies.poll();
if( result.isRecordInResult( module.uri() ) ) {
continue;
}
if( ModuleCrawler.inCache( module.uri() ) ) {
result.addModuleRecord( ModuleCrawler.getRecordFromCache( module.uri() ) );
continue;
}
ModuleParser parser = new ModuleParser( parserConfiguration );
ModuleRecord p = parser.parse( module );
result.addModuleRecord( p );
dependencies.addAll( this.crawlModule( p ) );
}
return result;
}
private MIterator(AbstractNodeTree root) {
mQueue = new LinkedList<>();
Queue<AbstractNodeTree> tmpQueue = new LinkedList<>();
tmpQueue.add(root);
AbstractNodeTree current = null;
while ((current = tmpQueue.poll()) != null) {
mQueue.add(current);
if (current.getChildrenNodes() != null && current.getChildrenNodes().size() > 0) {
tmpQueue.addAll(current.getChildrenNodes());
}
}
}
/**
 * Move the messages for the given destination onto its ready queue.
 * @param dest the target destination
 * @param dests the messages to move to the ready queue
 */
protected void merge(int dest, List<Object> dests) {
if (!readyToSend.containsKey(dest)) {
readyToSend.put(dest, new LinkedBlockingQueue<>(dests));
} else {
Queue<Object> ready = readyToSend.get(dest);
ready.addAll(dests);
}
dests.clear();
}
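Since Java 8 the put-or-merge branch can be collapsed with Map#computeIfAbsent; an equivalent sketch of the body above, assuming readyToSend is a java.util.Map as the containsKey/put/get usage suggests:
// create the destination queue on first use, then append the pending messages
readyToSend.computeIfAbsent(dest, k -> new LinkedBlockingQueue<>()).addAll(dests);
dests.clear();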
/**
 * Creates policy alternatives according to the provided model. The model structure is modified in the process.
 *
 * @return the policy alternatives resulting from the policy source model
 */
private Collection<AssertionSet> createPolicyAlternatives(final PolicySourceModel model) throws PolicyException {
// creating global method variables
final ContentDecomposition decomposition = new ContentDecomposition();
// creating processing queue and starting the processing iterations
final Queue<RawPolicy> policyQueue = new LinkedList<RawPolicy>();
final Queue<Collection<ModelNode>> contentQueue = new LinkedList<Collection<ModelNode>>();
final RawPolicy rootPolicy = new RawPolicy(model.getRootNode(), new LinkedList<RawAlternative>());
RawPolicy processedPolicy = rootPolicy;
do {
Collection<ModelNode> processedContent = processedPolicy.originalContent;
do {
decompose(processedContent, decomposition);
if (decomposition.exactlyOneContents.isEmpty()) {
final RawAlternative alternative = new RawAlternative(decomposition.assertions);
processedPolicy.alternatives.add(alternative);
if (!alternative.allNestedPolicies.isEmpty()) {
policyQueue.addAll(alternative.allNestedPolicies);
}
} else { // we have a non-empty collection of exactly ones
final Collection<Collection<ModelNode>> combinations = PolicyUtils.Collections.combine(decomposition.assertions, decomposition.exactlyOneContents, false);
if (combinations != null && !combinations.isEmpty()) {
// processed alternative was split into some new alternatives, which we need to process
contentQueue.addAll(combinations);
}
}
} while ((processedContent = contentQueue.poll()) != null);
} while ((processedPolicy = policyQueue.poll()) != null);
// normalize nested policies to contain single alternative only
final Collection<AssertionSet> assertionSets = new LinkedList<AssertionSet>();
for (RawAlternative rootAlternative : rootPolicy.alternatives) {
final Collection<AssertionSet> normalizedAlternatives = normalizeRawAlternative(rootAlternative);
assertionSets.addAll(normalizedAlternatives);
}
return assertionSets;
}
@NotNull
private <T extends PsiElement> Collection<T> resolveElementsImpl(ResolveMode mode, Function<ProtoRootNode, Collection<T>> extractor) {
List<T> result = new ArrayList<>();
result.addAll(extractor.apply(this));
if (stopLookup(mode, result)) {
return result;
}
Queue<ImportNode> queue = new ArrayDeque<>();
queue.addAll(getImports());
Set<ProtoRootNode> processedProtos = new HashSet<>();
while (!queue.isEmpty()) {
ImportNode importNode = queue.poll();
ProtoRootNode targetProto = importNode.getTargetProto();
if (processedProtos.contains(targetProto)) {
// do not enter into endless loop
// if proto files refer to each other
continue;
}
processedProtos.add(targetProto);
if (targetProto != null) {
result.addAll(extractor.apply(targetProto));
queue.addAll(targetProto.getPublicImports());
if (stopLookup(mode, result)) {
break;
}
}
}
return result;
}
@Override
public void fillQueueForKey(String keyName,
Queue<EncryptedKeyVersion> keyQueue, int numKeys) throws IOException {
List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
for (int i = 0; i < numKeys; i++) {
try {
retEdeks.add(keyProviderCryptoExtension.generateEncryptedKey(
keyName));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
keyQueue.addAll(retEdeks);
}
public InitialisationState(Circuit circuit) {
Queue<MathConnection> queue = new LinkedList<>();
for (FunctionContact contact : circuit.getFunctionContacts()) {
if (contact.isDriver() && contact.getForcedInit()) {
Set<MathNode> initSet = contact.getInitToOne() ? highSet : lowSet;
if (initSet.add(contact)) {
queue.addAll(circuit.getConnections(contact));
}
}
}
while (!queue.isEmpty()) {
MathConnection connection = queue.remove();
MathNode fromNode = connection.getFirst();
Set<MathNode> nodeInitLevelSet = chooseNodeLevelSet(fromNode);
if ((nodeInitLevelSet != null) && nodeInitLevelSet.add(connection)) {
if (conflictSet.contains(fromNode)) {
conflictSet.add(connection);
}
MathNode toNode = connection.getSecond();
if (nodeInitLevelSet.add(toNode)) {
Node parent = toNode.getParent();
if (parent instanceof FunctionComponent) {
FunctionComponent component = (FunctionComponent) parent;
propagateValuesToOutputs(circuit, component, queue);
} else {
Set<MathConnection> connections = circuit.getConnections(toNode);
queue.addAll(connections);
}
}
}
}
problematicSet.addAll(ResetUtils.getProblematicPins(circuit));
}
/**
 * Get all return instructions of this method.
 * @return the set of return instructions reachable from this instruction
 */
public Collection<_VirtualMethodReturn> getReturnInstructions() {
Queue<Instruction> instructionsToProcess = new LinkedList<>();
instructionsToProcess.add(this);
Set<Instruction> processedInstructions = new HashSet<>();
Set<_VirtualMethodReturn> returnInstructions = new HashSet<>();
while (!instructionsToProcess.isEmpty()) {
Instruction instruction = instructionsToProcess.poll();
if (processedInstructions.contains(instruction)) {
// branch already processed
continue;
}
processedInstructions.add(instruction);
if (instruction instanceof _VirtualMethodReturn) {
returnInstructions.add((_VirtualMethodReturn) instruction);
}
if (instruction.getNext() != null) {
instructionsToProcess.add(instruction.getNext());
}
if (instruction instanceof BranchInstruction && !(instruction instanceof _VirtualInstruction)) {
instructionsToProcess.addAll(((BranchInstruction) instruction).getOutgoingBranches());
}
}
return returnInstructions;
}
private boolean hasParent(File output, boolean hierarchy, HashSet<String> selectedPids, String[] models) {
HashSet<String> pidsToExport = new HashSet<>();
Queue<String> queueToExport = new LinkedList<String>();
queueToExport.addAll(selectedPids);
if (isMonographTitle(queueToExport, pidsToExport, output, hierarchy, models)) {
return true;
}
return selectedPidHasParent(pidsToExport, output, selectedPids, models);
}
/**
 * Decomposes the unprocessed alternative content into two different collections:
 * <p/>
 * Content of 'EXACTLY_ONE' child nodes is expanded and placed in one list, while
 * 'ASSERTION' nodes are placed into the other list. Direct 'ALL' and 'POLICY' child nodes are 'dissolved' in the process.
 *
 * The method reuses a pre-created ContentDecomposition object, which is reset before reuse.
 */
private void decompose(final Collection<ModelNode> content, final ContentDecomposition decomposition) throws PolicyException {
decomposition.reset();
final Queue<ModelNode> allContentQueue = new LinkedList<ModelNode>(content);
ModelNode node;
while ((node = allContentQueue.poll()) != null) {
// dissolving direct 'POLICY', 'POLICY_REFERENCE' and 'ALL' child nodes
switch (node.getType()) {
case POLICY :
case ALL :
allContentQueue.addAll(node.getChildren());
break;
case POLICY_REFERENCE :
allContentQueue.addAll(getReferencedModelRootNode(node).getChildren());
break;
case EXACTLY_ONE :
decomposition.exactlyOneContents.add(expandsExactlyOneContent(node.getChildren()));
break;
case ASSERTION :
decomposition.assertions.add(node);
break;
default :
throw LOGGER.logSevereException(new PolicyException(LocalizationMessages.WSP_0007_UNEXPECTED_MODEL_NODE_TYPE_FOUND(node.getType())));
}
}
}
@Override
protected void executeImpl() {
// Go through all edges and split any that cover nearby nodes
final Queue<GMLEdge> remaining = new LinkedList<GMLEdge>();
final Collection<GMLNode> nodes = new HashSet<GMLNode>();
synchronized (editor.getMap()) {
remaining.addAll(editor.getMap().getEdges());
nodes.addAll(editor.getMap().getNodes());
}
setProgressLimit(remaining.size());
int count = 0;
while (!remaining.isEmpty()) {
GMLEdge next = remaining.remove();
Line2D line = GMLTools.toLine(next);
// Look for nodes that are close to the line
for (GMLNode node : nodes) {
if (node == next.getStart() || node == next.getEnd()) {
continue;
}
Point2D p = GMLTools.toPoint(node);
Point2D closest = GeometryTools2D.getClosestPointOnSegment(line, p);
if (GeometryTools2D.getDistance(p, closest) < threshold) {
// Split the edge
Collection<GMLEdge> newEdges;
synchronized (editor.getMap()) {
newEdges = editor.getMap().splitEdge(next, node);
editor.getMap().removeEdge(next);
newEdges.removeAll(editor.getMap().getEdges());
}
remaining.addAll(newEdges);
bumpMaxProgress(newEdges.size());
++count;
break;
}
}
bumpProgress();
}
if (count != 0) {
editor.setChanged();
editor.getViewer().repaint();
}
Logger.debug("Split " + count + " edges");
}
/**
 * Process constraints for the provided value using the provided constraint processors.
 *
 * @param result - used to store the validation results
 * @param value - the object on which constraints are to be processed - a collection or the value of an attribute
 * @param definition - a Data Dictionary definition e.g. {@code ComplexAttributeDefinition} or {@code
 * CollectionDefinition}
 * @param attributeValueReader - a class that encapsulates access to both dictionary metadata and object field
 * values
 * @param doOptionalProcessing - true if the validation should perform optional processing, false otherwise
 */
@SuppressWarnings("unchecked")
private void processConstraints(DictionaryValidationResult result,
List<? extends ConstraintProcessor> constraintProcessors, Object value, Constrainable definition,
AttributeValueReader attributeValueReader, boolean doOptionalProcessing, String validationState,
StateMapping stateMapping) {
//TODO: Implement custom validators
if (constraintProcessors != null) {
Constrainable selectedDefinition = definition;
AttributeValueReader selectedAttributeValueReader = attributeValueReader;
// First - take the constrainable definition and get its constraints
Queue<Constraint> constraintQueue = new LinkedList<Constraint>();
// Using a for loop to iterate through constraint processors because ordering is important
for (ConstraintProcessor<Object, Constraint> processor : constraintProcessors) {
// Let the calling method opt out of any optional processing
if (!doOptionalProcessing && processor.isOptional()) {
result.addSkipped(attributeValueReader, processor.getName());
continue;
}
Class<? extends Constraint> constraintType = processor.getConstraintType();
// Add all of the constraints for this constraint type for all providers to the queue
for (ConstraintProvider constraintProvider : constraintProviders) {
if (constraintProvider.isSupported(selectedDefinition)) {
Collection<Constraint> constraintList = constraintProvider.getConstraints(selectedDefinition,
constraintType);
if (constraintList != null) {
constraintQueue.addAll(constraintList);
}
}
}
// If there are no constraints provided for this definition, then just skip it
if (constraintQueue.isEmpty()) {
result.addSkipped(attributeValueReader, processor.getName());
continue;
}
Collection<Constraint> additionalConstraints = new LinkedList<Constraint>();
// This loop is functionally identical to a for loop, but it has the advantage of letting us keep the queue around
// and populate it with any new constraints contributed by the processor
while (!constraintQueue.isEmpty()) {
Constraint constraint = constraintQueue.poll();
// If this constraint is not one that this processor handles, skip it and add it to the queue for the next processor;
// obviously this would be redundant (we're only looking at constraints that this processor can process) except that
// the previous processor might have pushed a new constraint (or constraints) onto the queue
if (!constraintType.isInstance(constraint)) {
result.addSkipped(attributeValueReader, processor.getName());
additionalConstraints.add(constraint);
continue;
}
constraint = ConstraintStateUtils.getApplicableConstraint(constraint, validationState,
stateMapping);
if (constraint != null) {
ProcessorResult processorResult = processor.process(result, value, constraint,
selectedAttributeValueReader);
Collection<Constraint> processorResultConstraints = processorResult.getConstraints();
if (processorResultConstraints != null && !processorResultConstraints.isEmpty()) {
constraintQueue.addAll(processorResultConstraints);
}
}
// Change the selected definition to whatever was returned from the processor
if (processorResult.isDefinitionProvided()) {
selectedDefinition = processorResult.getDefinition();
}
// Change the selected attribute value reader to whatever was returned from the processor
if (processorResult.isAttributeValueReaderProvided()) {
selectedAttributeValueReader = processorResult.getAttributeValueReader();
}
}
}
// After iterating through all the constraints for this processor, add the ones that weren't consumed by this processor back to the queue
constraintQueue.addAll(additionalConstraints);
}
}
}