Listed below are example usages of java.util.LinkedList#addAll(). You can follow the links to view the source code on GitHub, or leave a comment in the panel on the right.
/**
 * Walks each given pattern (and any nested value patterns reachable from it)
 * and feeds every {@code ValuePatternCondition}'s value collection to
 * {@code valueProcessor}, always paired with the top-level pattern the walk
 * started from.
 *
 * @param patterns       top-level patterns to inspect
 * @param valueProcessor receives (top-level pattern, values) pairs
 * @return {@code false} as soon as the processor rejects a pair,
 *         {@code true} once everything has been processed
 */
public static boolean processStringValues(Collection<ElementPattern<?>> patterns, final PairProcessor<ElementPattern<?>, Collection<Object>> valueProcessor) {
  final LinkedList<ElementPattern<?>> queue = new LinkedList<ElementPattern<?>>();
  for (final ElementPattern<?> root : patterns) {
    queue.add(root);
    while (!queue.isEmpty()) {
      final ElementPattern<?> current = queue.removeFirst();
      final ElementPatternCondition<?> elementCondition = current.getCondition();
      final InitialPatternCondition<?> initial = elementCondition.getInitialCondition();
      if (initial instanceof InitialPatternConditionPlus) {
        // The initial condition may itself contribute more patterns to walk.
        queue.addAll(((InitialPatternConditionPlus<?>)initial).getPatterns());
      }
      for (PatternCondition<?> each : elementCondition.getConditions()) {
        if (each instanceof PatternConditionPlus) {
          queue.add(((PatternConditionPlus)each).getValuePattern());
        }
        else if (each instanceof ValuePatternCondition) {
          // Values are reported against the top-level pattern, not the nested one.
          if (!valueProcessor.process(root, ((ValuePatternCondition)each).getValues())) {
            return false;
          }
        }
      }
    }
  }
  return true;
}
/**
 * Finds all registered property sources of the given type.
 * Composite sources are flattened so their nested sources are inspected too.
 *
 * @param sourceClass the property-source type to look for
 * @return every registered source assignable to {@code sourceClass}
 */
protected <S extends PropertySource<?>> List<S> findPropertySources(Class<S> sourceClass) {
  List<S> matches = new LinkedList<>();
  LinkedList<PropertySource<?>> pending = toLinkedList(environment.getPropertySources());
  while (!pending.isEmpty()) {
    PropertySource<?> candidate = pending.pop();
    if (candidate instanceof CompositePropertySource) {
      // Flatten: enqueue the children of a composite source for inspection.
      pending.addAll(((CompositePropertySource) candidate).getPropertySources());
    } else if (sourceClass.isInstance(candidate)) {
      matches.add(sourceClass.cast(candidate));
    }
  }
  return matches;
}
/**
 * Given the location of the 'middle snake', split the diff in two parts
 * and recurse.
 * @param text1 Old string to be diffed.
 * @param text2 New string to be diffed.
 * @param x Index of split point in text1.
 * @param y Index of split point in text2.
 * @param deadline Time at which to bail if not yet complete.
 * @return LinkedList of Diff objects.
 */
private LinkedList<Diff> diff_bisectSplit(String text1, String text2,
    int x, int y, long deadline) {
  // Split both texts at the snake location.
  String head1 = text1.substring(0, x);
  String head2 = text2.substring(0, y);
  String tail1 = text1.substring(x);
  String tail2 = text2.substring(y);
  // Diff each half serially, then concatenate the two result lists.
  LinkedList<Diff> headDiffs = diff_main(head1, head2, false, deadline);
  LinkedList<Diff> tailDiffs = diff_main(tail1, tail2, false, deadline);
  headDiffs.addAll(tailDiffs);
  return headDiffs;
}
/**
 * @generated
 */
public static List<CrossflowLinkDescriptor> getCsvSource_2001ContainedLinks(View view) {
  CsvSource modelElement = (CsvSource) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<CrossflowLinkDescriptor>(
      getOutgoingFeatureModelFacetLinks_Task_Output_4003(modelElement));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getActivity_2006ContainedLinks(View view) {
  Activity modelElement = (Activity) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getContainedTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getEndMessageEvent_2011ContainedLinks(View view) {
  EndMessageEvent modelElement = (EndMessageEvent) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getContainedTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement));
}
@Test
public void testProofShortStack()
  throws Exception
{
  // Build a 1 MB snowfield and obtain a valid merkle proof for word 0, then
  // corrupt the proof by hashing its two lowest merkle components into their
  // parent and pushing that parent back ("short stack"): the proof is one
  // level too short and validation must reject it.
  long location = 0;
  int mb_size = 1;
  String seed = "zing";
  File tmp_dir = testFolder.newFolder();
  long byte_len = mb_size * 1048576L;
  File snow = new File(tmp_dir, "test.snow");
  new SnowFall(snow.getAbsolutePath(), seed, byte_len);
  ByteString root_hash = new SnowMerkle(tmp_dir, "test", true).getRootHash();
  SnowMerkleProof proofGen = new SnowMerkleProof(tmp_dir, "test");
  SnowPowProof real_proof = proofGen.getProof(location);
  SnowPowProof.Builder fake_proof = SnowPowProof.newBuilder();
  fake_proof.setWordIdx(location);
  // Copy the real proof's merkle components into a mutable deque.
  LinkedList<ByteString> components = new LinkedList<ByteString>();
  components.addAll(real_proof.getMerkleComponentList());
  // Combine the two lowest components into their parent hash and push it back.
  MessageDigest md = MessageDigest.getInstance(Globals.SNOW_MERKLE_HASH_ALGO);
  md.update(components.poll().toByteArray());
  md.update(components.poll().toByteArray());
  components.push(ByteString.copyFrom(md.digest()));
  fake_proof.addAllMerkleComponent(components);
  Assert.assertFalse(Validation.checkProof(fake_proof.build(), root_hash, byte_len));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getReceiveTask_2025ContainedLinks(View view) {
  ReceiveTask modelElement = (ReceiveTask) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getContainedTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getIntermediateErrorCatchEvent_3032ContainedLinks(View view) {
  IntermediateErrorCatchEvent modelElement = (IntermediateErrorCatchEvent) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getContainedTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getTask_3005ContainedLinks(View view) {
  Task modelElement = (Task) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getContainedTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement));
}
// Visits an implication node: collects the pair lists produced by visiting
// both operands and concatenates them into a single list.
@Override
public LinkedList<Pair<String, Boolean>> visit(Imply node) {
isFirstNode = false;
LinkedList<Pair<String, Boolean>> retX = node.getX().accept(this);
LinkedList<Pair<String, Boolean>> retY = node.getY().accept(this);
if (retX != null && retY != null) {
// Both operands produced results: append Y's pairs onto X's list.
retX.addAll(retY);
// NOTE(review): when retX != null but retY == null, this assignment discards
// retX and returns null — confirm that a null operand result is meant to be
// dominant rather than falling back to the non-null side.
} else retX = retY;
return retX;
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getScriptTask_3028OutgoingLinks(View view) {
  ScriptTask modelElement = (ScriptTask) view.getElement();
  // LinkedList(Collection) copies the elements via addAll.
  return new LinkedList<ProcessLinkDescriptor>(
      getOutgoingTypeModelFacetLinks_SequenceFlow_4001(modelElement));
}
/**
 * @generated
 */
public static List<ProcessLinkDescriptor> getActivity_2006IncomingLinks(View view) {
  Activity modelElement = (Activity) view.getElement();
  // Cross-reference map over every resource in the set, used to locate
  // inbound links targeting this element.
  Map<EObject, Collection<EStructuralFeature.Setting>> crossReferences =
      EcoreUtil.CrossReferencer.find(view.eResource().getResourceSet().getResources());
  LinkedList<ProcessLinkDescriptor> incoming = new LinkedList<ProcessLinkDescriptor>();
  incoming.addAll(getIncomingTypeModelFacetLinks_SequenceFlow_4001(modelElement, crossReferences));
  incoming.addAll(getIncomingTypeModelFacetLinks_TextAnnotationAttachment_4003(modelElement, crossReferences));
  return incoming;
}
/**
 * Find the differences between two texts. Assumes that the texts do not
 * have any common prefix or suffix.
 * @param text1 Old string to be diffed.
 * @param text2 New string to be diffed.
 * @param checklines Speedup flag. If false, then don't run a
 *     line-level diff first to identify the changed areas.
 *     If true, then run a faster slightly less optimal diff.
 * @param deadline Time when the diff should be complete by.
 * @return Linked List of Diff objects.
 */
private LinkedList<Diff> diff_compute(String text1, String text2,
    boolean checklines, long deadline) {
  LinkedList<Diff> diffs = new LinkedList<Diff>();
  // Speedup: an empty side reduces to a pure insert or a pure delete.
  if (text1.isEmpty()) {
    diffs.add(new Diff(Operation.INSERT, text2));
    return diffs;
  }
  if (text2.isEmpty()) {
    diffs.add(new Diff(Operation.DELETE, text1));
    return diffs;
  }
  boolean text1Longer = text1.length() > text2.length();
  String longer = text1Longer ? text1 : text2;
  String shorter = text1Longer ? text2 : text1;
  int at = longer.indexOf(shorter);
  if (at != -1) {
    // Speedup: the shorter text is contained within the longer text.
    Operation op = text1Longer ? Operation.DELETE : Operation.INSERT;
    diffs.add(new Diff(op, longer.substring(0, at)));
    diffs.add(new Diff(Operation.EQUAL, shorter));
    diffs.add(new Diff(op, longer.substring(at + shorter.length())));
    return diffs;
  }
  if (shorter.length() == 1) {
    // Single character string; after the previous speedup it cannot be an
    // equality, so the result is a full delete plus a full insert.
    diffs.add(new Diff(Operation.DELETE, text1));
    diffs.add(new Diff(Operation.INSERT, text2));
    return diffs;
  }
  // Check to see if the problem can be split in two around a half match:
  // hm = {text1 prefix, text1 suffix, text2 prefix, text2 suffix, common middle}.
  String[] hm = diff_halfMatch(text1, text2);
  if (hm != null) {
    // Diff each pair separately, then join the results around the common middle.
    LinkedList<Diff> left = diff_main(hm[0], hm[2], checklines, deadline);
    LinkedList<Diff> right = diff_main(hm[1], hm[3], checklines, deadline);
    left.add(new Diff(Operation.EQUAL, hm[4]));
    left.addAll(right);
    return left;
  }
  if (checklines && text1.length() > 100 && text2.length() > 100) {
    return diff_lineMode(text1, text2, deadline);
  }
  return diff_bisect(text1, text2, deadline);
}
/**
 * Seeds mapExprentMinTypes/mapExprentMaxTypes with the variable types that
 * are known up front: the implicit 'this' reference (for instance methods),
 * the declared method parameters, and the exception variables of catch blocks.
 */
private void setInitVars(RootStatement root) {
boolean thisVar = !method.hasModifier(CodeConstants.ACC_STATIC);
MethodDescriptor md = methodDescriptor;
// Instance methods: 'this' occupies local slot 0; pin its type to the
// current class in both the min and max maps.
if (thisVar) {
StructClass cl = (StructClass) DecompilerContext.getProperty(DecompilerContext.CURRENT_CLASS);
VarType clType = new VarType(CodeConstants.TYPE_OBJECT, 0, cl.qualifiedName);
mapExprentMinTypes.put(new VarVersionPair(0, 1), clType);
mapExprentMaxTypes.put(new VarVersionPair(0, 1), clType);
}
// Parameters start after 'this' (if present); varIndex advances by each
// parameter's stackSize so wide values occupy the correct slots.
int varIndex = 0;
for (int i = 0; i < md.params.length; i++) {
mapExprentMinTypes.put(new VarVersionPair(varIndex + (thisVar ? 1 : 0), 1), md.params[i]);
mapExprentMaxTypes.put(new VarVersionPair(varIndex + (thisVar ? 1 : 0), 1), md.params[i]);
varIndex += md.params[i].stackSize;
}
// catch variables
// Iterative walk over the whole statement tree: record the declared type of
// every exception variable introduced by a catch or catch-all statement.
LinkedList<Statement> stack = new LinkedList<>();
stack.add(root);
while (!stack.isEmpty()) {
Statement stat = stack.removeFirst();
List<VarExprent> lstVars = null;
if (stat.type == Statement.TYPE_CATCHALL) {
lstVars = ((CatchAllStatement) stat).getVars();
} else if (stat.type == Statement.TYPE_TRYCATCH) {
lstVars = ((CatchStatement) stat).getVars();
}
if (lstVars != null) {
for (VarExprent var : lstVars) {
mapExprentMinTypes.put(new VarVersionPair(var.getIndex(), 1), var.getVarType());
mapExprentMaxTypes.put(new VarVersionPair(var.getIndex(), 1), var.getVarType());
}
}
// Continue the walk with this statement's children.
stack.addAll(stat.getStats());
}
}
/**
 * Drains the given work list, acting on every file accepted by the configured
 * filter and descending into directories by prepending their matching content
 * to the list. Sets the scanner state to RUNNING on entry, to COMPLETED when
 * the list is fully drained, and to STOPPED on exception; an asynchronous
 * state change away from RUNNING (e.g. via stop()) ends the loop early.
 *
 * @param task bookkeeping object whose counters and info text are updated
 * @param list work list, initially holding the root directory; consumed destructively
 */
private void scan(ScanTask task, LinkedList<File> list) {
setStateAndNotify(RUNNING);
task.info = "In Progress";
try {
// The FileFilter will tell us which files match and which don't.
//
final FileFilter filter = config.buildFileFilter();
// We have two condition to end the loop: either the list is
// empty, meaning there's nothing more to scan, or the state of
// the DirectoryScanner was asynchronously switched to STOPPED by
// another thread, e.g. because someone called "stop" on the
// ScanManagerMXBean
//
while (!list.isEmpty() && state == RUNNING) {
// Get and remove the first element in the list.
//
final File current = list.poll();
// Increment number of file scanned.
task.scanned++;
// If 'current' is a file, it's already been matched by our
// file filter (see below): act on it.
// Note that for the first iteration of this loop, there will
// be one single file in the list: the root directory for this
// scanner.
//
if (current.isFile()) {
task.matching++;
actOn(current);
}
// If 'current' is a directory, then
// find files and directories that match the file filter
// in this directory
//
if (current.isDirectory()) {
// Gets matching files and directories
final File[] content = current.listFiles(filter);
// listFiles returns null on I/O error or permission problems: skip.
if (content == null) continue;
// Adds all matching file to the list.
// Prepending (index 0) makes the traversal depth-first.
list.addAll(0,Arrays.asList(content));
}
}
// The loop terminated. If the list is empty, then we have
// completed our task. If not, then somebody must have called
// stop() on this directory scanner.
//
if (list.isEmpty()) {
task.info = "Successfully Completed";
setStateAndNotify(COMPLETED);
}
} catch (Exception x) {
// We got an exception: stop the scan
//
task.info = "Failed: "+x;
if (LOG.isLoggable(Level.FINEST))
LOG.log(Level.FINEST,"scan task failed: "+x,x);
else if (LOG.isLoggable(Level.FINE))
LOG.log(Level.FINE,"scan task failed: "+x);
setStateAndNotify(STOPPED);
} catch (Error e) {
// We got an Error:
// Should not happen unless we ran out of memory or
// whatever - don't even try to notify, but
// stop the scan anyway!
//
state=STOPPED;
task.info = "Error: "+e;
// rethrow error.
//
throw e;
}
}
/**
 * Find the differences between two texts. Assumes that the texts do not
 * have any common prefix or suffix.
 * @param text1 Old string to be diffed.
 * @param text2 New string to be diffed.
 * @param checklines Speedup flag. If false, then don't run a
 * line-level diff first to identify the changed areas.
 * If true, then run a faster slightly less optimal diff.
 * @param deadline Time when the diff should be complete by.
 * @return Linked List of Diff objects.
 */
private LinkedList<Diff> diff_compute(String text1, String text2,
boolean checklines, long deadline) {
LinkedList<Diff> diffs = new LinkedList<Diff>();
if (text1.length() == 0) {
// Just add some text (speedup).
diffs.add(new Diff(Operation.INSERT, text2));
return diffs;
}
if (text2.length() == 0) {
// Just delete some text (speedup).
diffs.add(new Diff(Operation.DELETE, text1));
return diffs;
}
String longtext = text1.length() > text2.length() ? text1 : text2;
String shorttext = text1.length() > text2.length() ? text2 : text1;
int i = longtext.indexOf(shorttext);
if (i != -1) {
// Shorter text is inside the longer text (speedup).
Operation op = (text1.length() > text2.length()) ?
Operation.DELETE : Operation.INSERT;
diffs.add(new Diff(op, longtext.substring(0, i)));
diffs.add(new Diff(Operation.EQUAL, shorttext));
diffs.add(new Diff(op, longtext.substring(i + shorttext.length())));
return diffs;
}
if (shorttext.length() == 1) {
// Single character string.
// After the previous speedup, the character can't be an equality.
diffs.add(new Diff(Operation.DELETE, text1));
diffs.add(new Diff(Operation.INSERT, text2));
return diffs;
}
// Check to see if the problem can be split in two.
// hm = {text1 prefix, text1 suffix, text2 prefix, text2 suffix, common middle}
String[] hm = diff_halfMatch(text1, text2);
if (hm != null) {
// A half-match was found, sort out the return data.
String text1_a = hm[0];
String text1_b = hm[1];
String text2_a = hm[2];
String text2_b = hm[3];
String mid_common = hm[4];
// Send both pairs off for separate processing.
LinkedList<Diff> diffs_a = diff_main(text1_a, text2_a,
checklines, deadline);
LinkedList<Diff> diffs_b = diff_main(text1_b, text2_b,
checklines, deadline);
// Merge the results: diffs_a + EQUAL(mid_common) + diffs_b.
diffs = diffs_a;
diffs.add(new Diff(Operation.EQUAL, mid_common));
diffs.addAll(diffs_b);
return diffs;
}
if (checklines && text1.length() > 100 && text2.length() > 100) {
return diff_lineMode(text1, text2, deadline);
}
return diff_bisect(text1, text2, deadline);
}
/**
 * Finds clusters of mutually dependent types. For each type that has
 * dependencies, walks the dependency graph breadth-first; if the walk returns
 * to the starting type (i.e. the start is on a cycle), the set of visited
 * types is recorded as a loop group.
 *
 * @param fromToDepsOn adjacency map from a type to the set of types it depends on
 * @return one set of types per detected dependency cycle
 */
private List<HashSet<UnqualifiedGenericType>> findDepClusters(HashMap<UnqualifiedGenericType, HashSet<UnqualifiedGenericType>> fromToDepsOn) {
  List<HashSet<UnqualifiedGenericType>> loopGroups = new ArrayList<HashSet<UnqualifiedGenericType>>();
  List<UnqualifiedGenericType> startingSet = fromToDepsOn.keySet().stream().filter(a -> !a.dependsOn.isEmpty()).collect(Collectors.toList());
  for (UnqualifiedGenericType start : startingSet) {
    // Skip types already assigned to a previously found loop group.
    if (loopGroups.stream().anyMatch(a -> a.contains(start))) {
      continue;
    }
    boolean isLoopGroup = false;
    HashSet<UnqualifiedGenericType> inGroup = new HashSet<UnqualifiedGenericType>();
    LinkedList<UnqualifiedGenericType> visitNext = new LinkedList<UnqualifiedGenericType>();
    inGroup.add(start);
    visitNext.addAll(fromToDepsOn.get(start)); // start is a key, so non-null
    while (!visitNext.isEmpty()) {
      UnqualifiedGenericType item = visitNext.pop();
      if (item == start) {
        // The walk came back to the start: we are on a cycle.
        isLoopGroup = true;
        continue;
      }
      if (!inGroup.add(item)) {
        continue; // already visited
      }
      // Fix: a dependency may not be a key of the map; the original
      // visitNext.addAll(fromToDepsOn.get(item)) threw an NPE in that case.
      HashSet<UnqualifiedGenericType> deps = fromToDepsOn.get(item);
      if (deps != null) {
        visitNext.addAll(deps);
      }
    }
    // inGroup always contains at least 'start', so no emptiness check is needed.
    if (isLoopGroup) {
      loopGroups.add(inGroup);
    }
  }
  return loopGroups;
}
/**
 * Traverses the direct graph built from the given root statement, visiting
 * each node once and processing its exprent list for prefix/postfix
 * increment and decrement patterns.
 *
 * @return true if any node's exprent list was changed
 */
public boolean findPPandMM(RootStatement root) {
  DirectGraph graph = new FlattenStatementsHelper().buildDirectGraph(root);
  HashSet<DirectNode> visited = new HashSet<>();
  LinkedList<DirectNode> pending = new LinkedList<>();
  pending.add(graph.first);
  boolean changed = false;
  while (!pending.isEmpty()) {
    DirectNode node = pending.removeFirst();
    // add() returns false if the node was already visited.
    if (!visited.add(node)) {
      continue;
    }
    changed |= processExprentList(node.exprents);
    pending.addAll(node.succs);
  }
  return changed;
}
// Traverses the direct graph built from the given root statement, visiting
// each node once and processing its exprent list; returns true if any list
// was changed. NOTE(review): byte-identical duplicate of the findPPandMM
// defined earlier in this file — consider deduplicating.
public boolean findPPandMM(RootStatement root) {
FlattenStatementsHelper flatthelper = new FlattenStatementsHelper();
DirectGraph dgraph = flatthelper.buildDirectGraph(root);
LinkedList<DirectNode> stack = new LinkedList<>();
stack.add(dgraph.first);
HashSet<DirectNode> setVisited = new HashSet<>();
boolean res = false;
while (!stack.isEmpty()) {
DirectNode node = stack.removeFirst();
// Skip nodes already processed (the graph may contain cycles).
if (setVisited.contains(node)) {
continue;
}
setVisited.add(node);
res |= processExprentList(node.exprents);
stack.addAll(node.succs);
}
return res;
}