com.google.common.collect.Multimap#size() Source Code Examples

Listed below are code examples that use com.google.common.collect.Multimap#size(); follow each project link to view the full source on GitHub.
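
Quick orientation before the examples: Multimap#size() returns the total number of key-value pairs across all keys, not the number of distinct keys. A minimal standalone sketch (class and variable names are illustrative, not taken from any project below):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 90);
        scores.put("alice", 85); // duplicate keys are allowed; each pair counts
        scores.put("bob", 70);

        System.out.println(scores.size());          // 3: one per key-value pair
        System.out.println(scores.keySet().size()); // 2: distinct keys only
    }
}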

Example 1 Project: incubator-pinot   File: AnomaliesResource.java
public static Multimap<String, String> generateFilterSetWithDimensionMap(DimensionMap dimensionMap,
    Multimap<String, String> filterSet) {

  Multimap<String, String> newFilterSet = HashMultimap.create();

  // The dimension map gives more specific dimension information than the filter set (i.e., the dimension
  // map should be a subset of filterSet), so it is processed first.
  if (MapUtils.isNotEmpty(dimensionMap)) {
    for (Map.Entry<String, String> dimensionMapEntry : dimensionMap.entrySet()) {
      newFilterSet.put(dimensionMapEntry.getKey(), dimensionMapEntry.getValue());
    }
  }

  if (filterSet != null && filterSet.size() != 0) {
    for (String key : filterSet.keySet()) {
      if (!newFilterSet.containsKey(key)) {
        newFilterSet.putAll(key, filterSet.get(key));
      }
    }
  }

  return newFilterSet;
}
 
Example 2 Project: fast-family-master   File: MutliMapTest.java
public static void main(String[] args) {
    Multimap<String, String> myMultimap = ArrayListMultimap.create();
    myMultimap.put("Fruits", "Banana");
    myMultimap.put("Fruits", "Apple");
    myMultimap.put("Fruits", "Pear");
    myMultimap.put("Vegetables", "Carrot");

    // size() counts key-value pairs, so this prints 4
    int size = myMultimap.size();
    System.out.println(size);

    Collection<String> fruits = myMultimap.get("Fruits");
    System.out.println(fruits);

    Collection<String> vegetables = myMultimap.get("Vegetables");
    System.out.println(vegetables);

    for (String value : myMultimap.values()) {
        System.out.println(value);
    }

    // after removeAll, get() returns an empty collection rather than null
    myMultimap.removeAll("Fruits");
    System.out.println(myMultimap.get("Fruits"));
}
 
Example 3 Project: phoenix   File: IndexWriter.java
/**
 * Convert the passed index updates to {@link HTableInterfaceReference}s.
 * @param indexUpdates from the index builder
 * @return pairs that can then be written by an {@link IndexWriter}.
 */
public static Multimap<HTableInterfaceReference, Mutation> resolveTableReferences(
    Collection<Pair<Mutation, byte[]>> indexUpdates) {
  Multimap<HTableInterfaceReference, Mutation> updates = ArrayListMultimap
      .<HTableInterfaceReference, Mutation> create();
  // simple map to make lookups easy while we build the map of tables to create
  Map<ImmutableBytesPtr, HTableInterfaceReference> tables =
      new HashMap<ImmutableBytesPtr, HTableInterfaceReference>(updates.size());
  for (Pair<Mutation, byte[]> entry : indexUpdates) {
    byte[] tableName = entry.getSecond();
    ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
    HTableInterfaceReference table = tables.get(ptr);
    if (table == null) {
      table = new HTableInterfaceReference(ptr);
      tables.put(ptr, table);
    }
    updates.put(table, entry.getFirst());
  }

  return updates;
}
 
Example 4 Project: xtext-extras   File: ResolvedFeatures.java
protected List<IResolvedOperation> computeAllOperations() {
	JvmType rawType = getRawType();
	if (!(rawType instanceof JvmDeclaredType)) {
		return Collections.emptyList();
	}
	Multimap<String, AbstractResolvedOperation> processedOperations = LinkedHashMultimap.create();
	for (IResolvedOperation resolvedOperation : getDeclaredOperations()) {
		processedOperations.put(resolvedOperation.getDeclaration().getSimpleName(), (AbstractResolvedOperation) resolvedOperation);
	}
	if (targetVersion.isAtLeast(JavaVersion.JAVA8)) {
		computeAllOperationsFromSortedSuperTypes((JvmDeclaredType) rawType, processedOperations);
	} else {
		Set<JvmType> processedTypes = Sets.newHashSet(rawType);
		computeAllOperationsFromSuperTypes((JvmDeclaredType) rawType, processedOperations, processedTypes);
	}
	// make sure the declared operations are the first in the list
	List<IResolvedOperation> result = new ArrayList<IResolvedOperation>(processedOperations.size());
	result.addAll(getDeclaredOperations());
	for (AbstractResolvedOperation operation : processedOperations.values()) {
		if (operation.getDeclaration().getDeclaringType() != rawType) {
			result.add(operation);
		}
	}
	return Collections.unmodifiableList(result);
}
 
Example 5 Project: presto   File: SqlStageExecution.java
public synchronized Set<RemoteTask> scheduleSplits(InternalNode node, Multimap<PlanNodeId, Split> splits, Multimap<PlanNodeId, Lifespan> noMoreSplitsNotification)
{
    requireNonNull(node, "node is null");
    requireNonNull(splits, "splits is null");

    if (stateMachine.getState().isDone()) {
        return ImmutableSet.of();
    }
    splitsScheduled.set(true);

    checkArgument(stateMachine.getFragment().getPartitionedSources().containsAll(splits.keySet()), "Invalid splits");

    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    Collection<RemoteTask> tasks = this.tasks.get(node);
    RemoteTask task;
    if (tasks == null) {
        // The output buffer depends on the task id starting from 0 and being sequential, since each
        // task is assigned a private buffer based on task id.
        TaskId taskId = new TaskId(stateMachine.getStageId(), nextTaskId.getAndIncrement());
        task = scheduleTask(node, taskId, splits, OptionalInt.empty());
        newTasks.add(task);
    }
    else {
        task = tasks.iterator().next();
        task.addSplits(splits);
    }
    if (noMoreSplitsNotification.size() > 1) {
        // The assumption that `noMoreSplitsNotification.size() <= 1` currently holds.
        // If this assumption no longer holds, we should consider calling task.noMoreSplits with multiple entries in one shot.
        // Methods of this kind can be expensive since they grab locks and/or send HTTP requests on change.
        throw new UnsupportedOperationException("This assumption no longer holds: noMoreSplitsNotification.size() <= 1");
    }
    for (Entry<PlanNodeId, Lifespan> entry : noMoreSplitsNotification.entries()) {
        task.noMoreSplits(entry.getKey(), entry.getValue());
    }
    return newTasks.build();
}
 
Example 6 Project: xtext-core   File: ContextFinder.java
@Override
public Set<ISerializationContext> findByContents(EObject semanticObject, Iterable<ISerializationContext> contextCandidates) {
	if (semanticObject == null)
		throw new NullPointerException();

	initConstraints();

	Multimap<IConstraint, ISerializationContext> constraints;
	if (contextCandidates != null)
		constraints = getConstraints(semanticObject, contextCandidates);
	else
		constraints = getConstraints(semanticObject);

	if (constraints.size() < 2)
		return Sets.newLinkedHashSet(constraints.values());

	for (IConstraint cand : Lists.newArrayList(constraints.keySet()))
		if (!isValidValueQuantity(cand, semanticObject))
			constraints.removeAll(cand);

	if (constraints.size() < 2)
		return Sets.newLinkedHashSet(constraints.values());

	LinkedHashSet<ISerializationContext> result = Sets.newLinkedHashSet(constraints.values());
	for (EStructuralFeature feat : semanticObject.eClass().getEAllStructuralFeatures()) {
		if (transientValueUtil.isTransient(semanticObject, feat) != ValueTransient.NO)
			continue;
		if (feat.isMany() && ((List<?>) semanticObject.eGet(feat)).isEmpty())
			continue;
		Multimap<AbstractElement, ISerializationContext> assignments = collectAssignments(constraints, feat);
		Set<AbstractElement> assignedElements = findAssignedElements(semanticObject, feat, assignments);
		Set<ISerializationContext> keep = Sets.newHashSet();
		for (AbstractElement ele : assignedElements)
			keep.addAll(assignments.get(ele));
		result.retainAll(keep);
	}
	return result;
}
 
Example 7 Project: phoenix   File: Indexer.java
@Override
public void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c) {
  Multimap<HTableInterfaceReference, Mutation> updates = failedIndexEdits.getEdits(c.getEnvironment().getRegion());
  
  if (this.disabled) {
      return;
  }

  long start = EnvironmentEdgeManager.currentTimeMillis();
  try {
      //if we have no pending edits to complete, then we are done
      if (updates == null || updates.size() == 0) {
        return;
      }

      LOGGER.info("Found some outstanding index updates that didn't succeed during"
              + " WAL replay - attempting to replay now.");

      // do the usual writer stuff, killing the server again, if we can't manage to make the index
      // writes succeed again
      try {
          writer.writeAndHandleFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
      } catch (IOException e) {
              LOGGER.error("During WAL replay of outstanding index updates, "
                      + "Exception is thrown instead of killing server during index writing", e);
      }
  } finally {
       long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
       if (duration >= slowPostOpenThreshold) {
           if (LOGGER.isDebugEnabled()) {
               LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
           }
           metricSource.incrementNumSlowPostOpenCalls();
       }
       metricSource.updatePostOpenTime(duration);
  }
}
 
Example 8
@Override
public long process(KEYIN key, RawRecordContainer event, Multimap<String,NormalizedContentInterface> eventFields,
                TaskInputOutputContext<KEYIN,? extends RawRecordContainer,KEYOUT,VALUEOUT> context, ContextWriter<KEYOUT,VALUEOUT> contextWriter)
                throws IOException, InterruptedException {
    
    // Hold some event-specific variables to avoid re-processing
    this.shardId = getShardId(event);
    
    if (tokenHelper.isVerboseShardCounters()) {
        context.getCounter("EVENT_SHARD_ID", new String(this.shardId)).increment(1);
    }
    
    this.eventDataTypeName = event.getDataType().outputName();
    this.eventUid = event.getId().toString();
    
    // write the standard set of keys
    Multimap<BulkIngestKey,Value> keys = super.processBulk(key, event, eventFields, new ContextWrappedStatusReporter(context));
    long count = keys.size();
    contextWriter.write(keys, context);
    
    StatusReporter reporter = new ContextWrappedStatusReporter(context);
    
    // gc before we get into the tokenization piece
    keys = null;
    
    // stream the tokens to the context writer here
    count += tokenizeEvent(event, context, contextWriter, reporter);
    
    // return the number of records written
    return count;
}
 
Example 9 Project: datamill   File: ApiHandler.java
private static JSONObject toJson(Multimap<String, String> headers) {
    if (headers != null && headers.size() > 0) {
        JSONObject json = new JSONObject();
        for (Map.Entry<String, String> entry : headers.entries()) {
            json.put(entry.getKey(), entry.getValue());
        }

        return json;
    }

    return null;
}
 
Example 10 Project: easyccg   File: EasyCCG.java
public static void printDetailedTiming(final Parser parser, DecimalFormat format)
{
  // Prints statistics about how long sentences of each length took to parse.
  int sentencesCovered = 0;
  Multimap<Integer, Long> sentenceLengthToParseTimeInNanos = parser.getSentenceLengthToParseTimeInNanos();
  int binNumber = 0;
  int binSize = 10;
  while (sentencesCovered < sentenceLengthToParseTimeInNanos.size()) {
    double totalTimeForBinInMillis = 0;
    int totalSentencesInBin = 0;
    for (int sentenceLength=binNumber*binSize + 1; sentenceLength < 1 + (binNumber+1) * binSize; sentenceLength=sentenceLength+1) {
      for (long time : sentenceLengthToParseTimeInNanos.get(sentenceLength)) {
        totalTimeForBinInMillis += ((double) time / 1000000);
        totalSentencesInBin++;
      }
    }
    sentencesCovered += totalSentencesInBin;
    double averageTimeInMillis = (double) totalTimeForBinInMillis / (totalSentencesInBin);
    if (totalSentencesInBin > 0) {
      System.err.println("Average time for sentences of length " + (1 + binNumber*binSize) + "-" + (binNumber+1) * binSize + " (" + totalSentencesInBin + "): " + format.format(averageTimeInMillis) + "ms");
    }
    
    binNumber++;
  }
  
  int totalSentencesTimes1000 = sentenceLengthToParseTimeInNanos.size() * 1000;
  long totalMillis = parser.getParsingTimeOnlyInMillis() + parser.getTaggingTimeOnlyInMillis();
  System.err.println("Just Parsing Time: " + parser.getParsingTimeOnlyInMillis() + "ms " + (totalSentencesTimes1000 / parser.getParsingTimeOnlyInMillis()) + " per second");
  System.err.println("Just Tagging Time: " + parser.getTaggingTimeOnlyInMillis() + "ms " + (totalSentencesTimes1000 / parser.getTaggingTimeOnlyInMillis()) + " per second");
  System.err.println("Total Time:        " + totalMillis + "ms " + (totalSentencesTimes1000 / totalMillis) + " per second");
}
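
The termination condition of the while-loop above relies on size() counting one entry per parsed sentence. A minimal sketch of that invariant (names are illustrative; the actual Multimap implementation used by easyccg may differ):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class ParseTimeSizeDemo {
    public static void main(String[] args) {
        Multimap<Integer, Long> lengthToNanos = ArrayListMultimap.create();
        lengthToNanos.put(5, 1_200_000L);  // two sentences of length 5
        lengthToNanos.put(5, 1_500_000L);
        lengthToNanos.put(12, 9_000_000L); // one sentence of length 12

        // size() == 3, the total number of parsed sentences, which is what
        // sentencesCovered is compared against in printDetailedTiming
        System.out.println(lengthToNanos.size());
    }
}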
 
Example 11 Project: grammaticus   File: GrammaticalLabelFileTest.java
public void testDifferenceInRenameTabs() throws Exception {
    StringBuilder diffs = new StringBuilder();
    for (HumanLanguage language : LanguageProviderFactory.get().getAll()) {
        if (!LanguageDeclensionFactory.get().getDeclension(language).hasPlural()) continue;
        LanguageDictionary dictionary = loadDictionary(language);
        Multimap<String,String> nowHasPlural = TreeMultimap.create();
        Multimap<String,String> missingPlural = TreeMultimap.create();

        for (String entity : new TreeSet<String>(dictionary.getNounsByEntity().keySet())) {
            for (Noun n : dictionary.getNounsByEntity().get(entity)) {
                if (n.getNounType() == NounType.ENTITY) continue;
                boolean hasPluralForm = false;
                for (NounForm form : n.getAllDefinedValues().keySet()) {
                    if (form.getNumber() == LanguageNumber.PLURAL && form.getCase() == LanguageCase.NOMINATIVE) {
                        hasPluralForm = true;
                        break;
                    }
                }
                if (hasPluralForm != (n.getNounType() == NounType.FIELD)) {
                    if (hasPluralForm) {
                        nowHasPlural.put(entity, n.getName());
                    } else {
                        missingPlural.put(entity, n.getName());
                    }
                }
            }
        }
        if (nowHasPlural.size() > 0) {
            diffs.append(language).append(" can rename plural fields for: ").append(nowHasPlural).append('\n');
        }
        if (missingPlural.size() > 0) {
            diffs.append(language).append(" has these plural fields removed for rename: ").append(missingPlural).append('\n');
        }
    }
    System.out.println(diffs.toString());
}
 
Example 12 Project: grakn   File: ReasonerUtils.java
/**
 * NB: assumes MATCH semantics - all types and their subtypes are considered.
 * Computes the map of compatible RelationTypes for a given set of Types
 * (the intersection of the allowed relation-type sets for each entry type) together with the compatible roles.
 * @param types for which the set of compatible RelationTypes is to be computed
 * @param schemaConceptConverter converter between SchemaConcept and relation type-role entries
 * @param <T> type generic
 * @return map of compatible RelationTypes and their corresponding Roles
 */
public static <T extends SchemaConcept> Multimap<RelationType, Role> compatibleRelationTypesWithRoles(Set<T> types, SchemaConceptConverter<T> schemaConceptConverter) {
    Multimap<RelationType, Role> compatibleTypes = HashMultimap.create();
    if (types.isEmpty()) return compatibleTypes;
    Iterator<T> typeIterator = types.iterator();
    compatibleTypes.putAll(schemaConceptConverter.toRelationMultimap(typeIterator.next()));

    while(typeIterator.hasNext() && compatibleTypes.size() > 1) {
        compatibleTypes = multimapIntersection(compatibleTypes, schemaConceptConverter.toRelationMultimap(typeIterator.next()));
    }
    return compatibleTypes;
}
 
Example 13
protected void logStatus(String folder, Multimap<File, File> trace2class) {
	String p = xtendAsPrimaryDebugSource ? "primary" : "secondary (via SMAP)";
	int n = trace2class.size();
	getLog().info("Installing Xtend files into " + n + " class files as " + p + " debug sources in: " + folder);
	getLog().debug("xtendAsPrimaryDebugSource=" + xtendAsPrimaryDebugSource);
	getLog().debug("hideSyntheticVariables=" + hideSyntheticVariables);
}
 
Example 14 Project: securify   File: DestackerFallback.java
private void handleStackMerging(Stack<Variable> localStack, int jumpsrc, Instruction jumpI, int jumpdest) {
	// destination already destacked, may need to map current stack
	if (!canonicalStackForBranchJoinJumpdest.containsKey(jumpdest)) {
		throw new IllegalStateException("target jumpdest processed, but no canonical stack defined");
	}
	Stack<Variable> canonicalStack = canonicalStackForBranchJoinJumpdest.get(jumpdest);
	if (localStack.size() != canonicalStack.size()) {
		log.println("Branch merge: stack size mismatch: canonical @" + HexPrinter.toHex(jumpdest) +
				" with size " + canonicalStack.size() + " vs local @" + HexPrinter.toHex(jumpsrc) + " with size " + localStack.size());
		sawMergeWithDiffStackSize = true;
	}
	Multimap<Variable, Variable> mapToCanonical = HashMultimap.create();

	int mergeSize = Math.min(localStack.size(), canonicalStack.size());
	if (mergeSize == 0) {
		log.println("Branch merge: skipped merger for empty stack");
		return;
	}
	for (int i = 1; i <= mergeSize; ++i) {
		mapToCanonical.put(localStack.get(localStack.size() - i), canonicalStack.get(canonicalStack.size() - i));
	}
	log.println("stack merging from @" + HexPrinter.toHex(jumpsrc) + " into @" + HexPrinter.toHex(jumpdest));
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.forEach(canonical -> log.println(" " + canonical + " <- " + variable)));

	if (mapToCanonical.size() != mapToCanonical.values().stream().distinct().count()) {
		throw new IllegalStateException("a canonical variable is assigned multiple times");
	}

	boolean jumpCondition = findJumpCondition(jumpI, jumpdest);
	
	// create re-assignment instructions		
	if (variableReassignments.containsKey(new Pair<>(jumpI, jumpCondition))) {
		throw new IllegalStateException("reassignment does already exist");
	}
	Map<Variable, Variable> reassignments = new LinkedHashMap<>();
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.stream().filter(canonical -> variable != canonical)
					.forEach(canonical -> reassignments.put(canonical, variable)));

	// create temporary variables if we have conflicting variable swaps
	Map<Variable, Variable> temporaries = new LinkedHashMap<>();
	Set<Variable> wasAssignedTo = new HashSet<>();
	// search all variables that are reassigned before they get assigned
	reassignments.forEach((canonical, local) -> {
		if (wasAssignedTo.contains(local)) {
			Variable tmpVar = new Variable();
			temporaries.put(local, tmpVar);
			//if (isVirtualCanonicalVar(canonical) && isVirtualCanonicalVar(local)) {
			if (isVirtualCanonicalVar(local)) {
				virtualCanonicalVars.add(tmpVar);
			}
			log.println("swap conflict for: " + canonical + " <- " + local + "; created temp variable: " + tmpVar);
		}
		wasAssignedTo.add(canonical);
	});

	if (temporaries.size() > 0) {
		// replace locals with temporaries, if there is a temporary
		reassignments.replaceAll((canonical, local) -> temporaries.getOrDefault(local, local));
		// add assignments to temporaries at the beginning
		Map<Variable, Variable> reassignmentsWithTemps = new LinkedHashMap<>();
		temporaries.forEach((local, canonical) -> reassignmentsWithTemps.put(canonical, local));
		reassignmentsWithTemps.putAll(reassignments);
		reassignments.clear();
		reassignments.putAll(reassignmentsWithTemps);
	}

	variableReassignments.put(new Pair<>(jumpI, jumpCondition), reassignments);
}
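
The temporary-variable pass above untangles cyclic reassignments such as a swap (a <- b together with b <- a). A stripped-down sketch of the same idea using plain strings (hypothetical names, not the securify API):

import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class SwapConflictDemo {
    public static void main(String[] args) {
        // canonical <- local reassignments that form a swap: a <- b, b <- a
        Map<String, String> reassignments = new LinkedHashMap<>();
        reassignments.put("a", "b");
        reassignments.put("b", "a");

        // detect locals that are read after their slot has already been written
        Map<String, String> temporaries = new LinkedHashMap<>();
        Set<String> wasAssignedTo = new HashSet<>();
        reassignments.forEach((canonical, local) -> {
            if (wasAssignedTo.contains(local)) {
                temporaries.put(local, "tmp_" + local); // "a" is read after being written
            }
            wasAssignedTo.add(canonical);
        });

        // route the conflicting read through the temporary, assigning it first
        reassignments.replaceAll((canonical, local) -> temporaries.getOrDefault(local, local));
        Map<String, String> ordered = new LinkedHashMap<>();
        temporaries.forEach((local, tmp) -> ordered.put(tmp, local));
        ordered.putAll(reassignments);

        // prints {tmp_a=a, a=b, b=tmp_a}: a valid sequential swap
        System.out.println(ordered);
    }
}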
 
Example 15 Project: securify   File: Destacker.java
private void handleStackMerging(Stack<Variable> localStack, int jumpsrc, int jumpdest) {
	// destination already destacked, may need to map current stack
	if (!canonicalStackForBranchJoinJumpdest.containsKey(jumpdest)) {
		throw new IllegalStateException("target jumpdest processed, but no canonical stack defined");
	}
	Stack<Variable> canonicalStack = canonicalStackForBranchJoinJumpdest.get(jumpdest);
	if (localStack.size() != canonicalStack.size()) {
		sawMergeWithDiffStackSize = true;
		// can apparently happen for shared error handling code
		/*if (controlFlowGraph.containsKey(jumpdest)) {
			// find out if all paths from `jumpdest` lead to error
			Set<Integer> reachable = ControlFlowDetector.getAllReachableBranches(controlFlowGraph, jumpdest);
			// check that it does not contain a valid exit point
			boolean allPathsError = !reachable.contains(ControlFlowDetector.DEST_EXIT);
			if (allPathsError) {
				// ignore stack mismatch
				log.println("[ignored] Branch merge: stack size mismatch: canonical @" + toHex(jumpdest) +
						" with size " + canonicalStack.size() + " vs local @" + toHex(jumpsrc) + " with size " + localStack.size());
				return;
			}
		}*/
		// so check if all paths lead to error, and if so just don't merge the stacks, since it doesn't matter anyway (?)
		log.println("Branch merge: stack size mismatch: canonical @" + toHex(jumpdest) +
				" with size " + canonicalStack.size() + " vs local @" + toHex(jumpsrc) + " with size " + localStack.size());
	}
	Multimap<Variable, Variable> mapToCanonical = HashMultimap.create();

	for (int i = 1, n = Math.min(localStack.size(), canonicalStack.size()); i <= n; ++i) {
		mapToCanonical.put(localStack.get(localStack.size() - i), canonicalStack.get(canonicalStack.size() - i));
	}
	log.println("stack merging from @" + toHex(jumpsrc) + " into @" + toHex(jumpdest));
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.forEach(canonical -> log.println(" " + canonical + " <- " + variable)));

	if (mapToCanonical.size() != mapToCanonical.values().stream().distinct().count()) {
		throw new IllegalStateException("a canonical variable is assigned multiple times");
	}

	// create re-assignment instructions
	if (variableReassignments.containsKey(jumpsrc)) {
		throw new IllegalStateException("reassignment does already exist");
	}
	Map<Variable, Variable> reassignments = new LinkedHashMap<>();
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.stream().filter(canonical -> variable != canonical)
					.forEach(canonical -> reassignments.put(canonical, variable)));

	// create temporary variables if we have conflicting variable swaps
	Map<Variable, Variable> temporaries = new LinkedHashMap<>();
	Set<Variable> wasAssignedTo = new HashSet<>();
	// search all variables that are reassigned before they get assigned
	reassignments.forEach((canonical, local) -> {
		if (wasAssignedTo.contains(local)) {
			Variable tmpVar = new Variable();
			temporaries.put(local, tmpVar);
			log.println("swap conflict for: " + canonical + " <- " + local + "; created temp variable: " + tmpVar);
		}
		wasAssignedTo.add(canonical);
	});

	if (temporaries.size() > 0) {
		// replace locals with temporaries, if there is a temporary
		reassignments.replaceAll((canonical, local) -> temporaries.getOrDefault(local, local));
		// add assignments to temporaries at the beginning
		Map<Variable, Variable> reassignmentsWithTemps = new LinkedHashMap<>();
		temporaries.forEach((local, canonical) -> reassignmentsWithTemps.put(canonical, local));
		reassignmentsWithTemps.putAll(reassignments);
		reassignments.clear();
		reassignments.putAll(reassignmentsWithTemps);
	}

	variableReassignments.put(jumpsrc, reassignments);
}
 
Example 16 Project: datawave   File: EventMapper.java
@SuppressWarnings("unchecked")
public void executeHandler(K1 key, RawRecordContainer event, Multimap<String,NormalizedContentInterface> fields, DataTypeHandler<K1> handler,
                Context context) throws Exception {
    long count = 0;
    
    TraceStopwatch handlerTimer = null;
    
    // Handler based metrics
    if (metricsEnabled) {
        
        handlerTimer = new TraceStopwatch("Time in handler");
        handlerTimer.start();
    }
    
    // In the setup we determined whether or not we were performing bulk ingest. This tells us which
    // method to call on the DataTypeHandler interface.
    Multimap<BulkIngestKey,Value> r;
    
    if (!(handler instanceof ExtendedDataTypeHandler)) {
        r = handler.processBulk(key, event, fields, new ContextWrappedStatusReporter(getContext(context)));
        if (r == null) {
            getCounter(context, IngestInput.EVENT_FATAL_ERROR).increment(1);
            getCounter(context, IngestInput.EVENT_FATAL_ERROR.name(), "NullMultiMap").increment(1);
        } else {
            contextWriter.write(r, context);
            count = r.size();
        }
    } else {
        count = ((ExtendedDataTypeHandler<K1,K2,V2>) handler).process(key, event, fields, context, contextWriter);
        if (count == -1) {
            getCounter(context, IngestInput.EVENT_FATAL_ERROR).increment(1);
            getCounter(context, IngestInput.EVENT_FATAL_ERROR.name(), "NegOneCount").increment(1);
        }
    }
    
    // Update the counters
    if (count > 0) {
        getCounter(context, IngestOutput.ROWS_CREATED.name(), handler.getClass().getSimpleName()).increment(count);
        getCounter(context, IngestOutput.ROWS_CREATED).increment(count);
    }
    
    if (handler.getMetadata() != null) {
        handler.getMetadata().addEvent(handler.getHelper(event.getDataType()), event, fields, now.get());
    }
    
    if (metricsEnabled && handlerTimer != null) {
        handlerTimer.stop();
        long handlerTime = handlerTimer.elapsed(TimeUnit.MILLISECONDS);
        
        metricsLabels.clear();
        metricsLabels.put("dataType", event.getDataType().typeName());
        metricsLabels.put("handler", handler.getClass().getName());
        metricsService.collect(Metric.MILLIS_IN_HANDLER, metricsLabels.get(), fields, handlerTime);
        
        if (contextWriter instanceof KeyValueCountingContextWriter) {
            ((KeyValueCountingContextWriter) contextWriter).writeMetrics(event, fields, handler);
        }
    }
}
 
Example 17 Project: datawave   File: ExpandCompositeTerms.java
/**
 * Rebuilds the current 'and' node, and attempts to create the best composites from the leaf and ancestor anded nodes available. First, we descend into the
 * non-leaf nodes, and keep track of which leaf and anded nodes are used. We then attempt to create composites from the remaining leaf and anded nodes.
 * Finally, any leftover, unused leaf nodes are anded at this level, while the used leaf nodes are passed down to the descendants and anded where
 * appropriate.
 *
 * @param node
 *            An 'and' node from the original script
 * @param data
 *            ExpandData, containing ancestor anded nodes, used anded nodes, and a flag indicating whether composites were found
 * @return An expanded version of the 'and' node containing composite nodes, if found, or the original 'and' node, if not
 */
@Override
public Object visit(ASTAndNode node, Object data) {
    ExpandData parentData = (ExpandData) data;
    
    // only process delayed predicates
    if (QueryPropertyMarkerVisitor.instanceOfAnyExcept(node, Arrays.asList(ASTDelayedPredicate.class))) {
        return node;
    }
    
    // if we only have one child, just pass through
    // this shouldn't ever really happen, but it could
    if (node.jjtGetNumChildren() == 1)
        return super.visit(node, data);
    
    // first, find all leaf nodes
    // note: an 'and' node defining a range over a single term is considered a leaf node for our purposes
    List<JexlNode> nonLeafNodes = new ArrayList<>();
    Multimap<String,JexlNode> leafNodes = getLeafNodes(node, nonLeafNodes);
    
    // if this is a 'leaf' range node, check to see if a composite can be made
    if (leafNodes.size() == 1 && leafNodes.containsValue(node)) {
        // attempt to build a composite
        return visitLeafNode(node, parentData);
    }
    // otherwise, process the 'and' node as usual
    else {
        
        Multimap<String,JexlNode> usedLeafNodes = LinkedHashMultimap.create();
        
        // process the non-leaf nodes first
        List<JexlNode> processedNonLeafNodes = processNonLeafNodes(parentData, nonLeafNodes, leafNodes, usedLeafNodes);
        
        // remove the used nodes from the leaf and anded nodes
        leafNodes.values().removeAll(usedLeafNodes.values());
        parentData.andedNodes.values().removeAll(parentData.usedAndedNodes.values());
        
        // next, process the remaining leaf nodes
        List<JexlNode> processedLeafNodes = processUnusedLeafNodes(parentData, leafNodes, usedLeafNodes);
        
        // again, remove the used nodes from the leaf and anded nodes
        leafNodes.values().removeAll(usedLeafNodes.values());
        parentData.andedNodes.values().removeAll(parentData.usedAndedNodes.values());
        
        // rebuild the node if composites are found
        if (parentData.foundComposite) {
            List<JexlNode> processedNodes = new ArrayList<>();
            processedNodes.addAll(processedLeafNodes);
            processedNodes.addAll(processedNonLeafNodes);
            
            // rebuild the node
            JexlNode rebuiltNode = createUnwrappedAndNode(processedNodes);
            
            // distribute the used nodes into the rebuilt node
            if (!usedLeafNodes.values().isEmpty()) {
                // first we need to trim the used nodes to eliminate any wrapping nodes
                // i.e. reference, reference expression, or single child and/or nodes
                List<JexlNode> leafNodesToDistribute = usedLeafNodes.values().stream().map(this::getLeafNode).collect(Collectors.toList());
                rebuiltNode = DistributeAndedNodes.distributeAndedNode(rebuiltNode, leafNodesToDistribute, jexlNodeToCompMap);
            }
            
            return rebuiltNode;
        }
        
        return node;
    }
}
 
Example 18
@Override
public boolean hasSingleElement(Multimap<?,?> map) {
    return map.size() == 1;
}
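
A usage note on the helper above: because Multimap#size() counts key-value pairs, hasSingleElement is true only when the multimap holds exactly one entry, even if a single key has several values. A hedged sketch of that distinction (illustrative data, not from the original project):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class SingleElementDemo {
    public static void main(String[] args) {
        Multimap<String, String> m = HashMultimap.create();
        m.put("k", "v1");
        System.out.println(m.size() == 1); // true: exactly one entry

        m.put("k", "v2");
        System.out.println(m.size() == 1); // false: one key, but two entries
    }
}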
 
Example 19 Project: Clusion   File: TSet.java
public static void constructEMMPar(final byte[] key1, final byte[] key2, final byte[] keyENC,
		final Multimap<String, String> lookup, final Multimap<String, String> encryptedIdToRealId)
		throws InterruptedException, ExecutionException, IOException {

	// Instantiate the B buckets of the secure inverted index
	// and initialize the free set

	// Determination of the bucketSize B
	bucketSize = lookup.size() * spaceOverhead;
	// Round the bucket size up to a power of two with an even exponent (a power of four)
	int count = 2;
	for (int j = 1; j < 1000; j++) {
		if (bucketSize > Math.pow(2, count)) {
			count = 2 * j;
		} else {
			break;
		}
	}

	bucketSize = (int) Math.pow(2, count);

	for (int i = 0; i < bucketSize; i++) {
		secureIndex.add(new ArrayList<Record>());
		free.add(new ArrayList<Integer>());
		// For each bucket initialize to S sub-buckets
		for (int j = 0; j < subBucketSize; j++) {
			// initialize all buckets with random values
			secureIndex.get(i).add(new Record(new byte[16], new byte[16]));
			free.get(i).add(j);
		}
	}

	List<String> listOfKeyword = new ArrayList<String>(lookup.keySet());
	int threads = 0;
	if (Runtime.getRuntime().availableProcessors() > listOfKeyword.size()) {
		threads = listOfKeyword.size();
	} else {
		threads = Runtime.getRuntime().availableProcessors();
	}

	ExecutorService service = Executors.newFixedThreadPool(threads);
	ArrayList<String[]> inputs = new ArrayList<String[]>(threads);

	for (int i = 0; i < threads; i++) {
		String[] tmp;
		if (i == threads - 1) {
			tmp = new String[listOfKeyword.size() / threads + listOfKeyword.size() % threads];
			for (int j = 0; j < listOfKeyword.size() / threads + listOfKeyword.size() % threads; j++) {
				tmp[j] = listOfKeyword.get((listOfKeyword.size() / threads) * i + j);
			}
		} else {
			tmp = new String[listOfKeyword.size() / threads];
			for (int j = 0; j < listOfKeyword.size() / threads; j++) {

				tmp[j] = listOfKeyword.get((listOfKeyword.size() / threads) * i + j);
			}
		}
		inputs.add(i, tmp);
	}

	List<Future<Integer>> futures = new ArrayList<Future<Integer>>();
	for (final String[] input : inputs) {
		Callable<Integer> callable = new Callable<Integer>() {
			public Integer call() throws Exception {

				setup(key1, key2, keyENC, input, lookup, encryptedIdToRealId);
				return 1;
			}
		};
		futures.add(service.submit(callable));
	}

	service.shutdown();

}
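
For reference, the bucket-size loop near the top of constructEMMPar rounds lookup.size() * spaceOverhead up to a power of two with an even exponent, i.e. a power of four. An equivalent standalone rewrite of just that rounding step (a sketch for clarity, not Clusion's code):

public class BucketSizeRounding {
    static int roundUpToPowerOfFour(double bucketSize) {
        int count = 2;
        while (bucketSize > Math.pow(2, count)) {
            count += 2; // keep the exponent even
        }
        return (int) Math.pow(2, count);
    }

    public static void main(String[] args) {
        System.out.println(roundUpToPowerOfFour(100)); // 256 == 4^4
        System.out.println(roundUpToPowerOfFour(5));   // 16  == 4^2
        System.out.println(roundUpToPowerOfFour(4));   // 4   == 4^1
    }
}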