java.util.LinkedHashSet#add() Source Code Examples

Listed below are example usages of java.util.LinkedHashSet#add() drawn from open-source projects; you can also follow the project links to view the full source on GitHub.
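
Before the project samples, a minimal self-contained sketch of the add() contract itself: a LinkedHashSet preserves insertion order, rejects duplicates, and add() reports whether the set changed.

import java.util.LinkedHashSet;

public class LinkedHashSetAddDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        System.out.println(set.add("b"));  // true: newly inserted
        System.out.println(set.add("a"));  // true
        System.out.println(set.add("b"));  // false: already present, position unchanged
        System.out.println(set);           // [b, a]: iteration follows insertion order
    }
}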

Example 1   Project: samza   File: CoordinatorStreamSystemConsumer.java
/**
 * Returns all unread messages of a specific type that occur after the given iterator position on the stream.
 *
 * @param iterator the iterator pointing to an offset in the coordinator stream; all unread messages after this position are returned
 * @param type     the type of the messages to be returned
 * @return a set of unread messages of the given type, after the given iterator
 */
public Set<CoordinatorStreamMessage> getUnreadMessages(SystemStreamPartitionIterator iterator, String type) {
  LinkedHashSet<CoordinatorStreamMessage> messages = new LinkedHashSet<CoordinatorStreamMessage>();
  while (iterator.hasNext()) {
    IncomingMessageEnvelope envelope = iterator.next();
    Object[] keyArray = keySerde.fromBytes((byte[]) envelope.getKey()).toArray();
    Map<String, Object> valueMap = null;
    if (envelope.getMessage() != null) {
      valueMap = messageSerde.fromBytes((byte[]) envelope.getMessage());
    }
    CoordinatorStreamMessage coordinatorStreamMessage = new CoordinatorStreamMessage(keyArray, valueMap);
    if (type == null || type.equals(coordinatorStreamMessage.getType())) {
      messages.add(coordinatorStreamMessage);
    }
  }
  return messages;
}
 
Example 2   Project: obevo   File: Environment.java
public RichIterable<FileObject> getSourceDirs() {
    if (this.sourceDirs == null) {
        // only keep the distinct list of files here
        LinkedHashSet<FileObject> fileObjects = new LinkedHashSet<FileObject>();
        if (coreSourcePath != null) {
            fileObjects.add(coreSourcePath);
        }
        if (additionalSourceDirs != null) {
            fileObjects.addAll(additionalSourceDirs.flatCollect(new Function<String, Iterable<FileObject>>() {
                @Override
                public Iterable<FileObject> valueOf(String path) {
                    MutableList<FileObject> resolvedFileObjects = Lists.mutable.empty();
                    for (FileResolverStrategy fileResolverStrategy : fileResolverStrategies) {
                        resolvedFileObjects.addAllIterable(fileResolverStrategy.resolveFileObjects(path));
                    }
                    if (resolvedFileObjects.isEmpty()) {
                        throw new IllegalArgumentException("Unable to find the given path [" + path + "] via any of the fileResolverStrategies:" + fileResolverStrategies.makeString(", "));
                    }
                    return resolvedFileObjects;
                }
            }).toList());
        }
        this.sourceDirs = Lists.mutable.withAll(fileObjects);
    }
    return this.sourceDirs;
}
 
Example 3   Project: pentaho-reporting   File: AttributeMap.java
/**
 * Returns all namespaces that have values in this map.
 *
 * @return the namespaces stored in this map.
 */
public String[] getNameSpaces() {
  if ( content == null ) {
    return AttributeMap.EMPTY_NAMESPACES;
  }

  LinkedHashSet<String> entries = new LinkedHashSet<String>();
  for ( final Map.Entry<DualKey, T> entry : content.entrySet() ) {
    entries.add( entry.getKey().namespace );
  }

  return entries.toArray( new String[entries.size()] );
}
 
Example 4   Project: database   File: TestGroupByState.java
/**
 * <pre>
 * SELECT SUM(?y) as ?x
 * GROUP BY ?z
 * HAVING ?x > 10
 * </pre>
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public void test_simpleHavingClause() {
    
    final IVariable<IV> y = Var.var("y");
    final IVariable<IV> x = Var.var("x");
    final IVariable<IV> z = Var.var("z");

    final IValueExpression<IV> xExpr = new /* Conditional */Bind(x, new SUM(
            false/* distinct */, (IValueExpression<IV>) y));

    final IValueExpression<IV>[] select = new IValueExpression[] { xExpr };

    final IValueExpression<IV>[] groupBy = new IValueExpression[] { z };

    final IConstraint[] having = new IConstraint[] {//
    new SPARQLConstraint<XSDBooleanIV>(new CompareBOp(x,
            new Constant<XSDNumericIV>(new XSDNumericIV(10)), CompareOp.GT
            ))//
        };

    final LinkedHashSet<IVariable<?>> groupByVars = new LinkedHashSet<IVariable<?>>();
    groupByVars.add(z);

    final LinkedHashSet<IVariable<?>> selectVars = new LinkedHashSet<IVariable<?>>();
    selectVars.add(x);

    final LinkedHashSet<IVariable<?>> columnVars = new LinkedHashSet<IVariable<?>>();
    columnVars.add(y);

    final MockGroupByState expected = new MockGroupByState(groupBy,
            groupByVars, select, selectVars, having, columnVars,
            false/* anyDistinct */, false/* selectDependency */,
            false/* nestedAggregates */, true/* simpleHaving */);

    final IGroupByState actual = new GroupByState(select, groupBy, having);

    assertSameState(expected, actual);

}
 
Example 5   Project: kripton   File: BindBeanSharedPreferences.java
/**
 * Parses the valueIntegerSet attribute from its serialized JSON form.
 */
protected LinkedHashSet<Integer> parseValueIntegerSet(String input) {
  if (input==null) {
    return null;
  }
  KriptonJsonContext context=KriptonBinder.jsonBind();
  try (JacksonWrapperParser wrapper=context.createParser(input)) {
    JsonParser jacksonParser=wrapper.jacksonParser;
    // START_OBJECT
    jacksonParser.nextToken();
    // value of "element"
    jacksonParser.nextValue();
    LinkedHashSet<Integer> result=null;
    if (jacksonParser.currentToken()==JsonToken.START_ARRAY) {
      LinkedHashSet<Integer> collection=new LinkedHashSet<>();
      Integer item=null;
      while (jacksonParser.nextToken() != JsonToken.END_ARRAY) {
        if (jacksonParser.currentToken()==JsonToken.VALUE_NULL) {
          item=null;
        } else {
          item=jacksonParser.getIntValue();
        }
        collection.add(item);
      }
      result=collection;
    }
    return result;
  } catch(Exception e) {
    e.printStackTrace();
    throw(new KriptonRuntimeException(e.getMessage()));
  }
}
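
The parser above stores null for JSON null entries; note that LinkedHashSet permits exactly one null element, so repeated JSON nulls collapse into a single entry. A quick illustration:

import java.util.LinkedHashSet;

public class NullElementDemo {
    public static void main(String[] args) {
        LinkedHashSet<Integer> items = new LinkedHashSet<>();
        items.add(1);
        items.add(null);           // the first null is stored
        items.add(null);           // a second null is ignored by add()
        items.add(2);
        System.out.println(items); // [1, null, 2]
    }
}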
 
Example 6   Project: nalu   File: ProcessorUtils.java
/**
 * Returns all of the superclasses and superinterfaces for a given generator
 * including the generator itself. The returned set maintains an internal
 * breadth-first ordering of the generator, followed by its interfaces (and their
 * super-interfaces), then the supertype and its interfaces, and so on.
 *
 * @param types      types
 * @param typeMirror of the class to check
 * @return Set of implemented super types
 */
public Set<TypeMirror> getFlattenedSupertypeHierarchy(Types types,
                                                      TypeMirror typeMirror) {
  List<TypeMirror> toAdd = new ArrayList<>();
  LinkedHashSet<TypeMirror> result = new LinkedHashSet<>();
  toAdd.add(typeMirror);
  for (int i = 0; i < toAdd.size(); i++) {
    TypeMirror type = toAdd.get(i);
    if (result.add(type)) {
      toAdd.addAll(types.directSupertypes(type));
    }
  }
  return result;
}
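
The loop above is a compact breadth-first walk: result.add(type) doubles as the visited check, so each type is expanded exactly once while the LinkedHashSet records the breadth-first order. A minimal sketch of the same pattern over a hypothetical string graph (the edges method is an illustration, not part of the nalu API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;

public class BfsDedupeSketch {
    // Hypothetical adjacency; ProcessorUtils resolves real supertypes via Types.
    static List<String> edges(String node) {
        if (node.equals("C")) return Arrays.asList("B", "I");
        if (node.equals("B")) return Collections.singletonList("A");
        return Collections.emptyList();
    }

    public static void main(String[] args) {
        List<String> toAdd = new ArrayList<>(Collections.singletonList("C"));
        LinkedHashSet<String> result = new LinkedHashSet<>();
        for (int i = 0; i < toAdd.size(); i++) {
            String type = toAdd.get(i);
            if (result.add(type)) {        // add() returning true means first visit
                toAdd.addAll(edges(type));
            }
        }
        System.out.println(result);        // [C, B, I, A]
    }
}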
 
Example 7   Project: astor   File: FixedPointGraphTraversal.java
/**
 * Compute a fixed point for the given graph, entering from the given nodes.
 * @param graph The graph to traverse.
 * @param entrySet The nodes to begin traversing from.
 */
public void computeFixedPoint(DiGraph<N, E> graph, Set<N> entrySet) {
  int cycleCount = 0;
  long nodeCount = graph.getNodes().size();

  // Choose a bail-out heuristically in case the computation
  // doesn't converge.
  long maxIterations = Math.max(nodeCount * nodeCount * nodeCount, 100);

  // Use a LinkedHashSet, so that the traversal is deterministic.
  LinkedHashSet<DiGraphNode<N, E>> workSet =
      Sets.newLinkedHashSet();
  for (N n : entrySet) {
    workSet.add(graph.getDirectedGraphNode(n));
  }
  for (; !workSet.isEmpty() && cycleCount < maxIterations; cycleCount++) {
    // For every out edge of the next node in the workSet, traverse that
    // edge. If that edge updates the state of the graph, then add the
    // destination node back to the workSet, so that we can update all of
    // its out edges on a later iteration.
    DiGraphNode<N, E> source = workSet.iterator().next();
    N sourceValue = source.getValue();

    workSet.remove(source);

    List<DiGraphEdge<N, E>> outEdges = source.getOutEdges();
    for (DiGraphEdge<N, E> edge : outEdges) {
      N destNode = edge.getDestination().getValue();
      if (callback.traverseEdge(sourceValue, edge.getValue(), destNode)) {
        workSet.add(edge.getDestination());
      }
    }
  }

  Preconditions.checkState(cycleCount != maxIterations,
      NON_HALTING_ERROR_MSG);
}
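
The workSet idiom above (iterator().next() followed by remove()) turns a LinkedHashSet into a FIFO work queue that silently drops duplicate enqueues, which is exactly what makes the traversal deterministic. A stripped-down sketch of just that idiom:

import java.util.LinkedHashSet;

public class DedupingWorkQueue {
    public static void main(String[] args) {
        LinkedHashSet<String> workSet = new LinkedHashSet<>();
        workSet.add("n1");
        workSet.add("n2");
        workSet.add("n1");                            // duplicate enqueue is a no-op
        while (!workSet.isEmpty()) {
            String next = workSet.iterator().next();  // oldest remaining element
            workSet.remove(next);
            System.out.println(next);                 // prints n1 then n2, every run
        }
    }
}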
 
Example 8   Project: htm.java   File: PatternMachine.java
/**
 * Returns a {@link Set} of indexes mapped to patterns
 * which contain the specified bit.
 * 
 * @param bit the bit to search for
 * @return indexes of all patterns containing the specified bit
 */
public LinkedHashSet<Integer> numbersForBit(int bit) {
    LinkedHashSet<Integer> retVal = new LinkedHashSet<Integer>();
    for(Integer i : patterns.keySet()) {
        if(patterns.get(i).contains(bit)) {
            retVal.add(i);
        }
    }
    
    return retVal;
}
 
Example 9   Project: jenerate   File: MethodGeneratorImplTest.java
@SafeVarargs
private final LinkedHashSet<StrategyIdentifier> mockGetPossibleStrategies(
        LinkedHashSet<MethodSkeleton<MethodGenerationData>> skeletons, StrategyIdentifier... identifiers) {
    LinkedHashSet<StrategyIdentifier> possibleStrategies = new LinkedHashSet<StrategyIdentifier>();
    for (StrategyIdentifier strategyIdentifier : identifiers) {
        possibleStrategies.add(strategyIdentifier);
    }
    when(methodContentManager.getStrategiesIntersection(skeletons)).thenReturn(possibleStrategies);
    return possibleStrategies;
}
 
Example 10   Project: OpenCue   File: ServiceDaoJdbc.java
public static LinkedHashSet<String> splitTags(String tags) {
    LinkedHashSet<String> set = Sets.newLinkedHashSet();
    for(String s: tags.split(SPLITTER)) {
       set.add(s.replaceAll(" ", ""));
    }
    return set;
}
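
A hedged usage sketch (assuming the SPLITTER constant matches commas, which the snippet does not show): the LinkedHashSet strips duplicate tags while keeping the first-seen order.

// Assuming SPLITTER = "," for illustration:
LinkedHashSet<String> tags = ServiceDaoJdbc.splitTags("gpu, cpu ,gpu,local");
System.out.println(tags);  // [gpu, cpu, local]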
 
Example 11   Project: TencentKona-8   File: SystemFlavorMap.java
/**
 * Stores the listed object under the specified hash key in map. Unlike a
 * standard map, the listed object will not replace any object already at
 * the appropriate Map location, but rather will be appended to a List
 * stored in that location.
 */
private <H, L> void store(H hashed, L listed, Map<H, LinkedHashSet<L>> map) {
    LinkedHashSet<L> list = map.get(hashed);
    if (list == null) {
        list = new LinkedHashSet<>(1);
        map.put(hashed, list);
    }
    if (!list.contains(listed)) {
        list.add(listed);
    }
}
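
On Java 8 and later the same append-don't-replace store can be written with Map.computeIfAbsent; a sketch of the equivalent, not the JDK's actual code. The explicit contains() check is also unnecessary, since add() already ignores duplicates.

private <H, L> void store(H hashed, L listed, Map<H, LinkedHashSet<L>> map) {
    // computeIfAbsent creates the per-key set on first use, then add()
    // appends while preserving insertion order and skipping duplicates.
    map.computeIfAbsent(hashed, k -> new LinkedHashSet<>(1)).add(listed);
}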
 
Example 12   Project: flow   File: RouteRegistryInitializerTest.java
@Test(expected = ServletException.class)
public void registerClassesWithSameRoute_class_unrelatedClass_throws()
        throws ServletException {
    LinkedHashSet<Class<?>> classes = new LinkedHashSet<>();
    classes.add(BaseRouteTarget.class);
    classes.add(OtherRouteTarget.class);
    routeRegistryInitializer.process(classes, servletContext);
}
 
Example 13   Project: wandora   File: AnnieConfiguration.java
public HashSet<String> getAnnotationTypes() {
    LinkedHashSet<String> types = new LinkedHashSet<String>();
    if(firstPersonCheckBox.isSelected())    types.add("FirstPerson");
    if(lookupCheckBox.isSelected())         types.add("Lookup");
    if(personCheckBox.isSelected())         types.add("Person");
    if(sentenceCheckBox.isSelected())       types.add("Sentence");
    if(spaceTokenCheckBox.isSelected())     types.add("SpaceToken");
    if(splitCheckBox.isSelected())          types.add("Split");
    if(tokenCheckBox.isSelected())          types.add("Token");
    if(unknownCheckBox.isSelected())        types.add("Unknown");

    if(dateCheckBox.isSelected())           types.add("Date");
    if(jobTitleCheckBox.isSelected())       types.add("JobTitle");
    if(locationCheckBox.isSelected())       types.add("Location");
    if(organizationCheckBox.isSelected())   types.add("Organization");
    if(tempCheckBox.isSelected())           types.add("Temp");

    if(identifierCheckBox.isSelected())     types.add("Identifier");
    if(moneyCheckBox.isSelected())          types.add("Money");
    if(percentCheckBox.isSelected())        types.add("Percent");
    if(titleCheckBox.isSelected())          types.add("Title");

    String other = otherTextField.getText();
    for (String o : other.split(",")) {
        if (o.trim().length() > 0) {
            types.add(o);
        }
    }
    return types;
}
 
Example 14   Project: LogicNG   File: AIGTransformation.java
private Formula transformOr(final Or or) {
  Formula aig = or.transformationCacheEntry(AIG);
  if (aig == null) {
    final LinkedHashSet<Formula> nops = new LinkedHashSet<>(or.numberOfOperands());
    for (final Formula op : or)
      nops.add(f.not(apply(op, cache)));
    aig = f.not(f.and(nops));
    if (cache) {
      or.setTransformationCacheEntry(AIG, aig);
      aig.setPredicateCacheEntry(PredicateCacheEntry.IS_AIG, true);
    }
  }
  return aig;
}
 
Example 15   Project: openjdk-jdk8u-backup   File: SystemFlavorMap.java
/**
 * Semantically equivalent to 'flavorToNative.get(flav)'. This method
 * handles the case where 'flav' is not found in 'flavorToNative' depending
 * on the value of the passed 'synthesize' parameter. If 'synthesize' is
 * SYNTHESIZE_IF_NOT_FOUND a native is synthesized, stored, and returned by
 * encoding the DataFlavor's MIME type. Otherwise an empty List is returned
 * and 'flavorToNative' remains unaffected.
 */
private LinkedHashSet<String> flavorToNativeLookup(final DataFlavor flav,
                                                   final boolean synthesize) {

    LinkedHashSet<String> natives = getFlavorToNative().get(flav);

    if (flav != null && !disabledMappingGenerationKeys.contains(flav)) {
        DataTransferer transferer = DataTransferer.getInstance();
        if (transferer != null) {
            LinkedHashSet<String> platformNatives =
                transferer.getPlatformMappingsForFlavor(flav);
            if (!platformNatives.isEmpty()) {
                if (natives != null) {
                    // Prepend the platform-specific mappings to ensure
                    // that the natives added with
                    // addUnencodedNativeForFlavor() are at the end of
                    // list.
                    platformNatives.addAll(natives);
                }
                natives = platformNatives;
            }
        }
    }

    if (natives == null) {
        if (synthesize) {
            String encoded = encodeDataFlavor(flav);
            natives = new LinkedHashSet<>(1);
            getFlavorToNative().put(flav, natives);
            natives.add(encoded);

            LinkedHashSet<DataFlavor> flavors = getNativeToFlavor().get(encoded);
            if (flavors == null) {
                flavors = new LinkedHashSet<>(1);
                getNativeToFlavor().put(encoded, flavors);
            }
            flavors.add(flav);

            nativesForFlavorCache.remove(flav);
            flavorsForNativeCache.remove(encoded);
        } else {
            natives = new LinkedHashSet<>(0);
        }
    }

    return new LinkedHashSet<>(natives);
}
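
The prepend trick above works because addAll appends only the elements the receiver lacks: the platform natives are already in the fresh set, so any overlapping natives keep their earlier positions and the rest are appended after them. A small sketch of the effect:

import java.util.Arrays;
import java.util.LinkedHashSet;

public class PrependByAddAll {
    public static void main(String[] args) {
        LinkedHashSet<String> platform =
                new LinkedHashSet<>(Arrays.asList("PNG", "TEXT"));
        LinkedHashSet<String> registered =
                new LinkedHashSet<>(Arrays.asList("TEXT", "HTML"));
        platform.addAll(registered);   // "TEXT" is a duplicate and keeps its slot
        System.out.println(platform);  // [PNG, TEXT, HTML]: platform natives first
    }
}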
 
Example 16

private static Map<String, ConfigModel> configs() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    ConfigDef def = brokerConfigs();
    Map<String, String> dynamicUpdates = brokerDynamicUpdates();
    Method getConfigValueMethod = def.getClass().getDeclaredMethod("getConfigValue", ConfigDef.ConfigKey.class, String.class);
    getConfigValueMethod.setAccessible(true);

    Method sortedConfigs = ConfigDef.class.getDeclaredMethod("sortedConfigs");
    sortedConfigs.setAccessible(true);

    List<ConfigDef.ConfigKey> keys = (List) sortedConfigs.invoke(def);
    Map<String, ConfigModel> result = new TreeMap<>();
    for (ConfigDef.ConfigKey key : keys) {
        String configName = String.valueOf(getConfigValueMethod.invoke(def, key, "Name"));
        Type type = parseType(String.valueOf(getConfigValueMethod.invoke(def, key, "Type")));
        Scope scope = parseScope(dynamicUpdates.getOrDefault(key.name, "read-only"));
        ConfigModel descriptor = new ConfigModel();
        descriptor.setType(type);
        descriptor.setScope(scope);

        if (key.validator instanceof ConfigDef.Range) {
            descriptor = range(key, descriptor);
        } else if (key.validator instanceof ConfigDef.ValidString) {
            descriptor.setValues(enumer(key.validator));
        } else if (key.validator instanceof ConfigDef.ValidList) {
            descriptor.setItems(validList(key));
        } else if (key.validator instanceof ApiVersionValidator$) {
            Iterator<ApiVersion> iterator = ApiVersion$.MODULE$.allVersions().iterator();
            LinkedHashSet<String> versions = new LinkedHashSet<>();
            while (iterator.hasNext()) {
                ApiVersion next = iterator.next();
                ApiVersion$.MODULE$.apply(next.shortVersion());
                versions.add(Pattern.quote(next.shortVersion()) + "(\\.[0-9]+)*");
                ApiVersion$.MODULE$.apply(next.version());
                versions.add(Pattern.quote(next.version()));
            }
            descriptor.setPattern(String.join("|", versions));
        } else if (key.validator != null) {
            throw new IllegalStateException(key.validator.getClass().toString());
        }
        result.put(configName, descriptor);
    }
    return result;
}
 
Example 17   Project: incubator-retired-pirk   File: QuerySchemaLoader.java
/**
 * Returns the query schema as defined in XML format on the given stream.
 * 
 * @param stream
 *          The source of the XML query schema description.
 * @return The query schema.
 * @throws IOException
 *           A problem occurred reading from the given stream.
 * @throws PIRException
 *           The schema description is invalid.
 */
public QuerySchema loadSchema(InputStream stream) throws IOException, PIRException
{
  // Read in and parse the XML file.
  Document doc = parseXMLDocument(stream);

  // Used to build the final schema.
  QuerySchemaBuilder schemaBuilder = new QuerySchemaBuilder();

  // Extract the schemaName.
  String schemaName = extractValue(doc, "schemaName");
  schemaBuilder.setName(schemaName);
  logger.info("schemaName = " + schemaName);

  // Extract the dataSchemaName.
  String dataSchemaName = extractValue(doc, "dataSchemaName");
  schemaBuilder.setDataSchemaName(dataSchemaName);
  logger.info("dataSchemaName = " + dataSchemaName);

  // Extract the selectorName.
  String selectorName = extractValue(doc, "selectorName");
  schemaBuilder.setSelectorName(selectorName);
  logger.info("selectorName = " + selectorName);

  // Extract the query elements.
  NodeList elementsList = doc.getElementsByTagName("elements");
  if (elementsList.getLength() != 1)
  {
    throw new PIRException("elementsList.getLength() = " + elementsList.getLength() + " -- should be 1");
  }
  Element elements = (Element) elementsList.item(0);

  LinkedHashSet<String> elementNames = new LinkedHashSet<>();
  NodeList nList = elements.getElementsByTagName("name");
  for (int i = 0; i < nList.getLength(); i++)
  {
    Node nNode = nList.item(i);
    if (nNode.getNodeType() == Node.ELEMENT_NODE)
    {
      elementNames.add(nNode.getFirstChild().getNodeValue().trim());
    }
  }
  schemaBuilder.setQueryElementNames(elementNames);

  // Extract the filter, if it exists
  if (doc.getElementsByTagName("filter").item(0) != null)
  {
    schemaBuilder.setFilterTypeName(doc.getElementsByTagName("filter").item(0).getTextContent().trim());
  }

  // Create a filter over the query elements.
  schemaBuilder.setFilteredElementNames(extractFilteredElementNames(doc));

  // Extract the additional fields, if they exist
  Map<String,String> additionalFields = new HashMap<>();
  if (doc.getElementsByTagName("additional").item(0) != null)
  {
    NodeList fieldList = doc.getElementsByTagName("field");
    int numFields = fieldList.getLength();
    if (numFields == 0)
    {
      throw new PIRException("numFields = " + numFields + " -- should be at least one");
    }
    for (int i = 0; i < numFields; ++i)
    {
      Element fields = (Element) fieldList.item(i);
      NodeList kv = fields.getChildNodes();
      additionalFields.put(getNodeValue("key", kv), getNodeValue("value", kv));
    }
  }
  schemaBuilder.setAdditionalFields(additionalFields);

  // Create and return the query schema object.
  return schemaBuilder.build();
}
 
Example 18   Project: blip   File: ObsOptSearcher.java
/**
 * Find the best combination given the order (second approach, Koller's).
 */
@Override
public ParentSet[] search() {

    vars = smp.sample();

    done = new boolean[n_var];

    sk = 0;

    forbidden = new boolean[n_var];

    LinkedHashSet<Integer> todo = new LinkedHashSet<>();

    for (int v = 0; v < n_var; v++) {
        todo.add(v);
    }

    // index of the best parent set for that variable so far (at start, it is 0 for every variable)
    int[] best_ps = new int[n_var];

    // weight for the importance sampling
    double[] ws = new double[n_var];

    // indexes
    int[] ix = new int[n_var];

    int last_chosen = -1;

    ParentSet best;

    for (int i = 0; i < n_var; i++) {

        double tot = 0;
        int j = 0;

        // For each variable that has not been chosen yet (n_var - i), build the weight array
        for (int v = 0; v < n_var; v++) {
            if (done[v]) {
                continue;
            }

            // Update the list of best parent sets; for each variable
            best = m_scores[v][best_ps[v]];
            // check if the last chosen blocks the current best, and find the new best
            if (last_chosen != -1 && find(last_chosen, best.parents)) {
                best_ps[v] = new_best(v, forbidden, best_ps[v]);
                best = m_scores[v][best_ps[v]];
            }

            // best_ps[v] has the best
            ws[j] = 1 / (-best.sk);
            tot += ws[j];
            ix[j] = v;
            j++;
        }

        // j now has the number of variables to check

        double r = solver.randDouble() - Math.pow(2, -10);
        int sel = -1;

        for (int v = 0; v < j && sel == -1; v++) {
            // Normalize weights
            double s = ws[v] /= tot;

            if (r < s) {
                sel = v;
            }
            r -= s;
        }

        // "sel" is the selected index
        int var = ix[sel];

        forbidden[var] = true;
        last_chosen = var;
        done[var] = true;
        str[var] = m_scores[var][best_ps[var]];
        sk += str[var].sk;

        // pf("%d %s \n", var, str[var]);
    }

    return str;
}
 
Example 19   Project: rtg-tools   File: VcfMerge.java
VcfPositionZipper(ReferenceRanges<String> rr, Set<String> forceMerge, File... vcfFiles) throws IOException {
  mFiles = vcfFiles;
  mReaders = new VcfReader[mFiles.length];
  mHeaders = new VcfHeader[mFiles.length];
  mIndexes = new TabixIndexReader[mFiles.length];
  VcfHeader current = null;
  int numSamples = 0;
  boolean warnNumSamples = true;
  for (int i = 0; i < mFiles.length; ++i) {
    final File vcfFile = mFiles[i];
    final VcfHeader header = VcfUtils.getHeader(vcfFile);
    mHeaders[i] = header;
    if (current != null) {
      current = VcfHeaderMerge.mergeHeaders(current, header, forceMerge);
      if (current.getNumberOfSamples() != numSamples && warnNumSamples) {
        Diagnostic.warning("When merging multiple samples the QUAL, FILTER, and INFO fields are taken from the first record at each position.");
        warnNumSamples = false;
      }
    } else {
      current = header;
      numSamples = current.getNumberOfSamples();
    }
    final File index = TabixIndexer.indexFileName(vcfFile);
    if (!TabixIndexer.isBlockCompressed(vcfFile)) {
      throw new NoTalkbackSlimException(vcfFile + " is not in bgzip format");
    } else if (!index.exists()) {
      throw new NoTalkbackSlimException("Index not found for file: " + index.getPath() + " expected index called: " + index.getPath());
    }
    mIndexes[i] = new TabixIndexReader(TabixIndexer.indexFileName(vcfFile));
  }
  mMergedHeader = current;

  if (rr == null) {
    final LinkedHashSet<String> chroms = new LinkedHashSet<>();
    if (!mMergedHeader.getContigLines().isEmpty()) {
      for (final ContigField cf : mMergedHeader.getContigLines()) {
        chroms.add(cf.getId());
      }
    }
    for (int i = 0; i < vcfFiles.length; ++i) {
      chroms.addAll(Arrays.asList(mIndexes[i].sequenceNames()));
    }
    final ReferenceRanges<String> rrr = SamRangeUtils.createExplicitReferenceRange(chroms);
    mRegions = chroms.stream().map(rrr::forSequence).collect(Collectors.toList());
  } else {
    mRegions = rr.sequenceNames().stream().map(rr::forSequence).collect(Collectors.toList());
  }
  populateNext();
}
 
Example 20   Project: openjdk-jdk8u-backup   File: SystemFlavorMap.java
/**
 * Adds a mapping from the specified <code>DataFlavor</code> (and all
 * <code>DataFlavor</code>s equal to the specified <code>DataFlavor</code>)
 * to the specified <code>String</code> native.
 * Unlike <code>getNativesForFlavor</code>, the mapping will only be
 * established in one direction, and the native will not be encoded. To
 * establish a two-way mapping, call
 * <code>addFlavorForUnencodedNative</code> as well. The new mapping will
 * be of lower priority than any existing mapping.
 * This method has no effect if a mapping from the specified or equal
 * <code>DataFlavor</code> to the specified <code>String</code> native
 * already exists.
 *
 * @param flav the <code>DataFlavor</code> key for the mapping
 * @param nat the <code>String</code> native value for the mapping
 * @throws NullPointerException if flav or nat is <code>null</code>
 *
 * @see #addFlavorForUnencodedNative
 * @since 1.4
 */
public synchronized void addUnencodedNativeForFlavor(DataFlavor flav,
                                                     String nat) {
    Objects.requireNonNull(nat, "Null native not permitted");
    Objects.requireNonNull(flav, "Null flavor not permitted");

    LinkedHashSet<String> natives = getFlavorToNative().get(flav);
    if (natives == null) {
        natives = new LinkedHashSet<>(1);
        getFlavorToNative().put(flav, natives);
    }
    natives.add(nat);
    nativesForFlavorCache.remove(flav);
}
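
For completeness, a minimal usage sketch of the public API above; the native name "MY_FORMAT" is an arbitrary illustration.

import java.awt.datatransfer.DataFlavor;
import java.awt.datatransfer.SystemFlavorMap;

public class FlavorMapDemo {
    public static void main(String[] args) {
        SystemFlavorMap map =
                (SystemFlavorMap) SystemFlavorMap.getDefaultFlavorMap();
        // One-way, lower-priority mapping; calling this twice is a no-op
        // because the underlying LinkedHashSet rejects the duplicate.
        map.addUnencodedNativeForFlavor(DataFlavor.stringFlavor, "MY_FORMAT");
        System.out.println(map.getNativesForFlavor(DataFlavor.stringFlavor));
    }
}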