com.google.common.collect.Sets#newHashSetWithExpectedSize() Source Code Examples

The examples below show how com.google.common.collect.Sets#newHashSetWithExpectedSize() is used across various open-source projects; the originating project and file are noted above each snippet.
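
Before the project examples, here is a minimal sketch of the call itself: it pre-sizes a HashSet so that the expected number of elements can be added without triggering an internal resize. The input list and sizes below are purely illustrative.

import com.google.common.collect.Sets;

import java.util.List;
import java.util.Set;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        // Hypothetical input; in the project examples the expected size
        // usually comes from another collection's size() or an array's length.
        List<String> keys = List.of("aaa", "bbb", "ccc");

        // Allocates a HashSet sized to hold keys.size() elements without
        // resizing. Note this differs from new HashSet<>(keys.size()), which
        // interprets the argument as a raw initial capacity, not an expected size.
        Set<String> unique = Sets.newHashSetWithExpectedSize(keys.size());
        unique.addAll(keys);

        System.out.println(unique.size()); // 3
    }
}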

Example 1  Project: phoenix  File: WhereOptimizer.java
@Override
public KeySlots visitLeave(InListExpression node, List<KeySlots> childParts) {
    if (childParts.isEmpty()) {
        return null;
    }

    List<Expression> keyExpressions = node.getKeyExpressions();
    Set<KeyRange> ranges = Sets.newHashSetWithExpectedSize(keyExpressions.size());
    KeySlot childSlot = childParts.get(0).iterator().next();
    KeyPart childPart = childSlot.getKeyPart();
    // Handles cases like WHERE substr(foo,1,3) IN ('aaa','bbb')
    for (Expression key : keyExpressions) {
        KeyRange range = childPart.getKeyRange(CompareOp.EQUAL, key);
        if (range != KeyRange.EMPTY_RANGE) { // EMPTY_RANGE means this key can never match, so skip it
            ranges.add(range);
        }
    }
    return newKeyParts(childSlot, node, new ArrayList<KeyRange>(ranges), null);
}
 
Example 2  Project: phoenix  File: HashJoinPlan.java
private HashJoinPlan(SelectStatement statement, 
        QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans, boolean recompileWhereClause, Map<ImmutableBytesPtr,ServerCache> dependencies) throws SQLException {
    super(plan);
    this.dependencies.putAll(dependencies);
    this.statement = statement;
    this.joinInfo = joinInfo;
    this.subPlans = subPlans;
    this.recompileWhereClause = recompileWhereClause;
    this.tableRefs = Sets.newHashSetWithExpectedSize(subPlans.length + plan.getSourceRefs().size());
    this.tableRefs.addAll(plan.getSourceRefs());
    this.hasSubPlansWithPersistentCache = false;
    for (SubPlan subPlan : subPlans) {
        tableRefs.addAll(subPlan.getInnerPlan().getSourceRefs());
        if (subPlan instanceof HashSubPlan && ((HashSubPlan)subPlan).usePersistentCache) {
            this.hasSubPlansWithPersistentCache = true;
        }
    }
    QueryServices services = plan.getContext().getConnection().getQueryServices();
    this.maxServerCacheTimeToLive = services.getProps().getInt(
            QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
    this.serverCacheLimit = services.getProps().getLong(
            QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
}
 
Example 3  Project: fastjgame  File: BinarySerializer.java
private static Set<Class<?>> getFilteredSupportedClasses(Predicate<Class<?>> filter) {
    final Set<Class<?>> allCustomCodecClass = CodecScanner.getAllCustomCodecClass();
    final Set<Class<?>> allProtoBufferClasses = ProtoBufferScanner.getAllProtoBufferClasses();
    final Set<Class<?>> supportedClassSet = Sets.newHashSetWithExpectedSize(allCustomCodecClass.size() + allProtoBufferClasses.size());

    Stream.concat(allCustomCodecClass.stream(), allProtoBufferClasses.stream())
            .filter(filter)
            .forEach(supportedClassSet::add);
    return supportedClassSet;
}
 
Example 4  Project: tez  File: VertexGroup.java
GroupInfo(String groupName, Vertex... vertices) {
  this.groupName = groupName;
  members = Sets.newHashSetWithExpectedSize(vertices.length);
  for (Vertex v : vertices) {
    members.add(v);
  }
}
 
Example 5  Project: javaide  File: DensitySplitOptions.java
@Override
protected Set<String> getDefaultValues() {
    Density[] values = Density.values();
    Set<String> fullList = Sets.newHashSetWithExpectedSize(values.length - 2);
    for (Density value : values) {
        if (value != Density.NODPI && value != Density.ANYDPI && value.isRecommended()) {
            fullList.add(value.getResourceValue());
        }
    }

    return fullList;
}
 
Example 6  Project: terrapin  File: HFileReaderTest.java
@BeforeClass
public static void setUp() throws Exception {
  int randomNum = (int) (Math.random() * Integer.MAX_VALUE);
  hfilePath = "/tmp/hfile-" + randomNum;
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  keyValueMap = Maps.newHashMapWithExpectedSize(10000);
  errorKeys = Sets.newHashSetWithExpectedSize(2000);
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, new CacheConfig(conf),
      fs, 4096).
      withFilePath(new Path(hfilePath)).
      withCompression(Compression.Algorithm.NONE).
      build();
  // Add up to 10K values.
  for (int i = 0; i < 10000; i++) {
    byte[] key = String.format("%04d", i).getBytes();
    byte[] value = null;
    // Add a couple of empty values for testing and making sure we return them.
    if (i <= 1) {
      value = "".getBytes();
    } else {
      value = ("v" + (i + 1)).getBytes();
    }
    KeyValue kv = new KeyValue(key,
        Bytes.toBytes("cf"),
        Bytes.toBytes(""),
        value);
    writer.append(kv);
    keyValueMap.put(ByteBuffer.wrap(key), ByteBuffer.wrap(value));
    if (i >= 4000 && i < 6000) {
      errorKeys.add(ByteBuffer.wrap(key));
    }
  }
  writer.close();
  hfileReader = new TestHFileReader(fs,
      hfilePath,
      new CacheConfig(conf),
      new ExecutorServiceFuturePool(Executors.newFixedThreadPool(1)),
      errorKeys);
}
 
Example 7  Project: bazel  File: ResourceUsageAnalyzer.java
private void referencedString(@NonNull String string) {
  // See if the string is at all eligible; ignore strings that aren't
  // identifiers (has java identifier chars and nothing but .:/), or are empty or too long
  // We also allow "%", used for formatting strings.
  if (string.isEmpty() || string.length() > 80) {
    return;
  }
  boolean haveIdentifierChar = false;
  for (int i = 0, n = string.length(); i < n; i++) {
    char c = string.charAt(i);
    boolean identifierChar = Character.isJavaIdentifierPart(c);
    if (!identifierChar && c != '.' && c != ':' && c != '/' && c != '%') {
      // .:/ are for the fully qualified resource names, or for resource URLs or
      // relative file names
      return;
    } else if (identifierChar) {
      haveIdentifierChar = true;
    }
  }
  if (!haveIdentifierChar) {
    return;
  }
  if (strings == null) {
    strings = Sets.newHashSetWithExpectedSize(300);
  }
  strings.add(string);

  if (!foundWebContent && string.contains(ANDROID_RES)) {
    foundWebContent = true;
  }
}
 
Example 8  Project: hawkbit  File: MgmtDistributionSetResourceTest.java
private Set<DistributionSet> createDistributionSetsAlphabetical(final int amount) {
    char character = 'a';
    final Set<DistributionSet> created = Sets.newHashSetWithExpectedSize(amount);
    for (int index = 0; index < amount; index++) {
        final String str = String.valueOf(character);
        created.add(testdataFactory.createDistributionSet(str));
        character++;
    }
    return created;
}
 
Example 9  Project: arcusplatform  File: GetAttributesHandler.java
private Set<String> getNames(Set<AttributeKey<?>> attributeKeys) {
   Set<String> names = Sets.newHashSetWithExpectedSize(attributeKeys.size());
   for(AttributeKey<?> key: attributeKeys) {
      names.add(key.getName());
   }
   return names;
}
 
Example 10  Project: Kylin  File: LookupTable.java
public Set<T> mapValues(String col, Set<T> values, String returnCol) {
    int colIdx = tableDesc.findColumnByName(col).getZeroBasedIndex();
    int returnIdx = tableDesc.findColumnByName(returnCol).getZeroBasedIndex();
    Set<T> result = Sets.newHashSetWithExpectedSize(values.size());
    for (T[] row : data.values()) {
        if (values.contains(row[colIdx])) {
            result.add(row[returnIdx]);
        }
    }
    return result;
}
 
Example 11  Project: qconfig  File: DelayPublishReleaseStatus.java
private boolean allPushSuccess() {
    for (ConfigMetaVersion metaVersion : statusInfo.getTaskConfig().getMetaVersions()) {
        List<Host> hosts = statusInfo.getBatches().get(statusInfo.getFinishedBatchNum());
        Set<String> pushFailIps = Sets.newHashSetWithExpectedSize(hosts.size());
        for (Host host : hosts) {
            pushFailIps.add(host.getIp());
        }

        ConfigMeta meta = metaVersion.getConfigMeta();
        try {
            ListenableFuture<Set<ClientData>> clientsDataFuture = listeningClientsService.getListeningClientsData(meta, true);
            Set<ClientData> clientDataSet = clientsDataFuture.get(Constants.FUTURE_DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            for (ClientData clientData : clientDataSet) {
                if (clientData.getVersion() == metaVersion.getVersion()) {
                    pushFailIps.remove(clientData.getIp());
                }
            }
        } catch (Exception e) {
            LOGGER.error("get client data set error.", e);
        }

        if (!pushFailIps.isEmpty()) {
            return false;
        }
    }
    return true;
}
 
Example 12  Project: javaide  File: ResourceUsageAnalyzer.java
private void referencedString(@NonNull String string) {
    // See if the string is at all eligible; ignore strings that aren't
    // identifiers (has java identifier chars and nothing but .:/), or are empty or too long
    // We also allow "%", used for formatting strings.
    if (string.isEmpty() || string.length() > 80) {
        return;
    }
    boolean haveIdentifierChar = false;
    for (int i = 0, n = string.length(); i < n; i++) {
        char c = string.charAt(i);
        boolean identifierChar = Character.isJavaIdentifierPart(c);
        if (!identifierChar && c != '.' && c != ':' && c != '/' && c != '%') {
            // .:/ are for the fully qualified resource names, or for resource URLs or
            // relative file names
            return;
        } else if (identifierChar) {
            haveIdentifierChar = true;
        }
    }
    if (!haveIdentifierChar) {
        return;
    }

    if (mStrings == null) {
        mStrings = Sets.newHashSetWithExpectedSize(300);
    }
    mStrings.add(string);

    if (!mFoundWebContent && string.contains(ANDROID_RES)) {
        mFoundWebContent = true;
    }
}
 
Example 13  Project: qconfig  File: PushConfigServlet.java
private Set<IpAndPort> parseRequest(HttpServletRequest req) throws IOException {
    List<String> list = readLines(req);
    Set<IpAndPort> ipAndPorts = Sets.newHashSetWithExpectedSize(list.size());
    for (String line : list) {
        line = line.trim();
        if (!Strings.isNullOrEmpty(line)) {
            Iterator<String> iterator = COMMA_SPLITTER.split(line).iterator();
            iterator.next();// skip host
            ipAndPorts.add(new IpAndPort(iterator.next(), Integer.parseInt(iterator.next())));
        }
    }
    return ipAndPorts;
}
 
Example 14  Project: phoenix  File: DeleteCompiler.java
private Set<PTable> getNonDisabledImmutableIndexes(TableRef tableRef) {
    PTable table = tableRef.getTable();
    if (table.isImmutableRows() && !table.getIndexes().isEmpty()) {
        Set<PTable> nonDisabledIndexes = Sets.newHashSetWithExpectedSize(table.getIndexes().size());
        for (PTable index : table.getIndexes()) {
            if (index.getIndexState() != PIndexState.DISABLE) {
                nonDisabledIndexes.add(index);
            }
        }
        return nonDisabledIndexes;
    }
    return Collections.emptySet();
}
 
Example 15  Project: x-pipe  File: RedisCreateInfo.java
public List<Pair<String,Integer>> getRedisAddresses() {
    if(redises == null || StringUtil.isEmpty(redises))
        throw new IllegalArgumentException("No redises posted");
    String[] redisArray = StringUtil.splitRemoveEmpty("\\s*,\\s*", redises);
    Set<Pair<String, Integer>> addresses = Sets.newHashSetWithExpectedSize(redisArray.length);
    for(String redis : redisArray) {
        addresses.add(IpUtils.parseSingleAsPair(redis));
    }
    return Lists.newArrayList(addresses);
}
 
Example 16  Project: phoenix  File: SortMergeJoinPlan.java
public SortMergeJoinPlan(
        StatementContext context,
        FilterableStatement statement,
        TableRef table,
        JoinType type,
        QueryPlan lhsPlan,
        QueryPlan rhsPlan,
        Pair<List<Expression>,List<Expression>> lhsAndRhsKeyExpressions,
        List<Expression> rhsKeyExpressions,
        PTable joinedTable,
        PTable lhsTable,
        PTable rhsTable,
        int rhsFieldPosition,
        boolean isSingleValueOnly,
        Pair<List<OrderByNode>,List<OrderByNode>> lhsAndRhsOrderByNodes) throws SQLException {
    if (type == JoinType.Right) throw new IllegalArgumentException("JoinType should not be " + type);
    this.context = context;
    this.statement = statement;
    this.table = table;
    this.joinType = type;
    this.lhsPlan = lhsPlan;
    this.rhsPlan = rhsPlan;
    this.lhsKeyExpressions = lhsAndRhsKeyExpressions.getFirst();
    this.rhsKeyExpressions = lhsAndRhsKeyExpressions.getSecond();
    this.joinedSchema = buildSchema(joinedTable);
    this.lhsSchema = buildSchema(lhsTable);
    this.rhsSchema = buildSchema(rhsTable);
    this.rhsFieldPosition = rhsFieldPosition;
    this.isSingleValueOnly = isSingleValueOnly;
    this.tableRefs = Sets.newHashSetWithExpectedSize(lhsPlan.getSourceRefs().size() + rhsPlan.getSourceRefs().size());
    this.tableRefs.addAll(lhsPlan.getSourceRefs());
    this.tableRefs.addAll(rhsPlan.getSourceRefs());
    this.thresholdBytes =
            context.getConnection().getQueryServices().getProps().getLong(
                QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES);
    this.spoolingEnabled =
            context.getConnection().getQueryServices().getProps().getBoolean(
                QueryServices.CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB,
                QueryServicesOptions.DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED);
    this.actualOutputOrderBys = convertActualOutputOrderBy(lhsAndRhsOrderByNodes.getFirst(), lhsAndRhsOrderByNodes.getSecond(), context);
}
 
Example 17  Project: javaide  File: VariantDependencies.java
public static VariantDependencies compute(@NonNull Project project, @NonNull final String name, boolean publishVariant, @NonNull VariantType variantType, @Nullable VariantDependencies parentVariant, @NonNull ConfigurationProvider... providers) {
    Set<Configuration> compileConfigs = Sets.newHashSetWithExpectedSize(providers.length * 2);
    Set<Configuration> apkConfigs = Sets.newHashSetWithExpectedSize(providers.length);

    for (ConfigurationProvider provider : providers) {
        if (provider != null) {
            compileConfigs.add(provider.getCompileConfiguration());
            if (provider.getProvidedConfiguration() != null) {
                compileConfigs.add(provider.getProvidedConfiguration());
            }


            apkConfigs.add(provider.getCompileConfiguration());
            apkConfigs.add(provider.getPackageConfiguration());
        }

    }


    if (parentVariant != null) {
        compileConfigs.add(parentVariant.getCompileConfiguration());
        apkConfigs.add(parentVariant.getPackageConfiguration());
    }


    Configuration compile = project.getConfigurations().maybeCreate("_" + name + "Compile");
    compile.setVisible(false);
    compile.setDescription("## Internal use, do not manually configure ##");
    compile.setExtendsFrom(compileConfigs);

    Configuration apk = project.getConfigurations().maybeCreate(variantType.equals(VariantType.LIBRARY) ? "_" + name + "Publish" : "_" + name + "Apk");

    apk.setVisible(false);
    apk.setDescription("## Internal use, do not manually configure ##");
    apk.setExtendsFrom(apkConfigs);

    Configuration publish = null;
    Configuration mapping = null;
    Configuration classes = null;
    Configuration metadata = null;
    if (publishVariant) {
        publish = project.getConfigurations().maybeCreate(name);
        publish.setDescription("Published Configuration for Variant " + name);
        // if the variant is not a library, then the publishing configuration should
        // not extend from the apkConfigs. It's mostly there to access the artifact from
        // another project but it shouldn't bring any dependencies with it.
        if (variantType.equals(VariantType.LIBRARY)) {
            publish.setExtendsFrom(apkConfigs);
        }


        // create configuration for -metadata.
        metadata = project.getConfigurations().create(name + "-metadata");
        metadata.setDescription("Published APKs metadata for Variant " + name);

        // create configuration for -mapping and -classes.
        mapping = project.getConfigurations().maybeCreate(name + "-mapping");
        mapping.setDescription("Published mapping configuration for Variant " + name);

        classes = project.getConfigurations().maybeCreate(name + "-classes");
        classes.setDescription("Published classes configuration for Variant " + name);
        // because we need the transitive dependencies for the classes, extend the compile config.
        classes.setExtendsFrom(compileConfigs);
    }


    return new VariantDependencies(name, compile, apk, publish, mapping, classes, metadata, true);
}
 
Example 18  Project: coming  File: Closure_89_CollapseProperties_t.java
private boolean inlineAliasIfPossible(Ref alias, GlobalNamespace namespace) {
  // Ensure that the alias is assigned to a local variable at that
  // variable's declaration. If the alias's parent is a NAME,
  // then the NAME must be the child of a VAR node, and we must
  // be in a VAR assignment.
  Node aliasParent = alias.node.getParent();
  if (aliasParent.getType() == Token.NAME) {
    // Ensure that the local variable is well defined and never reassigned.
    Scope scope = alias.scope;
    Var aliasVar = scope.getVar(aliasParent.getString());
    ReferenceCollectingCallback collector =
        new ReferenceCollectingCallback(compiler,
            ReferenceCollectingCallback.DO_NOTHING_BEHAVIOR,
            Predicates.<Var>equalTo(aliasVar));
    (new NodeTraversal(compiler, collector)).traverseAtScope(scope);

    ReferenceCollection aliasRefs =
        collector.getReferenceCollection(aliasVar);
    if (aliasRefs.isWellDefined()
        && aliasRefs.firstReferenceIsAssigningDeclaration()
        && aliasRefs.isAssignedOnceInLifetime()) {
      // The alias is well-formed, so do the inlining now.
      int size = aliasRefs.references.size();
      Set<Node> newNodes = Sets.newHashSetWithExpectedSize(size - 1);
      for (int i = 1; i < size; i++) {
        ReferenceCollectingCallback.Reference aliasRef =
            aliasRefs.references.get(i);

        Node newNode = alias.node.cloneTree();
        aliasRef.getParent().replaceChild(aliasRef.getNameNode(), newNode);
        newNodes.add(newNode);
      }

      // just set the original alias to null.
      aliasParent.replaceChild(alias.node, new Node(Token.NULL));
      compiler.reportCodeChange();

      // Inlining the variable may have introduced new references
      // to descendants of {@code name}. So those need to be collected now.
      namespace.scanNewNodes(alias.scope, newNodes);
      return true;
    }
  }

  return false;
}
 
Example 19  Project: envelope  File: KuduOutput.java
@Override
public void applyBulkMutations(List<Tuple2<MutationType, Dataset<Row>>> planned) {
  KuduContext kc = new KuduContext(
      config.getString(CONNECTION_CONFIG_NAME), Contexts.getSparkSession().sparkContext());

  String tableName = config.getString(TABLE_CONFIG_NAME);

  Set<String> kuduColumns = null;
  if (KuduUtils.ignoreMissingColumns(config)) {
      try {
        KuduTable table = getConnection().getTable(tableName);
        kuduColumns = Sets.newHashSetWithExpectedSize(table.getSchema().getColumns().size());
        for (int i = 0; i < table.getSchema().getColumns().size(); i++) {
          ColumnSchema columnSchema = table.getSchema().getColumns().get(i);
          kuduColumns.add(columnSchema.getName());
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
  }

  for (Tuple2<MutationType, Dataset<Row>> plan : planned) {
    MutationType mutationType = plan._1();
    Dataset<Row> mutation = plan._2();

    if (KuduUtils.ignoreMissingColumns(config) && kuduColumns != null) {
      Set<String> mutationFields = Sets.newHashSet(mutation.schema().fieldNames());
      for (String col : Sets.difference(mutationFields, kuduColumns)) {
        mutation = mutation.drop(col);
      }
    }

    KuduWriteOptions kuduWriteOptions = new KuduWriteOptions(
        KuduUtils.doesInsertIgnoreDuplicates(config),
        false
    );

    switch (mutationType) {
      case DELETE:
        kc.deleteRows(mutation, tableName, kuduWriteOptions);
        break;
      case INSERT:
        kc.insertRows(mutation, tableName, kuduWriteOptions);
        break;
      case UPDATE:
        kc.updateRows(mutation, tableName, kuduWriteOptions);
        break;
      case UPSERT:
        kc.upsertRows(mutation, tableName, kuduWriteOptions);
        break;
      default:
        throw new RuntimeException("Kudu bulk output does not support mutation type: " + mutationType);
    }
  }
}
 
Example 20  Project: coming  File: Closure_130_CollapseProperties_s.java
private boolean inlineAliasIfPossible(Ref alias, GlobalNamespace namespace) {
  // Ensure that the alias is assigned to a local variable at that
  // variable's declaration. If the alias's parent is a NAME,
  // then the NAME must be the child of a VAR node, and we must
  // be in a VAR assignment.
  Node aliasParent = alias.node.getParent();
  if (aliasParent.isName()) {
    // Ensure that the local variable is well defined and never reassigned.
    Scope scope = alias.scope;
    Var aliasVar = scope.getVar(aliasParent.getString());
    ReferenceCollectingCallback collector =
        new ReferenceCollectingCallback(compiler,
            ReferenceCollectingCallback.DO_NOTHING_BEHAVIOR,
            Predicates.<Var>equalTo(aliasVar));
    (new NodeTraversal(compiler, collector)).traverseAtScope(scope);

    ReferenceCollection aliasRefs = collector.getReferences(aliasVar);
    if (aliasRefs.isWellDefined()
        && aliasRefs.firstReferenceIsAssigningDeclaration()
        && aliasRefs.isAssignedOnceInLifetime()) {
      // The alias is well-formed, so do the inlining now.
      int size = aliasRefs.references.size();
      Set<Node> newNodes = Sets.newHashSetWithExpectedSize(size - 1);
      for (int i = 1; i < size; i++) {
        ReferenceCollectingCallback.Reference aliasRef =
            aliasRefs.references.get(i);

        Node newNode = alias.node.cloneTree();
        aliasRef.getParent().replaceChild(aliasRef.getNode(), newNode);
        newNodes.add(newNode);
      }

      // just set the original alias to null.
      aliasParent.replaceChild(alias.node, IR.nullNode());
      compiler.reportCodeChange();

      // Inlining the variable may have introduced new references
      // to descendants of {@code name}. So those need to be collected now.
      namespace.scanNewNodes(alias.scope, newNodes);
      return true;
    }
  }

  return false;
}