com.google.common.collect.Lists#newLinkedList() Source Code Examples

Listed below are usage examples of com.google.common.collect.Lists#newLinkedList() collected from open-source projects, together with the project and file each snippet comes from; you can also look up the full source in each project's GitHub repository.
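
For orientation, here is a minimal self-contained sketch of the two overloads shown throughout this page: the no-argument factory and the copying factory that accepts any Iterable (the class name here is illustrative, not from the projects below):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.LinkedList;

public class NewLinkedListDemo {
    public static void main(String[] args) {
        // No-argument factory: an empty, mutable LinkedList. This is mostly
        // a pre-Java-7 convenience, since new LinkedList<>() now infers the
        // type parameter on its own.
        LinkedList<String> empty = Lists.newLinkedList();

        // Copying factory: accepts any Iterable (not only Collection) and
        // returns an independent, mutable LinkedList with the same elements.
        LinkedList<String> copy = Lists.newLinkedList(Arrays.asList("a", "b"));

        copy.addFirst("start");
        System.out.println(empty); // []
        System.out.println(copy);  // [start, a, b]
    }
}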

Example 1 Project: rya File: PCJOptimizerBenchmark.java
private static String buildChainedSPARQL(final List<String> vars) {
    final Queue<String> varQueue = Lists.newLinkedList(vars);
    final List<String> statementPatterns = new ArrayList<>();

    // Create the first SP.
    final String var1 = varQueue.remove();
    final String var2 = varQueue.remove();
    statementPatterns.add( var1 + " <urn:predicate> " + var2);

    // Chain the rest of the SPs off of each other.
    String lastVar = var2;

    while(!varQueue.isEmpty()) {
        final String var = varQueue.remove();
        statementPatterns.add( lastVar + " <urn:predicate> " + var);
        lastVar = var;
    }

    // Build the SPARQL query from the pieces.
    return "select " + Joiner.on(" ").join(vars) + " where { " +
    Joiner.on(" . ").join(statementPatterns) +
    " . }" ;
}
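
Note that Lists.newLinkedList(vars) is assigned to a Queue here: this works because LinkedList implements Queue, which is what enables the remove()-based consumption above. As an illustration (not part of the benchmark), a call with three variable names chains each variable to the next:

// Hypothetical call, for illustration only:
String sparql = buildChainedSPARQL(Arrays.asList("?a", "?b", "?c"));
// -> select ?a ?b ?c where { ?a <urn:predicate> ?b . ?b <urn:predicate> ?c . }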
 
Example 2 Project: joyqueue File: ProduceRequestHandler.java
protected void splitByPartitionGroup(TopicConfig topicConfig, TopicName topic, Producer producer, byte[] clientAddress, Traffic traffic,
                            ProduceRequest.PartitionRequest partitionRequest, Map<Integer, ProducePartitionGroupRequest> partitionGroupRequestMap) {
    PartitionGroup partitionGroup = topicConfig.fetchPartitionGroupByPartition((short) partitionRequest.getPartition());
    ProducePartitionGroupRequest producePartitionGroupRequest = partitionGroupRequestMap.get(partitionGroup.getGroup());

    if (producePartitionGroupRequest == null) {
        producePartitionGroupRequest = new ProducePartitionGroupRequest(Lists.newLinkedList(), Lists.newLinkedList(),
                Lists.newLinkedList(), Maps.newHashMap(), Maps.newHashMap());
        partitionGroupRequestMap.put(partitionGroup.getGroup(), producePartitionGroupRequest);
    }

    List<BrokerMessage> brokerMessages = Lists.newLinkedList();
    for (KafkaBrokerMessage message : partitionRequest.getMessages()) {
        BrokerMessage brokerMessage = KafkaMessageConverter.toBrokerMessage(producer.getTopic(), partitionRequest.getPartition(), producer.getApp(), clientAddress, message);
        brokerMessages.add(brokerMessage);
    }

    traffic.record(topic.getFullName(), partitionRequest.getTraffic(), partitionRequest.getSize());
    producePartitionGroupRequest.getPartitions().add(partitionRequest.getPartition());
    producePartitionGroupRequest.getMessages().addAll(brokerMessages);
    producePartitionGroupRequest.getMessageMap().put(partitionRequest.getPartition(), brokerMessages);
    producePartitionGroupRequest.getKafkaMessages().addAll(partitionRequest.getMessages());
    producePartitionGroupRequest.getKafkaMessageMap().put(partitionRequest.getPartition(), partitionRequest.getMessages());
}
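
The null-check-then-put block above is the classic get-or-create idiom; on Java 8+ the same grouping can be expressed more compactly with Map.computeIfAbsent (a sketch using the same types as the snippet):

ProducePartitionGroupRequest request = partitionGroupRequestMap.computeIfAbsent(
        partitionGroup.getGroup(),
        k -> new ProducePartitionGroupRequest(Lists.newLinkedList(), Lists.newLinkedList(),
                Lists.newLinkedList(), Maps.newHashMap(), Maps.newHashMap()));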
 
Example 3 Project: vscrawler File: JedisBlockingQueueStore.java
@Override
public List<ResourceItem> queryAll(String queueID) {
    lockQueue(queueID);
    Jedis jedis = jedisPool.getResource();
    try {
        final Map<String, String> map = jedis.hgetAll(makeDataKey(queueID));
        return Lists.newLinkedList(Iterables.transform(jedis.zrange(makePoolQueueKey(queueID), 0, -1), new Function<String, ResourceItem>() {
            @Override
            public ResourceItem apply(String input) {
                return JSONObject.toJavaObject(JSONObject.parseObject(map.get(input)), ResourceItem.class);
            }
        }));
    } finally {
        IOUtils.closeQuietly(jedis);
        unLockQueue(queueID);
    }
}
 
Example 4 Project: ignite
@Test
public void testLinkedListModification() {
    GridBinaryTestClasses.TestObjectContainer obj = new GridBinaryTestClasses.TestObjectContainer();

    obj.foo = Lists.newLinkedList(Arrays.asList("a", "b", "c"));

    BinaryObjectBuilderImpl mutObj = wrap(obj);

    List<String> list = mutObj.getField("foo");

    list.add("!"); // "a", "b", "c", "!"
    list.add(0, "_"); // "_", "a", "b", "c", "!"

    String s = list.remove(1); // "_", "b", "c", "!"
    assertEquals("a", s);

    assertEquals(Arrays.asList("c", "!"), list.subList(2, 4));
    assertEquals(1, list.indexOf("b"));
    assertEquals(1, list.lastIndexOf("b"));

    GridBinaryTestClasses.TestObjectContainer res = mutObj.build().deserialize();

    assertTrue(res.foo instanceof LinkedList);
    assertEquals(Arrays.asList("_", "b", "c", "!"), res.foo);
}
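
The round trip above preserves the concrete LinkedList type. Independent of the binary machinery, the copying factory always yields a fresh, mutable list, which is why mutating the copy is safe (a minimal sketch):

List<String> src = Arrays.asList("a", "b", "c");    // fixed-size view
LinkedList<String> copy = Lists.newLinkedList(src); // independent mutable copy
copy.add("d");
System.out.println(src);  // [a, b, c] -- the source is untouched
System.out.println(copy); // [a, b, c, d]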
 
Example 5 Project: morf File: UpgradePath.java
/**
 * @return the sql
 */
public List<String> getSql() {
  List<String> results = Lists.newLinkedList();
  if (!sql.isEmpty() || !upgradeScriptAdditions.isEmpty())
    results.addAll(initialisationSql);

  results.addAll(sql);

  for (UpgradeScriptAddition addition : upgradeScriptAdditions) {
    Iterables.addAll(results, addition.sql());
  }

  if (!results.isEmpty())
    results.addAll(finalisationSql);

  return Collections.unmodifiableList(results);
}
 
Example 6 Project: kylin-on-parquet-v2 File: JobService.java
private void cancelCheckpointJobInner(CheckpointExecutable checkpointExecutable) throws IOException {
    List<String> segmentIdList = Lists.newLinkedList();
    List<String> jobIdList = Lists.newLinkedList();
    jobIdList.add(checkpointExecutable.getId());
    setRelatedIdList(checkpointExecutable, segmentIdList, jobIdList);

    CubeInstance cubeInstance = getCubeManager()
            .getCube(CubingExecutableUtil.getCubeName(checkpointExecutable.getParams()));
    if (!segmentIdList.isEmpty()) {
        List<CubeSegment> toRemoveSegments = Lists.newLinkedList();
        for (String segmentId : segmentIdList) {
            final CubeSegment segment = cubeInstance.getSegmentById(segmentId);
            if (segment != null && segment.getStatus() != SegmentStatusEnum.READY) {
                toRemoveSegments.add(segment);
            }
        }

        getCubeManager().dropOptmizingSegments(cubeInstance, toRemoveSegments.toArray(new CubeSegment[] {}));
    }

    for (String jobId : jobIdList) {
        getExecutableManager().discardJob(jobId);
    }
}
 
Example 7 Project: aion-germany File: SiegeRaceCounter.java
protected <K> Map<K, Long> getOrderedCounterMap(Map<K, AtomicLong> unorderedMap) {
	if (GenericValidator.isBlankOrNull(unorderedMap)) {
		return Collections.emptyMap();
	}

	LinkedList<Map.Entry<K, AtomicLong>> tempList = Lists.newLinkedList(unorderedMap.entrySet());
	Collections.sort(tempList, new Comparator<Map.Entry<K, AtomicLong>>() {

		@Override
		public int compare(Map.Entry<K, AtomicLong> o1, Map.Entry<K, AtomicLong> o2) {
			return new Long(o2.getValue().get()).compareTo(o1.getValue().get());
		}
	});

	Map<K, Long> result = Maps.newLinkedHashMap();
	for (Map.Entry<K, AtomicLong> entry : tempList) {
		if (entry.getValue().get() > 0) {
			result.put(entry.getKey(), entry.getValue().get());
		}
	}
	return result;
}
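
A side note on the comparator: the new Long(...) boxing works but is deprecated since Java 9. On Java 8+ the same descending order can be expressed without allocation (a sketch that could replace the Collections.sort call):

tempList.sort(Comparator.comparingLong(
        (Map.Entry<K, AtomicLong> e) -> e.getValue().get()).reversed());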
 
Example 8 Project: pinlater File: PinLaterBackendUtils.java
/**
 * Merges multiple iterables into a list using the comparing logic provided by the comparator.
 * The returned list will only include the first n merged items, where n is the limit specified.
 *
 * @param iterablesToMerge  The iterables to be merged.
 * @param comparator        Comparator specifying the comparison logic between the iterables.
 * @param limit             Max number of results that will be returned.
 * @param <T>               Iterable item type.
 * @param <S>               Comparator between iterate items type.
 * @return List of the first n merged results.
 */
public static <T, S extends Comparator<T>> List<T> mergeIntoList(
    Iterable<? extends Iterable<T>> iterablesToMerge,
    S comparator,
    int limit) {
  // Perform a k-way merge on the collections and collect the first `limit` results.
  List<T> mergedCols = Lists.newLinkedList();
  Iterator<T> mergeIterator = Iterables.mergeSorted(iterablesToMerge, comparator).iterator();
  while (mergeIterator.hasNext() && mergedCols.size() < limit) {
    mergedCols.add(mergeIterator.next());
  }
  return mergedCols;
}
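
Guava can also express the loop declaratively: Iterables.mergeSorted is lazy, so wrapping it in Iterables.limit stops the merge after the first limit elements (a sketch equivalent to the method body above):

return Lists.newArrayList(
        Iterables.limit(Iterables.mergeSorted(iterablesToMerge, comparator), limit));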
 
Example 9 Project: jweb-cms File: App.java
private List<String> orderedModules() {
    if (orderedModules == null) {
        Graph<String> graph = createGraph();
        Deque<String> readyModules = Lists.newLinkedList();
        for (String node : graph.nodes()) {
            if (graph.predecessors(node).isEmpty()) {
                readyModules.push(node);
            }
        }
        Set<String> visited = Sets.newLinkedHashSet();
        while (!readyModules.isEmpty()) {
            String moduleName = readyModules.pollFirst();
            visited.add(moduleName);

            Set<String> successors = graph.successors(moduleName);
            for (String successor : successors) {
                ModuleNode moduleNode = moduleNode(successor).orElseThrow();
                boolean ready = true;
                for (String dependency : moduleNode.module.dependencies()) {
                    if (isInstalled(dependency) && !visited.contains(dependency)) {
                        ready = false;
                        break;
                    }
                }
                if (ready && !visited.contains(successor)) {
                    readyModules.add(successor);
                }
            }
        }
        orderedModules = ImmutableList.copyOf(visited);
    }
    return orderedModules;
}
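
This example assigns the factory result to a Deque, which works because LinkedList implements Deque as well as List, so it can back either a stack or a queue (a minimal sketch):

Deque<String> ready = Lists.newLinkedList();
ready.push("first");             // stack-style
ready.add("last");               // queue-style
String next = ready.pollFirst(); // "first"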
 
Example 10 Project: attic-aurora File: SlaAlgorithm.java
@Override
public Number calculate(Iterable<IScheduledTask> tasks, Range<Long> timeFrame) {
  Iterable<IScheduledTask> activeTasks = FluentIterable.from(tasks)
      .filter(
          Predicates.compose(Predicates.in(Tasks.ACTIVE_STATES), IScheduledTask::getStatus));

  List<Long> waitTimes = Lists.newLinkedList();
  for (IScheduledTask task : activeTasks) {
    long pendingTs = 0;
    for (ITaskEvent event : task.getTaskEvents()) {
      if (event.getStatus() == PENDING) {
        pendingTs = event.getTimestamp();
      } else if (event.getStatus() == status && timeFrame.contains(event.getTimestamp())) {

        if (pendingTs == 0) {
          throw new IllegalArgumentException("SLA: missing PENDING status for:"
              + task.getAssignedTask().getTaskId());
        }

        waitTimes.add(event.getTimestamp() - pendingTs);
        break;
      }
    }
  }

  return SlaUtil.percentile(waitTimes, 50.0);
}
 
Example 11 Project: joyqueue File: DefaultPartitionMonitorService.java
@Override
public List<PartitionGroupMonitorInfo> getPartitionGroupInfosByTopicAndApp(String topic, String app) {
    AppStat appStat = brokerStat.getOrCreateTopicStat(topic).getOrCreateAppStat(app);
    List<PartitionGroupMonitorInfo> result = Lists.newLinkedList();
    for (Map.Entry<Integer, PartitionGroupStat> entry : appStat.getPartitionGroupStatMap().entrySet()) {
        PartitionGroupMonitorInfo partitionGroupMonitorInfo = convertPartitionGroupMonitorInfo(entry.getValue());
        result.add(partitionGroupMonitorInfo);
    }
    return result;
}
 
Example 12 Project: ns4_gear_watchdog File: TailFile.java
public List<Event> readEvents(int numEvents, boolean backoffWithoutNL,
                              boolean addByteOffset) throws IOException {
    List<Event> events = Lists.newLinkedList();
    for (int i = 0; i < numEvents; i++) {
        Event event = readEvent(backoffWithoutNL, addByteOffset);
        if (event == null) {
            break;
        }
        events.add(event);
    }
    return events;
}
 
Example 13 Project: joyqueue File: ProduceMessageRequestHandler.java
protected ProduceMessageAckData buildResponse(ProduceMessageData produceMessageData, JoyQueueCode code) {
    BrokerMessage firstMessage = produceMessageData.getMessages().get(0);
    List<ProduceMessageAckItemData> item = Lists.newLinkedList();

    // Handle batch messages
    if (firstMessage.isBatch()) {
        item.add(ProduceMessageAckItemData.INVALID_INSTANCE);
    } else {
        for (int i = 0; i < produceMessageData.getMessages().size(); i++) {
            item.add(ProduceMessageAckItemData.INVALID_INSTANCE);
        }
    }
    return new ProduceMessageAckData(item, code);
}
 
Example 14 Project: kylin-on-parquet-v2 File: SCCreator.java
private List<DataModelDesc> generateKylinModelForSystemCube(String owner, MetricsSinkDesc sinkDesc) {
    List<DataModelDesc> result = Lists.newLinkedList();
    result.add(ModelCreator.generateKylinModelForMetricsQuery(owner, config, sinkDesc));
    result.add(ModelCreator.generateKylinModelForMetricsQueryCube(owner, config, sinkDesc));
    result.add(ModelCreator.generateKylinModelForMetricsQueryRPC(owner, config, sinkDesc));
    result.add(ModelCreator.generateKylinModelForMetricsJob(owner, config, sinkDesc));
    result.add(ModelCreator.generateKylinModelForMetricsJobException(owner, config, sinkDesc));

    return result;
}
 
Example 15 Project: tez File: TestDAGAppMaster.java
@Test(timeout = 5000)
public void testParseAllPluginsOnlyCustomSpecified() throws IOException {
  Configuration conf = new Configuration(false);
  conf.set(TEST_KEY, TEST_VAL);
  UserPayload defaultPayload = TezUtils.createUserPayloadFromConf(conf);
  TezUserPayloadProto payloadProto = TezUserPayloadProto.newBuilder()
      .setUserPayload(ByteString.copyFrom(defaultPayload.getPayload())).build();

  AMPluginDescriptorProto proto = createAmPluginDescriptor(false, false, true, payloadProto);

  List<NamedEntityDescriptor> tsDescriptors;
  BiMap<String, Integer> tsMap;
  List<NamedEntityDescriptor> clDescriptors;
  BiMap<String, Integer> clMap;
  List<NamedEntityDescriptor> tcDescriptors;
  BiMap<String, Integer> tcMap;


  // Only plugin, Yarn.
  tsDescriptors = Lists.newLinkedList();
  tsMap = HashBiMap.create();
  clDescriptors = Lists.newLinkedList();
  clMap = HashBiMap.create();
  tcDescriptors = Lists.newLinkedList();
  tcMap = HashBiMap.create();
  DAGAppMaster.parseAllPlugins(tsDescriptors, tsMap, clDescriptors, clMap, tcDescriptors, tcMap,
      proto, false, defaultPayload);
  verifyDescAndMap(tsDescriptors, tsMap, 2, true, TS_NAME,
      TezConstants.getTezYarnServicePluginName());
  verifyDescAndMap(clDescriptors, clMap, 1, true, CL_NAME);
  verifyDescAndMap(tcDescriptors, tcMap, 1, true, TC_NAME);
  assertEquals(TS_NAME + CLASS_SUFFIX, tsDescriptors.get(0).getClassName());
  assertEquals(CL_NAME + CLASS_SUFFIX, clDescriptors.get(0).getClassName());
  assertEquals(TC_NAME + CLASS_SUFFIX, tcDescriptors.get(0).getClassName());
}
 
Example 16 Project: testability-explorer File: MethodInfo.java
public MethodInfo copyWithoutInvocation(ClassInfo parent, String invokedClassName,
                                        String invokedMethodName) {
  List<Operation> operationsWithoutInvocation = Lists.newLinkedList();
  for (Operation operation : operations) {
    if (operation instanceof MethodInvocation) {
      if (((MethodInvocation) operation).equals(invokedClassName, invokedMethodName)) {
        continue;
      }
    }
    operationsWithoutInvocation.add(operation);
  }
  return new MethodInfo(parent, name, startingLineNumber, methodThis,
      parameters, localVariables, visibility, operationsWithoutInvocation, isFinal,
      isConstructor, linesOfComplexity);
}
 
Example 17 Project: reactor_simulator File: TimeSeriesMonitor.java
public TimeSeriesMonitor(int windowSize) {
  this.windowSize = windowSize;
  this.mean = 0;
  this.window = Lists.newLinkedList();
}
 
Example 18 Project: spoofax File: Menu.java
public Menu(String name) {
    this(name, Lists.<IMenuItem>newLinkedList());
}
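
The explicit type witness in Lists.<IMenuItem>newLinkedList() was needed because pre-Java-8 compilers did not propagate the target type into nested method calls; with Java 8+ target typing the witness is redundant, and both forms below are equivalent (a sketch):

List<IMenuItem> items = Lists.newLinkedList(); // witness no longer required
List<IMenuItem> plain = new LinkedList<>();    // plain JDK alternative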
 
Example 19 Project: helix File: WorkflowDispatcher.java
/**
 * Create a new workflow based on an existing one
 * @param manager connection to Helix
 * @param origWorkflowName the name of the existing workflow
 * @param newWorkflowName the name of the new workflow
 * @param newStartTime a provided start time that deviates from the desired start time
 * @return the cloned workflow, or null if there was a problem cloning the existing one
 */
public static Workflow cloneWorkflow(HelixManager manager, String origWorkflowName,
    String newWorkflowName, Date newStartTime) {
  // Read all resources, including the workflow and jobs of interest
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap =
      accessor.getChildValuesMap(keyBuilder.resourceConfigs(), true);
  if (!resourceConfigMap.containsKey(origWorkflowName)) {
    LOG.error("No such workflow named " + origWorkflowName);
    return null;
  }
  if (resourceConfigMap.containsKey(newWorkflowName)) {
    LOG.error("Workflow with name " + newWorkflowName + " already exists!");
    return null;
  }

  // Create a new workflow with a new name
  Map<String, String> workflowConfigsMap =
      resourceConfigMap.get(origWorkflowName).getRecord().getSimpleFields();
  WorkflowConfig.Builder workflowConfigBlder = WorkflowConfig.Builder.fromMap(workflowConfigsMap);

  // Set the schedule, if applicable
  if (newStartTime != null) {
    ScheduleConfig scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
    workflowConfigBlder.setScheduleConfig(scheduleConfig);
  }
  workflowConfigBlder.setTerminable(true);

  WorkflowConfig workflowConfig = workflowConfigBlder.build();

  JobDag jobDag = workflowConfig.getJobDag();
  Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();

  Workflow.Builder workflowBuilder = new Workflow.Builder(newWorkflowName);
  workflowBuilder.setWorkflowConfig(workflowConfig);

  // Add each job back as long as the original exists
  Set<String> namespacedJobs = jobDag.getAllNodes();
  for (String namespacedJob : namespacedJobs) {
    if (resourceConfigMap.containsKey(namespacedJob)) {
      // Copy over job-level and task-level configs
      String job = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedJob);
      HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
      Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();

      JobConfig.Builder jobCfgBuilder = JobConfig.Builder.fromMap(jobSimpleFields);

      jobCfgBuilder.setWorkflow(newWorkflowName); // overwrite workflow name
      Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
      List<TaskConfig> taskConfigs = Lists.newLinkedList();
      for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
        TaskConfig taskConfig = TaskConfig.Builder.from(rawTaskConfig);
        taskConfigs.add(taskConfig);
      }
      jobCfgBuilder.addTaskConfigs(taskConfigs);
      workflowBuilder.addJob(job, jobCfgBuilder);

      // Add dag dependencies
      Set<String> children = parentsToChildren.get(namespacedJob);
      if (children != null) {
        for (String namespacedChild : children) {
          String child = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedChild);
          workflowBuilder.addParentChildDependency(job, child);
        }
      }
    }
  }
  return workflowBuilder.build();
}
 
Example 20 Project: Summer File: SessionHandlerGroup.java
public SessionHandlerGroup() {
	sessionHandlerList = Lists.newLinkedList();
}