com.google.common.collect.EvictingQueue Source Code Examples

Listed below are example usages of com.google.common.collect.EvictingQueue; follow each project link to view the full source on GitHub.
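For readers new to the class: EvictingQueue is Guava's bounded, non-blocking queue that silently evicts its head (oldest) element when a new element is added at full capacity. A minimal sketch of the core API before the project-specific usages below (the demo class itself is illustrative; the Guava calls are the documented ones):

import com.google.common.collect.EvictingQueue;

public class EvictingQueueDemo {
    public static void main(String[] args) {
        // Capacity is fixed at creation time; adding beyond it evicts the oldest element.
        EvictingQueue<String> queue = EvictingQueue.create(3);
        queue.add("a");
        queue.add("b");
        queue.add("c");
        queue.add("d");            // "a" is silently evicted, no exception is thrown
        System.out.println(queue); // prints [b, c, d]
    }
}

Example 11 below (EvictingQueueUnitTest) asserts this same eviction behavior in a unit test.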

Example 1  Project: aliyun-log-java-producer  File: ProducerBatch.java
public ProducerBatch(
    GroupKey groupKey,
    String packageId,
    int batchSizeThresholdInBytes,
    int batchCountThreshold,
    int maxReservedAttempts,
    long nowMs) {
  this.groupKey = groupKey;
  this.packageId = packageId;
  this.createdMs = nowMs;
  this.batchSizeThresholdInBytes = batchSizeThresholdInBytes;
  this.batchCountThreshold = batchCountThreshold;
  this.curBatchCount = 0;
  this.curBatchSizeInBytes = 0;
  this.reservedAttempts = EvictingQueue.create(maxReservedAttempts);
  this.attemptCount = 0;
}
 
Example 2  Project: ThriftJ  File: FailoverStrategy.java
public void fail(T object) {
	logger.info("Server {}:{} failed.", ((ThriftServer)object).getHost(),((ThriftServer)object).getPort());
	boolean addToFail = false;
	try {
		EvictingQueue<Long> evictingQueue = failCountMap.get(object);
		synchronized (evictingQueue) {
			evictingQueue.add(System.currentTimeMillis());
			if (evictingQueue.remainingCapacity() == 0 && evictingQueue.element() >= (System.currentTimeMillis() - failDuration)) {
				addToFail = true;
			}
		}
	} catch (ExecutionException e) {
		logger.error("Ops.", e);
	}
	if (addToFail) {
		failedList.put(object, Boolean.TRUE);
		logger.info("Server {}:{} failed. Add to fail list.", ((ThriftServer)object).getHost(), ((ThriftServer)object).getPort());
	}
}
 
private static void getSamplesFilteredByLatency(
    long latencyLowerNs,
    long latencyUpperNs,
    int maxSpansToReturn,
    List<RecordEventsSpanImpl> output,
    EvictingQueue<RecordEventsSpanImpl> queue) {
  for (RecordEventsSpanImpl span : queue) {
    if (output.size() >= maxSpansToReturn) {
      break;
    }
    long spanLatencyNs = span.getLatencyNs();
    if (spanLatencyNs >= latencyLowerNs && spanLatencyNs < latencyUpperNs) {
      output.add(span);
    }
  }
}
 
Example 4  Project: java-timeseries  File: ArimaProcess.java
private ArimaProcess(Builder builder) {
    this.coefficients = builder.coefficients;
    this.distribution = builder.distribution;
    this.period = builder.period;
    this.seasonalCycle = builder.seasonalCycle;
    this.startTime = builder.startTime;
    this.currentTime = startTime;
    int seasonalFrequency = (int) builder.period.frequencyPer(builder.seasonalCycle);
    double[] arSarCoeffs = ArimaCoefficients.expandArCoefficients(coefficients.arCoeffs(),
                                                                  coefficients.seasonalARCoeffs(),
                                                                  seasonalFrequency);
    double[] maSmaCoeffs = ArimaCoefficients.expandMaCoefficients(coefficients.maCoeffs(),
                                                                  coefficients.seasonalMACoeffs(),
                                                                  seasonalFrequency);
    this.errors = EvictingQueue.create(maSmaCoeffs.length);
    this.diffSeries = EvictingQueue.create(arSarCoeffs.length);
    this.series = EvictingQueue.create(coefficients.d() + coefficients.D() * seasonalFrequency);
    this.maPoly = LagPolynomial.movingAverage(maSmaCoeffs);
    this.arPoly = LagPolynomial.autoRegressive(arSarCoeffs);
    this.diffPoly = LagPolynomial.differences(coefficients.d())
                                 .times(LagPolynomial.seasonalDifferences(seasonalFrequency, coefficients.D()));
}
 
Example 5  Project: LagMonitor  File: PaperTimingsCommand.java
@Override
protected void sendTimings(CommandSender sender) {
    EvictingQueue<TimingHistory> history = Reflection.getField(TimingsManager.class, "HISTORY", EvictingQueue.class)
            .get(null);

    TimingHistory lastHistory = history.peek();
    if (lastHistory == null) {
        sendError(sender, "Not enough data collected yet");
        return;
    }

    List<BaseComponent[]> lines = new ArrayList<>();
    printTimings(lines, lastHistory);

    Pages pagination = new Pages("Paper Timings", lines);
    pagination.send(sender);

    plugin.getPageManager().setPagination(sender.getName(), pagination);
}
 
Example 6  Project: datacollector  File: ProductionPipelineRunner.java
private void retainErrorMessagesInMemory(Map<String, List<ErrorMessage>> errorMessages) {
  // Shortcut to avoid synchronization
  if(errorMessages.isEmpty()) {
    return;
  }

  synchronized (stageToErrorMessagesMap) {
    for (Map.Entry<String, List<ErrorMessage>> e : errorMessages.entrySet()) {
      EvictingQueue<ErrorMessage> errorMessageList = stageToErrorMessagesMap.computeIfAbsent(e.getKey(),
          k -> EvictingQueue.create(configuration.get(Constants.MAX_PIPELINE_ERRORS_KEY,
              Constants.MAX_PIPELINE_ERRORS_DEFAULT
          ))
      );
      errorMessageList.addAll(errorMessages.get(e.getKey()));
    }
  }
}
 
Example 7  Project: datacollector  File: ProductionPipelineRunner.java
private void retainErrorRecordsInMemory(Map<String, List<Record>> errorRecords) {
  // Shortcut to avoid synchronization
  if(errorRecords.isEmpty()) {
    return;
  }

  synchronized (stageToErrorRecordsMap) {
    for (Map.Entry<String, List<Record>> e : errorRecords.entrySet()) {
      EvictingQueue<Record> errorRecordList = stageToErrorRecordsMap.computeIfAbsent(e.getKey(),
          k -> EvictingQueue.create(configuration.get(Constants.MAX_ERROR_RECORDS_PER_STAGE_KEY,
              Constants.MAX_ERROR_RECORDS_PER_STAGE_DEFAULT
          ))
      );
      // replace with a data structure with an upper cap
      errorRecordList.addAll(errorRecords.get(e.getKey()));
    }
  }
}
 
/**
 * <p>
 * fail.
 * </p>
 *
 * @param object a T object.
 */
public void fail(T object) {
    logger.trace("server {} failed.", object);
    boolean addToFail = false;
    try {
        EvictingQueue<Long> evictingQueue = failCountMap.get(object);
        synchronized (evictingQueue) {
            evictingQueue.add(System.currentTimeMillis());
            if (evictingQueue.remainingCapacity() == 0
                    && evictingQueue.element() >= System.currentTimeMillis() - failDuration) {
                addToFail = true;
            }
        }
    } catch (ExecutionException e) {
        logger.error("Ops.", e);
    }
    if (addToFail) {
        failedList.put(object, TRUE);
        logger.trace("server {} failed. add to fail list.", object);
    }
}
 
public KafkaIngestionHealthCheck(Config config, KafkaExtractorStatsTracker statsTracker) {
  this.config = config;
  this.slidingWindowSize = ConfigUtils.getInt(config, KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_SLIDING_WINDOW_SIZE);
  this.ingestionLatencyThresholdMinutes = ConfigUtils.getLong(config, KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_LATENCY_THRESHOLD_MINUTES);
  this.consumptionRateDropOffFraction = ConfigUtils.getDouble(config, KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_CONSUMPTION_RATE_DROPOFF_FRACTION);
  this.expectedConsumptionRate = ConfigUtils.getDouble(config, KAFKA_INGESTION_HEALTH_CHECK_EXPECTED_CONSUMPTION_RATE_MBPS_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_EXPECTED_CONSUMPTION_RATE_MBPS);
  this.increasingLatencyCheckEnabled = ConfigUtils.getBoolean(config, KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED_KEY, DEFAULT_KAFKA_INGESTION_HEALTH_CHECK_INCREASING_LATENCY_CHECK_ENABLED);
  this.ingestionLatencies = EvictingQueue.create(this.slidingWindowSize);
  this.consumptionRateMBps = EvictingQueue.create(this.slidingWindowSize);
  EventBus eventBus;
  try {
    eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
        SharedResourcesBrokerFactory.getImplicitBroker());
  } catch (IOException e) {
    log.error("Could not find EventBus instance for container health check", e);
    eventBus = null;
  }
  this.eventBus = eventBus;
  this.statsTracker = statsTracker;
}
 
Example 11  Project: tutorials  File: EvictingQueueUnitTest.java
@Test
public void givenEvictingQueue_whenAddElementToFull_thenShouldEvictOldestItem() {
    //given
    Queue<Integer> evictingQueue = EvictingQueue.create(10);

    //when
    IntStream.range(0, 10).forEach(evictingQueue::add);

    //then
    assertThat(evictingQueue).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    //and
    evictingQueue.add(100);

    //then
    assertThat(evictingQueue).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9, 100);
}
 
Example 12  Project: plugins  File: ChatHistoryPlugin.java
@Override
protected void startUp()
{
	messageQueue = EvictingQueue.create(100);
	friends = new ArrayDeque<>(FRIENDS_MAX_SIZE + 1);
	keyManager.registerKeyListener(this);
}
 
Example 13  Project: besu  File: PendingTransactions.java
public PendingTransactions(
    final int maxTransactionRetentionHours,
    final int maxPendingTransactions,
    final int maxPooledTransactionHashes,
    final Clock clock,
    final MetricsSystem metricsSystem,
    final Supplier<BlockHeader> chainHeadHeaderSupplier,
    final Optional<EIP1559> eip1559,
    final Percentage priceBump) {
  this.maxTransactionRetentionHours = maxTransactionRetentionHours;
  this.maxPendingTransactions = maxPendingTransactions;
  this.clock = clock;
  this.newPooledHashes = EvictingQueue.create(maxPooledTransactionHashes);
  this.chainHeadHeaderSupplier = chainHeadHeaderSupplier;
  this.transactionReplacementHandler = new TransactionPoolReplacementHandler(eip1559, priceBump);
  final LabelledMetric<Counter> transactionAddedCounter =
      metricsSystem.createLabelledCounter(
          BesuMetricCategory.TRANSACTION_POOL,
          "transactions_added_total",
          "Count of transactions added to the transaction pool",
          "source");
  localTransactionAddedCounter = transactionAddedCounter.labels("local");
  remoteTransactionAddedCounter = transactionAddedCounter.labels("remote");
  localTransactionHashesAddedCounter = transactionAddedCounter.labels("pool");

  transactionRemovedCounter =
      metricsSystem.createLabelledCounter(
          BesuMetricCategory.TRANSACTION_POOL,
          "transactions_removed_total",
          "Count of transactions removed from the transaction pool",
          "source",
          "operation");
}
 
Example 14  Project: presto  File: JmxHistoricalData.java
public JmxHistoricalData(int maxEntries, Set<String> tableNames, MBeanServer mbeanServer)
{
    tables = tableNames.stream()
            .map(objectNamePattern -> toPattern(objectNamePattern.toLowerCase(ENGLISH)))
            .flatMap(objectNamePattern -> mbeanServer.queryNames(WILDCARD, null).stream()
                    .map(objectName -> objectName.getCanonicalName().toLowerCase(ENGLISH))
                    .filter(name -> name.matches(objectNamePattern)))
            .collect(toImmutableSet());

    for (String tableName : tables) {
        tableData.put(tableName, EvictingQueue.create(maxEntries));
    }
}
 
private RecordEventsReadableSpan(
    SpanContext context,
    String name,
    InstrumentationLibraryInfo instrumentationLibraryInfo,
    Kind kind,
    SpanId parentSpanId,
    boolean hasRemoteParent,
    TraceConfig traceConfig,
    SpanProcessor spanProcessor,
    Clock clock,
    Resource resource,
    @Nullable AttributesMap attributes,
    List<io.opentelemetry.trace.Link> links,
    int totalRecordedLinks,
    long startEpochNanos) {
  this.context = context;
  this.instrumentationLibraryInfo = instrumentationLibraryInfo;
  this.parentSpanId = parentSpanId;
  this.hasRemoteParent = hasRemoteParent;
  this.links = links;
  this.totalRecordedLinks = totalRecordedLinks;
  this.name = name;
  this.kind = kind;
  this.spanProcessor = spanProcessor;
  this.resource = resource;
  this.hasEnded = false;
  this.clock = clock;
  this.startEpochNanos = startEpochNanos;
  this.attributes = attributes;
  this.events = EvictingQueue.create(traceConfig.getMaxNumberOfEvents());
  this.traceConfig = traceConfig;
}
 
Example 16  Project: adaptive-alerting  File: SmaPointForecaster.java
public SmaPointForecaster(SmaPointForecasterParams params) {
    notNull(params, "params can't be null");
    params.validate();
    this.params = params;
    this.periodOfValues = EvictingQueue.create(params.getLookBackPeriod());

    if (params.getInitialPeriodOfValues() != null) {
        params.getInitialPeriodOfValues().forEach(this::updateMeanEstimate);
    }
}
 
Example 17  Project: adaptive-alerting  File: EdmxDetector.java
public EdmxDetector(UUID uuid, EdmxHyperparams hyperparams, boolean trusted) {
    notNull(uuid, "uuid can't be null");
    notNull(hyperparams, "hyperparams can't be null");
    hyperparams.validate();

    log.info("Creating EdmxDetector: uuid={}, hyperparams={}", uuid, hyperparams);
    this.uuid = uuid;
    this.hyperparams = hyperparams;
    this.buffer = EvictingQueue.create(hyperparams.getBufferSize());
    this.trusted = trusted;
}
 
void add(JobExecution jobExecution) {
    lock.writeLock().lock();
    try {
        jobExecutionsByExitCode.computeIfAbsent(jobExecution.getExitStatus().getExitCode(),
                (exitCode) -> EvictingQueue.create(cacheSize)).add(jobExecution);
        jobExecutions.add(jobExecution);
    } finally {
        lock.writeLock().unlock();
    }
}
 
Example 19  Project: ForgeHax  File: ChunkLogger.java
@Override
protected void onEnabled() {
  chunkLock.lock();
  try {
    if (max_chunks.get() <= 0) {
      chunks = Queues.newArrayDeque();
    } else {
      chunks = EvictingQueue.create(max_chunks.get());
    }
  } finally {
    chunkLock.unlock();
  }
}
 
Example 20  Project: Elasticsearch  File: SimulatedAnealingMinimizer.java
/**
 * Runs the simulated annealing algorithm and produces a model with new coefficients that, theoretically
 * fit the data better and generalizes to future forecasts without overfitting.
 *
 * @param model         The MovAvgModel to be optimized for
 * @param train         A training set provided to the model, which predictions will be
 *                      generated from
 * @param test          A test set of data to compare the predictions against and derive
 *                      a cost for the model
 * @return              A new, minimized model that (theoretically) better fits the data
 */
public static MovAvgModel minimize(MovAvgModel model, EvictingQueue<Double> train, double[] test) {

    double temp = 1;
    double minTemp = 0.0001;
    int iterations = 100;
    double alpha = 0.9;

    MovAvgModel bestModel = model;
    MovAvgModel oldModel = model;

    double oldCost = cost(model, train, test);
    double bestCost = oldCost;

    while (temp > minTemp) {
        for (int i = 0; i < iterations; i++) {
            MovAvgModel newModel = oldModel.neighboringModel();
            double newCost = cost(newModel, train, test);

            double ap = acceptanceProbability(oldCost, newCost, temp);
            if (ap > Math.random()) {
                oldModel = newModel;
                oldCost = newCost;

                if (newCost < bestCost) {
                    bestCost = newCost;
                    bestModel = newModel;
                }
            }
        }

        temp *= alpha;
    }

    return bestModel;
}
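The acceptanceProbability helper referenced above is not included in this listing. In the standard simulated-annealing formulation it always accepts a candidate that lowers the cost and accepts a worse candidate with probability exp((oldCost - newCost) / temp), so uphill moves become rarer as the temperature cools. A sketch under that assumption (not necessarily the exact Elasticsearch implementation):

private static double acceptanceProbability(double oldCost, double newCost, double temp) {
    // Improvements (newCost < oldCost) give a value >= 1, so "ap > Math.random()" always accepts them;
    // worse candidates are accepted with probability exp((oldCost - newCost) / temp).
    return Math.exp((oldCost - newCost) / temp);
}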
 
Example 21  Project: Elasticsearch  File: SimulatedAnealingMinimizer.java
/**
 * Calculates the "cost" of a model.  E.g. when run on the training data, how closely do the  predictions
 * match the test data
 *
 * Uses Least Absolute Differences to calculate error.  Note that this is not scale free, but seems
 * to work fairly well in practice
 *
 * @param model     The MovAvgModel we are fitting
 * @param train     A training set of data given to the model, which will then generate predictions from
 * @param test      A test set of data to compare against the predictions
 * @return          A cost, or error, of the model
 */
private static double cost(MovAvgModel model, EvictingQueue<Double> train, double[] test) {
    double error = 0;
    double[] predictions = model.predict(train, test.length);

    assert(predictions.length == test.length);

    for (int i = 0; i < predictions.length; i++) {
        error += Math.abs(test[i] - predictions[i]) ;
    }

    return error;
}
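As a quick worked example of this cost: if the model predicts {10.0, 12.0} for a test window of {11.0, 11.0}, the least-absolute-differences error is |11.0 - 10.0| + |11.0 - 12.0| = 2.0; minimize(...) above keeps whichever neighboring model drives this sum lowest.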
 
Example 22  Project: ThriftJ  File: FailoverStrategy.java
/**
 * Custom failover strategy.
 * @param failCount number of failures required before a server is marked failed
 * @param failDuration time window in milliseconds within which those failures must occur
 * @param recoverDuration time in milliseconds a failed server stays on the failed list before recovery
 */
public FailoverStrategy(final int failCount, long failDuration, long recoverDuration) {
	this.failDuration = failDuration;
	this.failedList = CacheBuilder.newBuilder().weakKeys().expireAfterWrite(recoverDuration, TimeUnit.MILLISECONDS).build();
	this.failCountMap = CacheBuilder.newBuilder().weakKeys().build(new CacheLoader<T, EvictingQueue<Long>>() {
		@Override
		public EvictingQueue<Long> load(T key) throws Exception {
			return EvictingQueue.create(failCount);
		}
	});
}
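Reading this constructor together with the fail() method in Example 2: each server gets an EvictingQueue whose capacity is failCount, so remainingCapacity() only reaches zero after failCount failures have been recorded, and the server is put on the failed list only if the oldest of those timestamps still falls within failDuration. A self-contained sketch of that sliding-window check (SlidingFailureWindow and its member names are illustrative, not ThriftJ API):

import com.google.common.collect.EvictingQueue;

class SlidingFailureWindow {
    private final EvictingQueue<Long> failureTimes;
    private final long failDurationMs;

    SlidingFailureWindow(int failCount, long failDurationMs) {
        // Capacity = failCount, so the queue is full exactly when failCount failures are retained.
        this.failureTimes = EvictingQueue.create(failCount);
        this.failDurationMs = failDurationMs;
    }

    // Records a failure and returns true when the last failCount failures all happened
    // within failDurationMs, i.e. the oldest retained timestamp is still inside the window.
    synchronized boolean recordFailureAndCheck() {
        long now = System.currentTimeMillis();
        failureTimes.add(now);
        return failureTimes.remainingCapacity() == 0
                && failureTimes.element() >= now - failDurationMs;
    }
}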
 
private static void getSamples(
    int maxSpansToReturn,
    List<RecordEventsSpanImpl> output,
    EvictingQueue<RecordEventsSpanImpl> queue) {
  for (RecordEventsSpanImpl span : queue) {
    if (output.size() >= maxSpansToReturn) {
      break;
    }
    output.add(span);
  }
}
 
Example 24  Project: runelite  File: ChatHistoryPlugin.java
@Override
protected void startUp()
{
	messageQueue = EvictingQueue.create(100);
	friends = new ArrayDeque<>(FRIENDS_MAX_SIZE + 1);
	keyManager.registerKeyListener(this);
}
 
Example 25  Project: dcos-commons  File: OfferOutcomeTrackerV2.java
public OfferOutcomeSummary() {
  this.acceptedCount = 0;
  this.rejectedCount = 0;
  this.outcomes = EvictingQueue.create(DEFAULT_CAPACITY);
  this.failureReasons = new HashMap<>();
  this.rejectedAgents = new HashMap<>();
}
 
Example 26  Project: bboxdb  File: StatisticsHelper.java
/**
 * Update the statistics history for a region
 *
 * @param regionIdentifier the region identifier
 * @param regionSize the region size
 */
public static void updateStatisticsHistory(final String regionIdentifier, final double regionSize) {
	
	synchronized (statisticsHistory) {
		statisticsHistory
			.computeIfAbsent(regionIdentifier, (e) -> EvictingQueue.create(HISTORY_LENGTH))
			.add(regionSize);
	}

}
 
Example 27  Project: che  File: MessagesReSender.java
public void resend(String endpointId) {
  Queue<DelayedMessage> delayedMessages = delayedMessageRegistry.remove(endpointId);

  if (delayedMessages == null || delayedMessages.isEmpty()) {
    return;
  }

  Optional<Session> sessionOptional = registry.get(endpointId);

  if (!sessionOptional.isPresent()) {
    return;
  }

  Queue<DelayedMessage> backingQueue = EvictingQueue.create(delayedMessages.size());
  while (!delayedMessages.isEmpty()) {
    backingQueue.offer(delayedMessages.poll());
  }

  Session session = sessionOptional.get();
  for (DelayedMessage delayedMessage : backingQueue) {
    if (session.isOpen()) {
      session.getAsyncRemote().sendText(delayedMessage.message);
    } else {
      delayedMessages.add(delayedMessage);
    }
  }

  if (!delayedMessages.isEmpty()) {
    delayedMessageRegistry.put(endpointId, delayedMessages);
  }
}
 
public BoostVHTActiveLearningNode(double[] classObservation, int parallelism_hint, SplittingOption splitOption, int maxBufferSize) {
  super(classObservation, parallelism_hint);
  weightSeenAtLastSplitEvaluation = this.getWeightSeen();
  id = VerticalHoeffdingTree.LearningNodeIdGenerator.generate();
  attributeContentEventKeys = new HashMap<>();
  isSplitting = false;
  parallelismHint = parallelism_hint;
  this.splittingOption = splitOption;
  this.maxBufferSize = maxBufferSize;
  this.buffer = EvictingQueue.create(maxBufferSize);
}
 
Example 29  Project: datacollector  File: SystemProcessImpl.java
public SimpleFileTailer(File file) {
  this.file = file;
  this.history = EvictingQueue.create(2500);
  this.inbuf = new byte[8192 * 8];
  try {
    this.randomAccessFile = new RandomAccessFile(file, "r");
  } catch (FileNotFoundException e) {
    throw new RuntimeException(Utils.format("Unexpected error reading output file '{}': {}", file, e), e);
  }
}
 
Example 30  Project: datacollector  File: AggregatorDataProvider.java
/**
 * Creates an AggregatorDataProvider for a family of Aggregators that will close data windows together (atomically)
 *
 * @param windowsToKeep number of data windows to keep in memory, including the live one.
 */
public AggregatorDataProvider(int windowsToKeep, WindowType windowType) {
  Utils.checkArgument(windowsToKeep > 0, "windows to keep must be greater than zero");
  aggregators = new HashSet<>();
  dataWindowQueue = EvictingQueue.create(windowsToKeep);
  dataWindowList = Collections.emptyList();
  this.windowType = windowType;
}