java.util.NavigableMap#floorEntry() code examples

Listed below are example usages of java.util.NavigableMap#floorEntry(), taken from open-source projects on GitHub.
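
As a quick reference before the project examples: floorEntry(key) returns the entry mapped to the greatest key less than or equal to the given key, or null if there is no such key. A minimal self-contained sketch:

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class FloorEntryDemo {
    public static void main(String[] args) {
        NavigableMap<Integer, String> map = new TreeMap<>();
        map.put(10, "ten");
        map.put(20, "twenty");
        map.put(30, "thirty");

        Map.Entry<Integer, String> exact = map.floorEntry(20); // 20=twenty (an existing key is its own floor)
        Map.Entry<Integer, String> below = map.floorEntry(25); // 20=twenty (greatest key <= 25)
        Map.Entry<Integer, String> none  = map.floorEntry(5);  // null (no key <= 5)

        System.out.println(exact + " | " + below + " | " + none);
    }
}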

Example 1  Project: batfish  File: FlattenerLineMap.java
/**
 * Return original line number corresponding to the character at the specified position on the
 * specified new/flattened line. If there is no entry covering the specified position, the last
 * original line number associated with this flattened line is returned.
 */
public int getOriginalLine(@Nonnull Integer newLineNumber, @Nonnull Integer newStartingPosition) {
  NavigableMap<Integer, Integer> wordMap = _lineMap.get(newLineNumber);
  if (wordMap == null) {
    /*
     * Result of looking up an unmapped line; this handles lines like the header inserted
     * after flattening
     */
    return UNMAPPED_LINE_NUMBER;
  } else {
    Entry<Integer, Integer> originalLineEntry = wordMap.floorEntry(newStartingPosition);
    /*
     * Default to the last entry if there is no corresponding entry (e.g. looking up original
     * line for a word like 'set' that did not exist in the original config should still give
     * useful output)
     */
    return (originalLineEntry == null)
        ? wordMap.lastEntry().getValue()
        : originalLineEntry.getValue();
  }
}
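
As a reading aid, the two-level lookup and its fallback can be reduced to the standalone sketch below; the miniature map contents and names are illustrative, not the actual FlattenerLineMap internals.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class LineMapSketch {
    public static void main(String[] args) {
        // Hypothetical miniature of the lookup: flattened line -> (word start column -> original line).
        NavigableMap<Integer, NavigableMap<Integer, Integer>> lineMap = new TreeMap<>();
        NavigableMap<Integer, Integer> wordMap = new TreeMap<>();
        wordMap.put(4, 12);   // word starting at column 4 came from original line 12
        wordMap.put(20, 15);  // word starting at column 20 came from original line 15
        lineMap.put(3, wordMap);

        NavigableMap<Integer, Integer> words = lineMap.get(3);
        Entry<Integer, Integer> entry = words.floorEntry(25);  // column 25 is covered by the word at column 20
        int originalLine = entry != null ? entry.getValue() : words.lastEntry().getValue();
        System.out.println(originalLine);        // 15

        // A column before the first mapped word has no floor entry, so
        // getOriginalLine above falls back to the last original line instead.
        System.out.println(words.floorEntry(0)); // null
    }
}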
 
Example 2  Project: daily-coding-problems  File: Solution.java
Integer get(Integer key, Integer time) {
    NavigableMap<Integer, Integer> value = map.get(key);
    if (Objects.isNull(value)) {
        return null;
    }

    Map.Entry<Integer, Integer> timedValue = value.floorEntry(time);
    if (Objects.isNull(timedValue)) {
        return null;
    }
    return timedValue.getValue();
}
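
The snippet above is the read side of a time-versioned key-value store. The matching write side is not shown in the listing; the sketch below assumes a simple layout (key -> (timestamp -> value)) consistent with the get method above, with a hypothetical class name.

import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

// Minimal sketch of a time-versioned store: get(key, time) returns the value
// written at the greatest timestamp <= time, exactly as in the snippet above.
class TimeMap {
    private final Map<Integer, NavigableMap<Integer, Integer>> map = new HashMap<>();

    void put(Integer key, Integer value, Integer time) {
        map.computeIfAbsent(key, k -> new TreeMap<>()).put(time, value);
    }

    Integer get(Integer key, Integer time) {
        NavigableMap<Integer, Integer> versions = map.get(key);
        if (versions == null) {
            return null;
        }
        Map.Entry<Integer, Integer> entry = versions.floorEntry(time);
        return entry == null ? null : entry.getValue();
    }
}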
 
Example 3  Project: openjdk-jdk9  File: TreeSubMapTest.java
/**
 * floorEntry returns preceding entry.
 */
public void testFloorEntry() {
    NavigableMap map = map5();
    Map.Entry e1 = map.floorEntry(three);
    assertEquals(three, e1.getKey());

    Map.Entry e2 = map.floorEntry(six);
    assertEquals(five, e2.getKey());

    Map.Entry e3 = map.floorEntry(one);
    assertEquals(one, e3.getKey());

    Map.Entry e4 = map.floorEntry(zero);
    assertNull(e4);
}
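
The assertions rely on the suite's map5() fixture; judging from the expected results, it exposes keys one through five, with zero lying below the smallest key. A standalone equivalent using plain Integer keys, assuming that fixture shape:

import java.util.NavigableMap;
import java.util.TreeMap;

public class FloorEntryFixtureSketch {
    public static void main(String[] args) {
        // Assumed shape of the map5() fixture: keys 1..5.
        NavigableMap<Integer, String> map = new TreeMap<>();
        for (int i = 1; i <= 5; i++) {
            map.put(i, "value" + i);
        }

        System.out.println(map.floorEntry(3).getKey()); // 3: an existing key is its own floor
        System.out.println(map.floorEntry(6).getKey()); // 5: greatest key <= 6
        System.out.println(map.floorEntry(1).getKey()); // 1: the smallest key matches itself
        System.out.println(map.floorEntry(0));          // null: nothing at or below 0
    }
}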
 
Example 4  Project: openjdk-jdk9  File: TreeSubMapTest.java
/**
 * floorEntry returns preceding entry.
 */
public void testDescendingFloorEntry() {
    NavigableMap map = dmap5();
    Map.Entry e1 = map.floorEntry(m3);
    assertEquals(m3, e1.getKey());

    Map.Entry e2 = map.floorEntry(m6);
    assertEquals(m5, e2.getKey());

    Map.Entry e3 = map.floorEntry(m1);
    assertEquals(m1, e3.getKey());

    Map.Entry e4 = map.floorEntry(zero);
    assertNull(e4);
}
 
Example 5  Project: webanno  File: SpanCrossSentenceBehavior.java
@Override
public void onRender(TypeAdapter aAdapter, VDocument aResponse,
        Map<AnnotationFS, VSpan> annoToSpanIdx, int aPageBegin, int aPageEnd)
{
    if (aAdapter.getLayer().isCrossSentence() || annoToSpanIdx.isEmpty()) {
        return;
    }
    
    CAS cas = annoToSpanIdx.entrySet().iterator().next().getKey().getCAS();
    
    // Build indexes to allow quickly looking up the sentence by its begin/end offsets. Since
    // the indexes are navigable, we can also find the sentences starting/ending closest to a
    // particular offset, even if it is not the start/end offset of a sentence.
    NavigableMap<Integer, AnnotationFS> sentBeginIdx = new TreeMap<>();
    NavigableMap<Integer, AnnotationFS> sentEndIdx = new TreeMap<>();
    for (AnnotationFS sent : selectOverlapping(cas, getType(cas, Sentence.class), aPageBegin,
            aPageEnd)) {
        sentBeginIdx.put(sent.getBegin(), sent);
        sentEndIdx.put(sent.getEnd(), sent);
    }
    
    for (AnnotationFS fs : annoToSpanIdx.keySet()) {
        Entry<Integer, AnnotationFS> s1 = sentBeginIdx.floorEntry(fs.getBegin());
        Entry<Integer, AnnotationFS> s2 = sentEndIdx.ceilingEntry(fs.getEnd());
        
        if (s1 == null || s2 == null) {
            // Unable to determine any sentences overlapping with the annotation
            continue;
        }
        
        if (!WebAnnoCasUtil.isSame(s1.getValue(), s2.getValue())) {
            aResponse.add(new VComment(new VID(fs), ERROR,
                    "Crossing sentence boundaries is not permitted."));
        }
    }
}
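
The begin/end indexes above turn the boundary check into a pair of navigable-map lookups. The standalone sketch below uses plain integer offsets instead of CAS annotations to show why floorEntry on the begin index and ceilingEntry on the end index together identify the sentences an annotation starts and ends in.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class SentenceBoundarySketch {
    public static void main(String[] args) {
        // Sentences as begin/end offsets: [0,10), [11,25), [26,40).
        NavigableMap<Integer, String> sentBeginIdx = new TreeMap<>();
        NavigableMap<Integer, String> sentEndIdx = new TreeMap<>();
        int[][] sentences = { {0, 10}, {11, 25}, {26, 40} };
        for (int i = 0; i < sentences.length; i++) {
            sentBeginIdx.put(sentences[i][0], "sentence" + i);
            sentEndIdx.put(sentences[i][1], "sentence" + i);
        }

        // An annotation spanning offsets 5..30 starts in sentence0 and ends in sentence2.
        int annoBegin = 5;
        int annoEnd = 30;
        Entry<Integer, String> startsIn = sentBeginIdx.floorEntry(annoBegin); // sentence beginning at or before 5
        Entry<Integer, String> endsIn = sentEndIdx.ceilingEntry(annoEnd);     // sentence ending at or after 30
        boolean crossesSentences = startsIn != null && endsIn != null
                && !startsIn.getValue().equals(endsIn.getValue());
        System.out.println("crosses sentence boundary: " + crossesSentences); // true
    }
}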
 
Example 6
public GenerationResult get(String name, int index) {
	NavigableMap<Integer,Generation> generations = this.contents.get(
		name
	);
	
	if (generations == null) {
		logger.info(String.format("Cache miss on generation: %s,%d", name, index));
		return GenerationResult.miss();
	}
	
	Entry<Integer, Generation> best = generations.floorEntry(index);
	if (best == null) {
		logger.info(String.format("Cache miss on index: %s,%d", name, index));
		return GenerationResult.miss();
	}
	
	if (best.getKey().equals(index)) {
		logger.info(String.format("Cache hit: %s,%d", name, index));
		return GenerationResult.hit(best.getValue());
	} else {
		logger.info(String.format("Partial cache hit: %s,%d", name, index));
		Generation current = best.getValue();
		for (int i = best.getKey() + 1; i <= index; i++) {
			current = current.evolve();
		}
		generations.put(index, current);
		return GenerationResult.partialHit(current, index - best.getKey());
	}
}
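
The pattern above is "find the nearest cached state at or below the requested index, then roll it forward". It can be exercised in isolation as in the sketch below, where Generation and evolve() are hypothetical stand-ins for the types used above.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class GenerationCacheSketch {
    // Hypothetical stand-in for the Generation type: evolving just increments a counter.
    static final class Generation {
        final int value;
        Generation(int value) { this.value = value; }
        Generation evolve() { return new Generation(value + 1); }
    }

    public static void main(String[] args) {
        NavigableMap<Integer, Generation> generations = new TreeMap<>();
        generations.put(0, new Generation(0));
        generations.put(5, new Generation(5));

        int index = 8;
        Entry<Integer, Generation> best = generations.floorEntry(index); // nearest cached index <= 8 is 5
        Generation current = best.getValue();
        for (int i = best.getKey() + 1; i <= index; i++) {
            current = current.evolve();                                  // roll forward 3 steps
        }
        generations.put(index, current);                                 // cache the newly computed generation
        System.out.println("generation " + index + " = " + current.value); // 8
    }
}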
 
Example 7  Project: j2objc  File: TreeSubMapTest.java
/**
 * floorEntry returns preceding entry.
 */
public void testFloorEntry() {
    NavigableMap map = map5();
    Map.Entry e1 = map.floorEntry(three);
    assertEquals(three, e1.getKey());

    Map.Entry e2 = map.floorEntry(six);
    assertEquals(five, e2.getKey());

    Map.Entry e3 = map.floorEntry(one);
    assertEquals(one, e3.getKey());

    Map.Entry e4 = map.floorEntry(zero);
    assertNull(e4);
}
 
Example 8  Project: j2objc  File: TreeSubMapTest.java
/**
 * floorEntry returns preceding entry.
 */
public void testDescendingFloorEntry() {
    NavigableMap map = dmap5();
    Map.Entry e1 = map.floorEntry(m3);
    assertEquals(m3, e1.getKey());

    Map.Entry e2 = map.floorEntry(m6);
    assertEquals(m5, e2.getKey());

    Map.Entry e3 = map.floorEntry(m1);
    assertEquals(m1, e3.getKey());

    Map.Entry e4 = map.floorEntry(zero);
    assertNull(e4);
}
 
Example 9  Project: kogito-runtimes  File: KieRepositoryImpl.java
synchronized KieModule load(InternalKieScanner kieScanner, ReleaseId releaseId, VersionRange versionRange) {
    String ga = releaseId.getGroupId() + ":" + releaseId.getArtifactId();

    NavigableMap<ComparableVersion, KieModule> artifactMap = kieModules.get(ga);
    if ( artifactMap == null || artifactMap.isEmpty() ) {
        return null;
    }
    KieModule kieModule = artifactMap.get(new ComparableVersion(releaseId.getVersion()));

    if (versionRange.fixed) {
        if ( kieModule != null && releaseId.isSnapshot() ) {
            String oldSnapshotVersion = ((ReleaseIdImpl)kieModule.getReleaseId()).getSnapshotVersion();
            if ( oldSnapshotVersion != null ) {
                String currentSnapshotVersion = kieScanner.getArtifactVersion(releaseId);
                if (currentSnapshotVersion != null &&
                    new ComparableVersion(currentSnapshotVersion).compareTo(new ComparableVersion(oldSnapshotVersion)) > 0) {
                    // if the snapshot currently available on the maven repo is newer than the cached one
                    // return null to enforce the building of this newer version
                    return null;
                }
            }
        }
        return kieModule;
    }

    Map.Entry<ComparableVersion, KieModule> entry =
            versionRange.upperBound == null ?
            artifactMap.lastEntry() :
            versionRange.upperInclusive ?
                artifactMap.floorEntry(new ComparableVersion(versionRange.upperBound)) :
                artifactMap.lowerEntry(new ComparableVersion(versionRange.upperBound));

    if ( entry == null ) {
        return null;
    }

    if ( versionRange.lowerBound == null ) {
        return entry.getValue();
    }

    int comparison = entry.getKey().compareTo(new ComparableVersion(versionRange.lowerBound));
    return comparison > 0 || (comparison == 0 && versionRange.lowerInclusive) ? entry.getValue() : null;
}
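
The range resolution above hinges on three navigable-map calls: lastEntry when there is no upper bound, floorEntry for an inclusive upper bound, and lowerEntry for an exclusive one. A standalone sketch over integer versions (the real code keys the map by ComparableVersion):

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class VersionRangeSketch {
    public static void main(String[] args) {
        NavigableMap<Integer, String> versions = new TreeMap<>();
        versions.put(1, "module-1.0");
        versions.put(2, "module-2.0");
        versions.put(3, "module-3.0");

        Entry<Integer, String> inclusive = versions.floorEntry(2); // upper bound 2, inclusive -> version 2
        Entry<Integer, String> exclusive = versions.lowerEntry(2); // upper bound 2, exclusive -> version 1
        Entry<Integer, String> newest = versions.lastEntry();      // no upper bound -> newest version

        System.out.println(inclusive.getValue()); // module-2.0
        System.out.println(exclusive.getValue()); // module-1.0
        System.out.println(newest.getValue());    // module-3.0
    }
}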
 
Example 10  Project: hadoop  File: RLESparseResourceAllocation.java
/**
 * Add a resource for the specified interval
 * 
 * @param reservationInterval the interval for which the resource is to be
 *          added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(cumulativeCapacity
          .floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
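
The map above is a run-length encoding of cumulative capacity over time: each key is the instant at which capacity changes to the mapped value, so the capacity in effect at any time t is the value of the greatest tick at or before t. A simplified sketch with integer capacities and no Resource or locking machinery:

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class RleCapacitySketch {
    public static void main(String[] args) {
        // Each key marks the time at which the cumulative capacity changes to the mapped value.
        NavigableMap<Long, Integer> cumulativeCapacity = new TreeMap<>();
        cumulativeCapacity.put(0L, 4);  // 4 units reserved from t=0
        cumulativeCapacity.put(10L, 7); // rises to 7 units at t=10
        cumulativeCapacity.put(20L, 0); // all reservations end at t=20

        long t = 15L;
        Entry<Long, Integer> tick = cumulativeCapacity.floorEntry(t); // greatest tick <= 15 is t=10
        System.out.println("capacity at t=" + t + " is " + tick.getValue()); // 7
    }
}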
 
Example 11  Project: big-c  File: RLESparseResourceAllocation.java
/**
 * Add a resource for the specified interval
 * 
 * @param reservationInterval the interval for which the resource is to be
 *          added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(cumulativeCapacity
          .floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
 
Example 12  Project: beam  File: IsmReaderImpl.java
/**
 * Returns the record for the last key having this iterators key prefix. Last is defined as the
 * largest key with the same key prefix when comparing key's byte representations using an
 * unsigned lexicographical byte order.
 *
 * <p>Null is returned if the prefix is not present within this file.
 */
@Override
public WindowedValue<IsmRecord<V>> getLast() throws IOException {
  RandomAccessData keyBytes = new RandomAccessData();
  int shardId = coder.encodeAndHash(keyComponents, keyBytes);

  Optional<SeekableByteChannel> inChannel =
      initializeFooterAndShardIndex(Optional.<SeekableByteChannel>absent(), readCounter);

  // Key is not stored here
  if (!shardIdToShardMap.containsKey(shardId) || !bloomFilterMightContain(keyBytes)) {
    return null;
  }

  inChannel = initializeForKeyedRead(shardId, inChannel, readCounter);
  closeIfPresent(inChannel);

  final NavigableMap<RandomAccessData, IsmShardKey> indexInShard = indexPerShard.get(shardId);
  RandomAccessData end = keyBytes.increment();
  final IsmShardKey cacheEntry = indexInShard.floorEntry(end).getValue();

  NavigableMap<RandomAccessData, WindowedValue<IsmRecord<V>>> block;
  try (Closeable readerCloser = IsmReader.setSideInputReadContext(readCounter)) {
    block = fetch(cacheEntry);
  }

  RandomAccessData lastKey = block.lastKey();

  // If the requested key is greater than the last key within the block, then it
  // does not exist.
  if (RandomAccessData.UNSIGNED_LEXICOGRAPHICAL_COMPARATOR.compare(keyBytes, lastKey) > 0) {
    return null;
  }

  Entry<RandomAccessData, WindowedValue<IsmRecord<V>>> rval = block.floorEntry(end);

  // If the prefix matches completely then we can return
  if (RandomAccessData.UNSIGNED_LEXICOGRAPHICAL_COMPARATOR.commonPrefixLength(
          keyBytes, rval.getKey())
      == keyBytes.size()) {
    return rval.getValue();
  }
  return null;
}
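
getLast relies on an increment-then-floor trick: bump the key prefix to the smallest value greater than everything that starts with it, then take the floorEntry of that bound. The sketch below shows the same idea on plain strings; the string increment is a simplification of the byte-level one in RandomAccessData and assumes the prefix's last character is not Character.MAX_VALUE.

import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

public class PrefixLastKeySketch {
    // Smallest string greater than every string starting with the prefix
    // (simplified: assumes the last character is not Character.MAX_VALUE).
    static String increment(String prefix) {
        int last = prefix.length() - 1;
        return prefix.substring(0, last) + (char) (prefix.charAt(last) + 1);
    }

    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("user:alice", 1);
        map.put("user:bob", 2);
        map.put("user:carol", 3);
        map.put("video:intro", 4);

        String prefix = "user:";
        Entry<String, Integer> last = map.floorEntry(increment(prefix)); // greatest key <= the bumped prefix
        if (last != null && last.getKey().startsWith(prefix)) {
            System.out.println("last key with prefix: " + last.getKey()); // user:carol
        } else {
            System.out.println("no key with prefix " + prefix);
        }
    }
}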
 
Example 13  Project: incubator-heron  File: KafkaSpout.java
@SuppressWarnings("Duplicates")
@Override
public void ack(Object msgId) {
  long start = System.nanoTime();
  ConsumerRecordMessageId consumerRecordMessageId = (ConsumerRecordMessageId) msgId;
  TopicPartition topicPartition = consumerRecordMessageId.getTopicPartition();
  if (!assignedPartitions.contains(topicPartition)) {
    LOG.info("ignore {} because it's been revoked", consumerRecordMessageId);
    return;
  }
  long offset = consumerRecordMessageId.getOffset();
  ackRegistry.putIfAbsent(topicPartition, new TreeMap<>());
  NavigableMap<Long, Long> navigableMap = ackRegistry.get(topicPartition);

  Map.Entry<Long, Long> floorRange = navigableMap.floorEntry(offset);
  Map.Entry<Long, Long> ceilingRange = navigableMap.ceilingEntry(offset);

  long floorBottom = floorRange != null ? floorRange.getKey() : Long.MIN_VALUE;
  long floorTop = floorRange != null ? floorRange.getValue() : Long.MIN_VALUE;
  long ceilingBottom = ceilingRange != null ? ceilingRange.getKey() : Long.MAX_VALUE;
  long ceilingTop = ceilingRange != null ? ceilingRange.getValue() : Long.MAX_VALUE;

  //the ack is for a message that has already been acknowledged.
  //This happens when a failed tuple has caused the
  //Kafka consumer to seek back to an earlier position, and some messages are replayed.
  if ((offset >= floorBottom && offset <= floorTop)
      || (offset >= ceilingBottom && offset <= ceilingTop)) {
    return;
  }
  if (ceilingBottom - floorTop == 2) {
    //the ack connects the two adjacent ranges
    navigableMap.put(floorBottom, ceilingTop);
    navigableMap.remove(ceilingBottom);
  } else if (offset == floorTop + 1) {
    //the acknowledged offset is the immediate neighbour
    // of the upper bound of the floor range
    navigableMap.put(floorBottom, offset);
  } else if (offset == ceilingBottom - 1) {
    //the acknowledged offset is the immediate neighbour
    // of the lower bound of the ceiling range
    navigableMap.remove(ceilingBottom);
    navigableMap.put(offset, ceilingTop);
  } else {
    //it starts a new range
    navigableMap.put(offset, offset);
  }
  LOG.debug("ack {} in {} ns", msgId, System.nanoTime() - start);
  LOG.debug("{}", ackRegistry.get(consumerRecordMessageId.getTopicPartition()));
}
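
The ack registry above keeps disjoint inclusive ranges of acknowledged offsets as (low -> high) entries and uses floorEntry/ceilingEntry to extend or merge them. The same merge logic, stripped of the Kafka and logging plumbing, in a standalone sketch:

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class AckRangeSketch {
    // Disjoint inclusive ranges of acknowledged offsets, keyed by each range's lower bound.
    private final NavigableMap<Long, Long> ranges = new TreeMap<>();

    void ack(long offset) {
        Map.Entry<Long, Long> floor = ranges.floorEntry(offset);
        Map.Entry<Long, Long> ceiling = ranges.ceilingEntry(offset);

        long floorBottom = floor != null ? floor.getKey() : Long.MIN_VALUE;
        long floorTop = floor != null ? floor.getValue() : Long.MIN_VALUE;
        long ceilingBottom = ceiling != null ? ceiling.getKey() : Long.MAX_VALUE;
        long ceilingTop = ceiling != null ? ceiling.getValue() : Long.MAX_VALUE;

        if ((offset >= floorBottom && offset <= floorTop)
                || (offset >= ceilingBottom && offset <= ceilingTop)) {
            return;                              // already acknowledged
        }
        if (ceilingBottom - floorTop == 2) {
            ranges.put(floorBottom, ceilingTop); // offset bridges two adjacent ranges
            ranges.remove(ceilingBottom);
        } else if (offset == floorTop + 1) {
            ranges.put(floorBottom, offset);     // extends the range below upward
        } else if (offset == ceilingBottom - 1) {
            ranges.remove(ceilingBottom);        // extends the range above downward
            ranges.put(offset, ceilingTop);
        } else {
            ranges.put(offset, offset);          // starts a new single-offset range
        }
    }

    public static void main(String[] args) {
        AckRangeSketch registry = new AckRangeSketch();
        for (long offset : new long[] {0, 1, 3, 2}) {
            registry.ack(offset);
        }
        System.out.println(registry.ranges);     // {0=3}: acking 2 merged {0=1} and {3=3}
    }
}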
 
Example 14  Project: webanno  File: SpanCrossSentenceBehavior.java
@Override
public List<Pair<LogMessage, AnnotationFS>> onValidate(TypeAdapter aAdapter, CAS aCas)
{
    // If crossing sentence boundaries is permitted, then there is nothing to validate here
    if (aAdapter.getLayer().isCrossSentence()) {
        return emptyList();
    }
    
    Type type = getType(aCas, aAdapter.getAnnotationTypeName());
    
    // If there are no annotations on this layer, nothing to do
    Collection<AnnotationFS> annotations = select(aCas, type);
    if (annotations.isEmpty()) {
        return emptyList();
    }

    // Prepare feedback message list
    List<Pair<LogMessage, AnnotationFS>> messages = new ArrayList<>();

    // Build indexes to allow quickly looking up the sentence by its begin/end offsets. Since
    // the indexes are navigable, we can also find the sentences starting/ending closest to a
    // particular offset, even if it is not the start/end offset of a sentence.
    NavigableMap<Integer, AnnotationFS> sentBeginIdx = new TreeMap<>();
    NavigableMap<Integer, AnnotationFS> sentEndIdx = new TreeMap<>();
    for (AnnotationFS sent : select(aCas, getType(aCas, Sentence.class))) {
        sentBeginIdx.put(sent.getBegin(), sent);
        sentEndIdx.put(sent.getEnd(), sent);
    }
    
    for (AnnotationFS fs : annotations) {
        Entry<Integer, AnnotationFS> s1 = sentBeginIdx.floorEntry(fs.getBegin());
        Entry<Integer, AnnotationFS> s2 = sentEndIdx.ceilingEntry(fs.getEnd());
        
        if (s1 == null || s2 == null) {
            messages.add(Pair.of(LogMessage.error(this,
                    "Unable to determine any sentences overlapping with [%d-%d]", fs.getBegin(),
                    fs.getEnd()), fs));
            continue;
        }
        
        if (!WebAnnoCasUtil.isSame(s1.getValue(), s2.getValue())) {
            messages.add(Pair.of(
                    LogMessage.error(this, "Crossing sentence boundaries is not permitted."),
                    fs));
        }
    }

    return messages;
}
 
Example 15  Project: webanno  File: RelationCrossSentenceBehavior.java
@Override
public List<Pair<LogMessage, AnnotationFS>> onValidate(TypeAdapter aAdapter, CAS aCas)
{
    // If crossing sentence boundaries is permitted, then there is nothing to validate here
    if (aAdapter.getLayer().isCrossSentence()) {
        return emptyList();
    }
    
    RelationAdapter adapter = (RelationAdapter) aAdapter;
    Type type = getType(aCas, aAdapter.getAnnotationTypeName());
    Feature targetFeature = type.getFeatureByBaseName(adapter.getTargetFeatureName());
    Feature sourceFeature = type.getFeatureByBaseName(adapter.getSourceFeatureName());
    
    // If there are no annotations on this layer, nothing to do
    Collection<AnnotationFS> annotations = select(aCas, type);
    if (annotations.isEmpty()) {
        return emptyList();
    }

    // Prepare feedback message list
    List<Pair<LogMessage, AnnotationFS>> messages = new ArrayList<>();

    // Build indexes to allow quickly looking up the sentence by its begin/end offsets. Since
    // the indexes are navigable, we can also find the sentences starting/ending closest to a
    // particular offset, even if it is not the start/end offset of a sentence.
    NavigableMap<Integer, AnnotationFS> sentBeginIdx = new TreeMap<>();
    NavigableMap<Integer, AnnotationFS> sentEndIdx = new TreeMap<>();
    for (AnnotationFS sent : select(aCas, getType(aCas, Sentence.class))) {
        sentBeginIdx.put(sent.getBegin(), sent);
        sentEndIdx.put(sent.getEnd(), sent);
    }
    
    for (AnnotationFS fs : annotations) {
        AnnotationFS sourceFs = (AnnotationFS) fs.getFeatureValue(sourceFeature);
        AnnotationFS targetFs = (AnnotationFS) fs.getFeatureValue(targetFeature);

        Entry<Integer, AnnotationFS> s1 = sentBeginIdx.floorEntry(sourceFs.getBegin());
        Entry<Integer, AnnotationFS> s2 = sentEndIdx.ceilingEntry(targetFs.getEnd());
        
        if (s1 == null || s2 == null) {
            messages.add(Pair.of(LogMessage.error(this,
                    "Unable to determine any sentences overlapping with [%d-%d]",
                    sourceFs.getBegin(), targetFs.getEnd()), fs));
            continue;
        }
        
        if (!WebAnnoCasUtil.isSame(s1.getValue(), s2.getValue())) {
            messages.add(Pair.of(
                    LogMessage.error(this, "Crossing sentence boundaries is not permitted."),
                    fs));
        }
    }

    return messages;
    
}
 
Example 16  Project: cryptotrader  File: BitflyerAdviser.java
@VisibleForTesting
BigDecimal calculateSfdRate(Context context, Request request, boolean buy) {

    if (FX_BTC_JPY != ProductType.find(request.getInstrument())) {
        return ZERO;
    }

    BigDecimal sfdPct = getDecimalProperty(KEY_SFD_PCT, ONE);

    if (sfdPct.signum() == 0) {
        return ZERO;
    }

    Key key = Key.from(request);

    BigDecimal fxPrice = buy ? context.getBestAskPrice(key) : context.getBestBidPrice(key);

    if (fxPrice == null) {
        return ZERO;
    }

    BigDecimal cashPrice = context.getLastPrice(Key.build(key).instrument(BTC_JPY.name()).build());

    if (cashPrice == null || cashPrice.signum() == 0) {
        return ZERO;
    }

    BigDecimal pct = fxPrice.divide(cashPrice, SCALE, HALF_UP).subtract(ONE);

    BigDecimal adj = buy ? pct : pct.negate();

    NavigableMap<BigDecimal, BigDecimal> table = getSfdTable();

    Entry<BigDecimal, BigDecimal> floor = table.floorEntry(adj);

    if (floor == null) {
        return ZERO;
    }

    if (sfdPct.signum() > 0) {
        return floor.getValue().multiply(sfdPct);
    }

    Entry<BigDecimal, BigDecimal> higher = table.higherEntry(adj);

    if (higher == null) {
        return floor.getValue().multiply(sfdPct.negate());
    }

    BigDecimal numerator = adj.subtract(floor.getKey()).multiply(higher.getValue().subtract(floor.getValue()));

    BigDecimal denominator = higher.getKey().subtract(floor.getKey());

    BigDecimal interpolated = floor.getValue().add(numerator.divide(denominator, SCALE, HALF_UP));

    return interpolated.multiply(sfdPct.negate());

}
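
The final branch above linearly interpolates between the floor entry and the next higher entry of the threshold table. The standalone sketch below walks the same arithmetic over a small made-up table (the values are for illustration only, not actual SFD thresholds):

import java.math.BigDecimal;
import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

import static java.math.RoundingMode.HALF_UP;

public class TableInterpolationSketch {
    public static void main(String[] args) {
        // Made-up threshold table: price deviation -> rate.
        NavigableMap<BigDecimal, BigDecimal> table = new TreeMap<>();
        table.put(new BigDecimal("0.05"), new BigDecimal("0.0025"));
        table.put(new BigDecimal("0.10"), new BigDecimal("0.0050"));
        table.put(new BigDecimal("0.15"), new BigDecimal("0.0100"));

        BigDecimal deviation = new BigDecimal("0.12");
        Entry<BigDecimal, BigDecimal> floor = table.floorEntry(deviation);   // 0.10 -> 0.0050
        Entry<BigDecimal, BigDecimal> higher = table.higherEntry(deviation); // 0.15 -> 0.0100

        // Linear interpolation between the floor and higher entries, as in calculateSfdRate.
        BigDecimal numerator = deviation.subtract(floor.getKey())
                .multiply(higher.getValue().subtract(floor.getValue()));
        BigDecimal denominator = higher.getKey().subtract(floor.getKey());
        BigDecimal interpolated = floor.getValue().add(numerator.divide(denominator, 8, HALF_UP));

        System.out.println(interpolated); // 0.00700000 (40% of the way from 0.0050 to 0.0100)
    }
}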