java.util.TreeMap#firstEntry() Source Code Examples

Listed below are example usages of java.util.TreeMap#firstEntry() collected from open-source projects; the full source of each snippet can be viewed in the corresponding project on GitHub.
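
Before the project examples, here is a minimal self-contained sketch (class and variable names are purely illustrative) of what firstEntry() does: it returns the mapping with the least key in the TreeMap, or null when the map is empty, whereas firstKey() throws NoSuchElementException in that case.

import java.util.Map;
import java.util.TreeMap;

public class FirstEntryDemo {
  public static void main(String[] args) {
    TreeMap<Integer, String> map = new TreeMap<>();
    map.put(3, "three");
    map.put(1, "one");
    map.put(2, "two");

    // firstEntry() returns the mapping with the least key
    Map.Entry<Integer, String> first = map.firstEntry();
    System.out.println(first.getKey() + " -> " + first.getValue()); // prints "1 -> one"

    // on an empty map firstEntry() returns null instead of throwing
    map.clear();
    System.out.println(map.firstEntry()); // prints "null"
  }
}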

Example 1  Project: gemfirexd-oss  File: OrderedTombstoneMap.java
/**
 * Remove and return the entry with the oldest version timestamp from the map.
 */
public Map.Entry<VersionTag, T> take() {
  if(tombstoneMap.isEmpty()) {
    //if there are no more entries, return null
    return null;
  } else {
    //Otherwise, look at all of the members and find the tag with the 
    //lowest timestamp.
    long lowestTimestamp = Long.MAX_VALUE;
    TreeMap<VersionTag, T> lowestMap = null;
    for(TreeMap<VersionTag, T> memberMap: tombstoneMap.values()) {
      VersionTag firstTag = memberMap.firstKey();
      long stamp = firstTag.getVersionTimeStamp();
      if(stamp < lowestTimestamp) {
        lowestTimestamp = stamp;
        lowestMap = memberMap;
      }
    }
    if(lowestMap == null) {
      return null;
    }
    //Remove the lowest entry
    Entry<VersionTag, T> result = lowestMap.firstEntry();
    lowestMap.remove(result.getKey());
    if(lowestMap.isEmpty()) {
      //if this is the last entry from a given member,
      //remove the map for that member
      tombstoneMap.remove(result.getKey().getMemberID());
    }
    
    return result;
  }
}
 
Example 2  Project: gemfirexd-oss  File: HfileSortedOplogJUnitTest.java
/**
 * Tests the hoplog iterator: after the single entry is returned, hasNext should return false and
 * any subsequent next() call should throw NoSuchElementException.
 */
public void testSingleKVIterator() throws Exception {
  String hoplogName = getRandomHoplogName();
  TreeMap<String, String> map = createHoplog(hoplogName, 1);
  HFileSortedOplog testHoplog = new HFileSortedOplog(hdfsStore, new Path(testDataDir, hoplogName), blockCache, stats, storeStats);
  HoplogReader reader = testHoplog.getReader();

  HoplogIterator<byte[], byte[]> iter = reader.scan();
  assertNull(iter.getKey());
  assertNull(iter.getValue());
  assertTrue(iter.hasNext());
  assertNull(iter.getKey());
  assertNull(iter.getValue());

  Entry<String, String> entry = map.firstEntry();
  iter.next();
  assertNotNull(iter.getKey());
  assertEquals(entry.getKey(), new String(iter.getKey()));
  assertNotNull(iter.getValue());
  assertEquals(entry.getValue(), new String(iter.getValue()));

  assertFalse(iter.hasNext());
  try {
    iter.next();
    fail();
  } catch (NoSuchElementException e) {
  }
}
 
Example 3  Project: render  File: ChannelSpec.java
public Map.Entry<Integer, ImageAndMask> getFloorMipmapEntry(final Integer mipmapLevel,
                                                            final TreeMap<Integer, ImageAndMask> levelToImageMap) {

    Map.Entry<Integer, ImageAndMask> floorEntry = levelToImageMap.floorEntry(mipmapLevel);

    if (floorEntry == null) {
        floorEntry = levelToImageMap.firstEntry();
    } else if ((floorEntry.getKey() < mipmapLevel) && (mipmapPathBuilder != null)) {
        floorEntry = mipmapPathBuilder.deriveImageAndMask(mipmapLevel,
                                                          levelToImageMap.firstEntry(),
                                                          true);
    }

    return floorEntry;
}
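
A note on the pattern above: TreeMap#floorEntry(k) returns the entry with the greatest key less than or equal to k, or null when every key is greater than k, which is why the method falls back to firstEntry() when the requested mipmap level lies below the lowest stored level. A minimal sketch with hypothetical values (unrelated to the render project):

TreeMap<Integer, String> levelToImage = new TreeMap<>();
levelToImage.put(2, "level2.png");
levelToImage.put(4, "level4.png");

levelToImage.floorEntry(3);  // entry 2="level2.png" (greatest key <= 3)
levelToImage.floorEntry(1);  // null, no key <= 1, so the caller falls back to...
levelToImage.firstEntry();   // entry 2="level2.png"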
 
Example 4  Project: incubator-iotdb  File: StorageGroupProcessor.java
/**
 * get the processor from the TreeMap, closing the oldest processor if necessary
 *
 * @param timeRangeId time partition range
 * @param tsFileProcessorTreeMap tsFileProcessorTreeMap
 * @param fileList file list to which the new processor's resource is added
 * @param sequence whether the processor handles sequence data or not
 */
private TsFileProcessor getOrCreateTsFileProcessorIntern(long timeRangeId,
    TreeMap<Long, TsFileProcessor> tsFileProcessorTreeMap,
    Collection<TsFileResource> fileList,
    boolean sequence)
    throws IOException, DiskSpaceInsufficientException {

  TsFileProcessor res;
  // we have to ensure only one thread can change workSequenceTsFileProcessors
  writeLock();
  try {
    if (!tsFileProcessorTreeMap.containsKey(timeRangeId)) {
      // we have to remove oldest processor to control the num of the memtables
      // TODO: use a method to control the number of memtables
      if (tsFileProcessorTreeMap.size()
          >= IoTDBDescriptor.getInstance().getConfig().getConcurrentWritingTimePartition()) {
        Map.Entry<Long, TsFileProcessor> processorEntry = tsFileProcessorTreeMap.firstEntry();
        logger.info(
            "will close a {} TsFile because too many active partitions ({} > {}) in the storage group {},",
            sequence, tsFileProcessorTreeMap.size(),
            IoTDBDescriptor.getInstance().getConfig().getConcurrentWritingTimePartition(),
            storageGroupName);
        asyncCloseOneTsFileProcessor(sequence, processorEntry.getValue());
      }

      // build new processor
      TsFileProcessor newProcessor = createTsFileProcessor(sequence, timeRangeId);
      tsFileProcessorTreeMap.put(timeRangeId, newProcessor);
      fileList.add(newProcessor.getTsFileResource());
      res = newProcessor;
    } else {
      res = tsFileProcessorTreeMap.get(timeRangeId);
    }

  } finally {
    // unlock in finally
    writeUnlock();
  }

  return res;
}
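
The pattern above (when the TreeMap reaches a size limit, use firstEntry() to pick the entry with the smallest key, i.e. the oldest time partition, and close it before inserting the new one) reduces to the following sketch; the limit, type parameters and method names are hypothetical stand-ins, not IoTDB code:

import java.util.Map;
import java.util.TreeMap;

class OldestFirstEviction {
  static final int MAX_ACTIVE = 4; // hypothetical limit

  static void putWithEviction(TreeMap<Long, String> active, long id, String processor) {
    if (!active.containsKey(id) && active.size() >= MAX_ACTIVE) {
      // evict the entry with the smallest key, i.e. the oldest partition
      Map.Entry<Long, String> oldest = active.firstEntry();
      active.remove(oldest.getKey()); // stand-in for closing the real processor
    }
    active.put(id, processor);
  }
}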
 
Example 5  Project: linstor-server  File: ConfFileBuilder.java
private NetInterface getPreferredNetIf(DrbdRscData<Resource> peerRscDataRef)
{
    NetInterface preferredNetIf = null;
    try
    {
        TreeMap<VolumeNumber, DrbdVlmData<Resource>> sortedVlmData = new TreeMap<>(
            peerRscDataRef.getVlmLayerObjects()
        );
        Entry<VolumeNumber, DrbdVlmData<Resource>> firstVolumeEntry = sortedVlmData.firstEntry();
        Resource rsc = peerRscDataRef.getAbsResource();
        Node node = rsc.getNode();

        PriorityProps prioProps = new PriorityProps();

        if (firstVolumeEntry != null)
        {
            VolumeNumber firstVlmNr = firstVolumeEntry.getKey();
            List<AbsRscLayerObject<Resource>> storageRscList = LayerUtils.getChildLayerDataByKind(
                firstVolumeEntry.getValue().getRscLayerObject(),
                DeviceLayerKind.STORAGE
            );
            for (AbsRscLayerObject<Resource> rscObj : storageRscList)
            {
                VlmProviderObject<Resource> vlmProviderObject = rscObj.getVlmProviderObject(firstVlmNr);
                if (vlmProviderObject != null)
                {
                    prioProps.addProps(
                        vlmProviderObject.getStorPool().getProps(accCtx)
                    );
                }
            }
        }
        prioProps.addProps(rsc.getProps(accCtx));
        prioProps.addProps(node.getProps(accCtx));

        String prefNic = prioProps.getProp(ApiConsts.KEY_STOR_POOL_PREF_NIC);

        if (prefNic != null)
        {
            preferredNetIf = node.getNetInterface(
                accCtx,
                new NetInterfaceName(prefNic)
            );

            if (preferredNetIf == null)
            {
                errorReporter.logWarning(
                    String.format("Preferred network interface '%s' not found, fallback to default", prefNic)
                );
            }
        }

        // fallback if preferred couldn't be found
        if (preferredNetIf == null)
        {
            // Try to find the 'default' network interface
            preferredNetIf = node.getNetInterface(accCtx, NetInterfaceName.DEFAULT_NET_INTERFACE_NAME);
            // If there is not even a 'default', use the first one that is found in the node's
            // list of network interfaces
            if (preferredNetIf == null)
            {
                preferredNetIf = node.streamNetInterfaces(accCtx).findFirst().orElse(null);
            }
        }
    }
    catch (AccessDeniedException | InvalidKeyException | InvalidNameException implError)
    {
        throw new ImplementationError(implError);
    }

    return preferredNetIf;
}
 
Example 6  Project: JavaTutorial  File: TreeMapExample.java
public void getSmallestEntry(TreeMap<String, String> maps){
    Map.Entry<String, String> entry = maps.firstEntry();
    System.out.println("The smallest entry is:");
    System.out.print("key = " + entry.getKey());
    System.out.println(" value = " + entry.getValue());
}
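
Note that firstEntry() returns null when the map is empty, so the method above would throw a NullPointerException for an empty TreeMap. A defensive variant could look like this (illustrative only, not part of the JavaTutorial source):

public void getSmallestEntry(TreeMap<String, String> maps) {
    Map.Entry<String, String> entry = maps.firstEntry();
    if (entry == null) {
        System.out.println("The map is empty");
        return;
    }
    System.out.println("The smallest entry is:");
    System.out.println("key = " + entry.getKey() + " value = " + entry.getValue());
}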
 
Example 7  Project: ambry  File: IndexTest.java
/**
 * Tests all cases of {@link PersistentIndex#findEntriesSince(FindToken, long)} that result in an index based
 * {@link StoreFindToken} being returned.
 * 1. Uninitialized -> Index
 * 2. Index -> Index
 * 3. Journal -> Index
 * @throws StoreException
 */
private void findEntriesSinceToIndexBasedTest() throws StoreException {
  // ------------------
  // 1. Index -> Index
  Offset firstIndexSegmentStartOffset = state.referenceIndex.firstKey();
  Offset secondIndexSegmentStartOffset = state.referenceIndex.higherKey(firstIndexSegmentStartOffset);
  MockId firstId = state.referenceIndex.get(firstIndexSegmentStartOffset).firstKey();
  // All elements from first index segment and two from the second to be returned (because of size restrictions)
  Set<MockId> expectedKeys = new HashSet<>();
  long maxTotalSizeOfEntries = 0;
  for (Map.Entry<MockId, TreeSet<IndexValue>> segmentEntry : state.referenceIndex.get(firstIndexSegmentStartOffset)
      .entrySet()) {
    if (!segmentEntry.getKey().equals(firstId)) {
      expectedKeys.add(segmentEntry.getKey());
      maxTotalSizeOfEntries += getSizeOfAllValues(segmentEntry.getValue());
    }
  }
  TreeMap<MockId, TreeSet<IndexValue>> secondIndexSegment = state.referenceIndex.get(secondIndexSegmentStartOffset);
  Map.Entry<MockId, TreeSet<IndexValue>> secondIndexSegmentEntry = secondIndexSegment.firstEntry();
  expectedKeys.add(secondIndexSegmentEntry.getKey());
  maxTotalSizeOfEntries += getSizeOfAllValues(secondIndexSegmentEntry.getValue());
  secondIndexSegmentEntry = secondIndexSegment.higherEntry(secondIndexSegmentEntry.getKey());
  expectedKeys.add(secondIndexSegmentEntry.getKey());
  maxTotalSizeOfEntries += getSizeOfAllValues(secondIndexSegmentEntry.getValue());

  StoreFindToken startToken =
      new StoreFindToken(firstId, firstIndexSegmentStartOffset, state.sessionId, state.incarnationId);
  StoreFindToken expectedEndToken =
      new StoreFindToken(secondIndexSegmentEntry.getKey(), secondIndexSegmentStartOffset, state.sessionId,
          state.incarnationId);
  expectedEndToken.setBytesRead(state.index.getAbsolutePositionInLogForOffset(secondIndexSegmentStartOffset));
  doFindEntriesSinceTest(startToken, maxTotalSizeOfEntries, expectedKeys, expectedEndToken);

  // ------------------
  // 2. Uninitialized -> Index
  // add firstStoreKey and its size
  expectedKeys.add(firstId);
  maxTotalSizeOfEntries +=
      getSizeOfAllValues(state.referenceIndex.get(firstIndexSegmentStartOffset).firstEntry().getValue());
  doFindEntriesSinceTest(new StoreFindToken(), maxTotalSizeOfEntries, expectedKeys, expectedEndToken);

  // ------------------
  // 3. Journal -> Index
  // create a journal based token for an offset that isn't in the journal
  startToken = new StoreFindToken(state.logOrder.firstKey(), state.sessionId, state.incarnationId, false);
  doFindEntriesSinceTest(startToken, maxTotalSizeOfEntries, expectedKeys, expectedEndToken);
}