com.google.common.cache.Cache#asMap() Source Code Examples

Listed below are example usages of com.google.common.cache.Cache#asMap() drawn from open-source projects; follow each project link to view the full source on GitHub.
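
Before the project examples, here is a minimal, self-contained sketch of what Cache#asMap() gives you: a live ConcurrentMap view backed by the cache, so writes through either the Cache API or the map are visible to both. The class and key names below are illustrative only.

import java.util.concurrent.ConcurrentMap;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class AsMapViewSketch {
    public static void main(String[] args) {
        Cache<String, String> cache = CacheBuilder.newBuilder().build();

        // asMap() is a view, not a copy: it reflects the cache's current contents.
        ConcurrentMap<String, String> view = cache.asMap();

        cache.put("a", "1");          // visible through the view
        view.putIfAbsent("b", "2");   // visible through the cache

        System.out.println(view.get("a"));           // 1
        System.out.println(cache.getIfPresent("b")); // 2
    }
}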

Example 1  Project: datawave   File: DateIndexHelper.java
private void showMeDaCache(String when) {
    log.trace("from applicationContext:" + applicationContext);
    if (this.applicationContext != null) {
        CacheManager cacheManager = applicationContext.getBean("dateIndexHelperCacheManager", CacheManager.class);
        log.trace("beans are " + Arrays.toString(applicationContext.getBeanDefinitionNames()));
        if (cacheManager != null) {
            for (String cacheName : cacheManager.getCacheNames()) {
                log.trace(when + " got " + cacheName);
                Object nativeCache = cacheManager.getCache(cacheName).getNativeCache();
                log.trace("nativeCache is a " + nativeCache);
                Cache cache = (Cache) nativeCache;
                Map map = cache.asMap();
                log.trace("cache map is " + map);
                log.trace("cache map size is " + map.size());
                for (Object key : map.keySet()) {
                    log.trace("value for " + key + " is :" + map.get(key));
                }
            }
        } else {
            log.trace(when + "CacheManager is " + cacheManager);
        }
    }
}
 
Example 2  Project: uavstack   File: DoTestRuleFilterFactory.java
@Test
public void guavaCache() throws InterruptedException {

    TesTicker ticker = new TesTicker();
    Cache<String, Pojo> collection = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.SECONDS).ticker(ticker)
            .<String, Pojo> build();
    Pojo p1 = new Pojo("p1name", "p1val");
    Pojo p2 = new Pojo("p2name", "p2val");
    collection.put("p1", p1);
    collection.put("p2", p2);
    ticker.advance(3, TimeUnit.SECONDS);
    Map<String, Pojo> map = collection.asMap();
    assertTrue(map.containsKey("p1"));
    // map.get("p1");
    ticker.advance(3, TimeUnit.SECONDS);
    assertEquals(2, collection.size()); // size() may still count expired-but-uncleaned entries
    assertFalse(map.containsKey("p1")); // expired-entry cleanup is triggered here
    assertEquals(1, collection.size());
    assertNull(collection.getIfPresent("p2"));
    assertNull(collection.getIfPresent("p1"));// 有清除过期操作
    assertEquals(0, collection.size());
}
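
The test above relies on a project-specific TesTicker. With guava-testlib on the classpath, the same behaviour of the asMap() view under expiration can be reproduced with com.google.common.testing.FakeTicker; the sketch below is illustrative and assumes that dependency.

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.testing.FakeTicker;

public class AsMapExpirySketch {
    public static void main(String[] args) {
        FakeTicker ticker = new FakeTicker();
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .expireAfterAccess(5, TimeUnit.SECONDS)
                .ticker(ticker)
                .build();
        cache.put("k", "v");

        // Within the expiry window the view still reports the entry.
        System.out.println(cache.asMap().containsKey("k")); // true

        // Past the expiry window the entry is treated as gone, even though the
        // physical cleanup may only happen on a later read or write.
        ticker.advance(6, TimeUnit.SECONDS);
        System.out.println(cache.asMap().containsKey("k")); // false
        System.out.println(cache.getIfPresent("k"));        // null
    }
}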
 
Example 3  Project: incubator-heron   File: PrometheusSink.java
@Override
public void processRecord(MetricsRecord record) {
  final String[] sources = MetricsUtil.splitRecordSource(record);

  if (sources.length > 2) {
    final String source = String.format("%s/%s/%s", getTopologyName(), sources[1], sources[2]);

    Map<String, Double> sourceCache = metricsCache.getIfPresent(source);
    if (sourceCache == null) {
      final Cache<String, Double> newSourceCache = createCache();
      sourceCache = newSourceCache.asMap();
    }

    sourceCache.putAll(processMetrics(record.getMetrics()));
    metricsCache.put(source, sourceCache);
  } else {
    LOG.log(Level.SEVERE, "Unexpected metrics source: " + record.getSource());
  }
}
 
Example 4  Project: cellery-security   File: UserContextStoreImpl.java
public UserContextStoreImpl() {

    log.info("User Context expiry set to {} seconds.", getUserContextExpiry());
    Cache<String, String> cache = CacheBuilder.newBuilder()
            .expireAfterWrite(getUserContextExpiry(), TimeUnit.SECONDS)
            .removalListener(removalNotification -> {
                log.debug("Stored user context was removed: " + removalNotification);
            })
            .build();

    userContextMap = cache.asMap();
}
 
Example 5  Project: ambari-metrics   File: TimelineMetricsHolder.java
/**
 * Returns a snapshot of the cache contents and clears the cache.
 * @param cache the cache to drain
 * @param lock the read/write lock guarding access to the cache
 * @return a sorted copy of the entries that were in the cache
 */
private Map<String, TimelineMetrics> extractMetricsFromCacheWithLock(Cache<String, TimelineMetrics> cache, ReadWriteLock lock) {
  lock.writeLock().lock();
  try {
    Map<String, TimelineMetrics> metricsMap = new TreeMap<>(cache.asMap());
    cache.invalidateAll();
    return metricsMap;
  } finally {
    lock.writeLock().unlock();
  }
}
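
The write lock above makes the snapshot-and-clear pair atomic with respect to other holders of the same lock. For comparison, a lock-free variant (illustrative only, not taken from ambari-metrics) can drain the cache one entry at a time through the asMap() view; each removal is atomic on its own, but the drain as a whole is not a consistent snapshot.

private Map<String, TimelineMetrics> extractMetricsFromCacheWithoutLock(Cache<String, TimelineMetrics> cache) {
  Map<String, TimelineMetrics> metricsMap = new TreeMap<>();
  for (Map.Entry<String, TimelineMetrics> entry : cache.asMap().entrySet()) {
    // ConcurrentMap.remove(key, value) only succeeds if the mapping is unchanged,
    // so an entry that was updated mid-drain is left in place for the next pass.
    if (cache.asMap().remove(entry.getKey(), entry.getValue())) {
      metricsMap.put(entry.getKey(), entry.getValue());
    }
  }
  return metricsMap;
}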
 
Example 6  Project: icure-backend   File: GuavaConcurrentMapFactory.java
@Override
public <K, V> ConcurrentMap<K, V> getMap(final ConcurrentMapListener<K, V> listener) {
	// Create cache builder
	CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();

	// Set expireAfterWrite
	if (expireDuration != null && expireUnit != null) {
		cacheBuilder = cacheBuilder.expireAfterWrite(expireDuration, expireUnit);
	}

	// Configure listener
	if (listener != null) {
		cacheBuilder.removalListener((RemovalListener<K, V>) notification -> {
			K key = notification.getKey();
			V value = notification.getValue();
			switch (notification.getCause()) {
				case REPLACED:
					listener.entryUpdated(key, value);
					break;
				case EXPLICIT:
					listener.entryRemoved(key, value);
					break;
				case COLLECTED:
				case EXPIRED:
				case SIZE:
					listener.entryEvicted(key, value);
					break;
			}
		});
	}

	// Build cache
	Cache<K, V> cache = cacheBuilder.build();

	return cache.asMap();
}
 
Example 7  Project: hermes   File: PageCacheBuilder.java
@SuppressWarnings("rawtypes")
private void copyCacheDatas(Cache<Long, Page<T>> from, Cache<Long, Page<T>> to) {
	ConcurrentMap<Long, Page<T>> asMap = from.asMap();

	Object[] segments = (Object[]) Reflects.forField().getDeclaredFieldValue(asMap, "segments");
	Object segment = segments[0];
	Queue accessQueue = (Queue) Reflects.forField().getDeclaredFieldValue(segment, "accessQueue");

	ReentrantLock lock = ((ReentrantLock) segment);
	lock.lock();
	try {
		for (Object item : accessQueue) {
			try {
				Method m1 = Reflects.forMethod().getMethod(item.getClass(), "getKey");
				m1.setAccessible(true);
				Long key = (Long) m1.invoke(item);
				Method m2 = Reflects.forMethod().getMethod(item.getClass(), "getValueReference");
				m2.setAccessible(true);
				Object valueRef = m2.invoke(item);
				Page<T> value = Reflects.forMethod().invokeDeclaredMethod(valueRef, "get");

				to.put(key, value);
			} catch (Exception e) {
				log.error("Exception occurred while copying data from old cache to new cache.", e);
			}
		}
	} finally {
		lock.unlock();
	}
}
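
The reflection above reaches into Guava's LocalCache internals so that the copy preserves the segment's access order, which makes it fragile across Guava versions. For comparison, a version-independent copy through the public asMap() view would look like the sketch below; it is simpler but does not preserve access order.

private void copyCacheDatasViaAsMap(Cache<Long, Page<T>> from, Cache<Long, Page<T>> to) {
	// Iterate the live view and re-insert each entry into the target cache.
	// The iteration order of asMap() is unspecified, so any access ordering
	// maintained by the source cache is lost.
	for (Map.Entry<Long, Page<T>> entry : from.asMap().entrySet()) {
		to.put(entry.getKey(), entry.getValue());
	}
}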
 
Example 8  Project: DBus   File: GuavaLocalCache.java
@Override
public Map<String, Object> asMap(String cacheName) {
    Cache<String, Object> cache = container.get(cacheName);
    if (cache == null) {
        return Collections.emptyMap();
    }
    Map<String, Object> map = cache.asMap();
    if (map != null) {
        return ImmutableMap.copyOf(map);
    }
    return Collections.emptyMap();
}
 
Example 9  Project: alfresco-repository   File: LockStoreImpl.java
private static ConcurrentMap<NodeRef, LockState> createMap(long expiry, TimeUnit timeUnit)
{
    Cache<NodeRef, LockState> cache = CacheBuilder.newBuilder()
                .concurrencyLevel(32)
                .expireAfterWrite(expiry, timeUnit)
                .build();
    return cache.asMap();
}
 
Example 10  Project: incubator-heron   File: WebSink.java
@Override
public void processRecord(MetricsRecord record) {
  final String[] sources = MetricsUtil.splitRecordSource(record);
  final String source;

  if (sources.length > 2) {
    source = shouldIncludeTopologyName
        ? String.format("%s/%s/%s", getTopologyName(), sources[1], sources[2])
        : String.format("/%s/%s", sources[1], sources[2]);
  } else {
    source = shouldIncludeTopologyName
        ? String.format("%s/%s", getTopologyName(), record.getSource())
        : String.format("/%s", record.getSource());
  }

  if (flattenMetrics) {
    metricsCache.putAll(processMetrics(source + "/", record.getMetrics()));
  } else {
    Map<String, Double> sourceCache;
    Object sourceObj = metricsCache.getIfPresent(source);
    if (sourceObj instanceof Map) {
      @SuppressWarnings("unchecked")
      Map<String, Double> castObj = (Map<String, Double>) sourceObj;
      sourceCache = castObj;
    } else {
      final Cache<String, Double> newSourceCache = createCache();
      sourceCache = newSourceCache.asMap();
    }
    sourceCache.putAll(processMetrics("", record.getMetrics()));
    metricsCache.put(source, sourceCache);
  }
}
 
Example 11  Project: heroic   File: ElasticsearchSuggestModule.java
@Provides
@ElasticsearchScope
public RateLimitedCache<Pair<String, HashCode>> writeCache(final HeroicReporter reporter) {
    final Cache<Pair<String, HashCode>, Boolean> cache = CacheBuilder
        .newBuilder()
        .concurrencyLevel(writeCacheConcurrency)
        .maximumSize(writeCacheMaxSize)
        .expireAfterWrite(writeCacheDurationMinutes, MINUTES)
        .build();

    reporter.registerCacheSize("elasticsearch-suggest-write-through", cache::size);


    if (writesPerSecond <= 0d) {
        return new DisabledRateLimitedCache<>(cache.asMap());
    }

    if (distributedCacheSrvRecord.length() > 0) {
        return new DistributedRateLimitedCache<>(
          cache.asMap(),
          RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS),
          MemcachedConnection.create(distributedCacheSrvRecord),
          toIntExact(Duration.of(writeCacheDurationMinutes, MINUTES).convert(SECONDS)),
          reporter.newMemcachedReporter("suggest")
        );
    }

    return new DefaultRateLimitedCache<>(cache.asMap(),
        RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS));
}
 
Example 12  Project: heroic   File: ElasticsearchMetadataModule.java
@Provides
@ElasticsearchScope
public RateLimitedCache<Pair<String, HashCode>> writeCache(HeroicReporter reporter) {
    final Cache<Pair<String, HashCode>, Boolean> cache = CacheBuilder
        .newBuilder()
        .concurrencyLevel(writeCacheConcurrency)
        .maximumSize(writeCacheMaxSize)
        .expireAfterWrite(writeCacheDurationMinutes, TimeUnit.MINUTES)
        .build();

    reporter.registerCacheSize("elasticsearch-metadata-write-through", cache::size);

    if (writesPerSecond <= 0d) {
        return new DisabledRateLimitedCache<>(cache.asMap());
    }

    if (distributedCacheSrvRecord.length() > 0) {
        return new DistributedRateLimitedCache<>(
          cache.asMap(),
          RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS),
          MemcachedConnection.create(distributedCacheSrvRecord),
          toIntExact(Duration.of(writeCacheDurationMinutes, MINUTES).convert(SECONDS)),
          reporter.newMemcachedReporter("metadata")
        );
    }

    return new DefaultRateLimitedCache<>(cache.asMap(),
        RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, TimeUnit.SECONDS));
}
 
Example 13  Project: pinpoint   File: DefaultActiveTraceRepository.java
private ConcurrentMap<ActiveTraceHandle, ActiveTrace> createCache(int maxActiveTraceSize) {
    final CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    cacheBuilder.concurrencyLevel(64);
    cacheBuilder.initialCapacity(maxActiveTraceSize);
    cacheBuilder.maximumSize(maxActiveTraceSize);

    final Cache<ActiveTraceHandle, ActiveTrace> localCache = cacheBuilder.build();
    return localCache.asMap();
}
 
Example 14  Project: pinpoint   File: SimpleCache.java
private ConcurrentMap<T, Result> createCache(int maxCacheSize) {
    final CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    cacheBuilder.concurrencyLevel(64);
    cacheBuilder.initialCapacity(maxCacheSize);
    cacheBuilder.maximumSize(maxCacheSize);
    Cache<T, Result> localCache = cacheBuilder.build();
    ConcurrentMap<T, Result> cache = localCache.asMap();
    return cache;
}
 
Example 15  Project: pinpoint   File: LRUCache.java
public LRUCache(int maxCacheSize) {
    final CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    cacheBuilder.concurrencyLevel(32);
    cacheBuilder.initialCapacity(maxCacheSize);
    cacheBuilder.maximumSize(maxCacheSize);
    Cache<T, Object> localCache = cacheBuilder.build();
    this.cache = localCache.asMap();
}
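
Examples 13 through 15 all follow the same pattern: configure a concurrency level and a maximumSize on a CacheBuilder, then hand out the asMap() view as a size-bounded ConcurrentMap. A condensed, illustrative sketch of that pattern (class name and sizes are made up for the demo):

import java.util.concurrent.ConcurrentMap;

import com.google.common.cache.CacheBuilder;

public class BoundedMapSketch {
    public static void main(String[] args) {
        // A ConcurrentMap that Guava keeps bounded: once more than maximumSize
        // distinct keys have been inserted, older entries are evicted and the
        // view shrinks accordingly.
        ConcurrentMap<Integer, String> bounded = CacheBuilder.newBuilder()
                .concurrencyLevel(4)
                .maximumSize(100)
                .<Integer, String>build()
                .asMap();

        for (int i = 0; i < 1_000; i++) {
            bounded.put(i, "value-" + i);
        }

        System.out.println(bounded.size() <= 100); // true: excess entries were evicted
    }
}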
 
Example 16  Project: registry   File: SchemaIdVersionTest.java
@Test
public void testEqualityWithCache() throws Exception {
    Cache<SchemaIdVersion, String> cache = CacheBuilder.newBuilder().maximumSize(10).build();
    ConcurrentMap<SchemaIdVersion, String> map = cache.asMap();
    doTestSchemaVersionIdEquality(map);
}