com.google.common.cache.LoadingCache#getUnchecked() source code examples

Listed below are example usages of com.google.common.cache.LoadingCache#getUnchecked() taken from open-source projects; the full source of each file can be viewed on GitHub.
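
Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: getUnchecked() loads a missing entry through the CacheLoader without declaring a checked exception, and wraps any loader failure in an UncheckedExecutionException (which several of the snippets below unwrap). The class and key names in this sketch are illustrative only and are not taken from any of the listed projects.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.UncheckedExecutionException;

// Illustrative sketch only; class and key names are hypothetical.
public class GetUncheckedSketch {
    public static void main(String[] args) {
        // Loader that computes a value from the key; a RuntimeException thrown
        // here reaches getUnchecked() callers wrapped in UncheckedExecutionException.
        LoadingCache<String, Integer> lengths = CacheBuilder.newBuilder()
                .maximumSize(100)
                .build(new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load(String key) {
                        if (key.isEmpty()) {
                            throw new IllegalArgumentException("empty key");
                        }
                        return key.length();
                    }
                });

        // On a miss, getUnchecked() invokes load() and caches the result,
        // without forcing the caller to handle ExecutionException as get() does.
        System.out.println(lengths.getUnchecked("guava")); // prints 5

        // A failing load is reported as an unchecked exception whose cause is
        // the original exception thrown by load().
        try {
            lengths.getUnchecked("");
        } catch (UncheckedExecutionException e) {
            System.err.println("load failed: " + e.getCause());
        }
    }
}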

Example 1  Project: entity-fishing   File: RelatednessTest.java
@Test
public void whenCacheReachMaxSizeRemove(){
    CacheLoader<String, String> loader = new CacheLoader<String, String>() {
        @Override
        public String load(String key) throws Exception {
            return key.toUpperCase();
        }
    };
    LoadingCache<String, String> cache = CacheBuilder.newBuilder().maximumSize(3).build(loader);

    cache.getUnchecked("one");
    cache.getUnchecked("two");
    cache.getUnchecked("three");
    cache.getUnchecked("four");

    assertEquals(3, cache.size());

    assertNull(cache.getIfPresent("one"));
    assertEquals("FOUR", cache.getIfPresent("four"));
}
 
Example 2  Project: brooklyn-server   File: SshMachineLocation.java
protected <T> T execSsh(final Map<String, ?> props, final Function<ShellTool, T> task) {
    final LoadingCache<Map<String, ?>, Pool<SshTool>> sshPoolCache = getSshPoolCache();
    Pool<SshTool> pool = sshPoolCache.getUnchecked(props);
    if (LOG.isTraceEnabled()) {
        LOG.trace("{} execSsh got pool: {}", this, pool);
    }

    if (groovyTruth(props.get(CLOSE_CONNECTION.getName()))) {
        Function<SshTool, T> close = new Function<SshTool, T>() {
            @Override
            public T apply(SshTool input) {
                T result = task.apply(input);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{} invalidating all sshPoolCache entries: {}", SshMachineLocation.this, sshPoolCache.stats().toString());
                }
                sshPoolCache.invalidateAll();
                sshPoolCache.cleanUp();
                return result;
            }
        };
        return pool.exec(close);
    } else {
        return pool.exec(task);
    }
}
 
Example 3  Project: tutorials   File: GuavaCacheUnitTest.java
@Test
public void whenCacheReachMaxSize_thenEviction() {
    final CacheLoader<String, String> loader = new CacheLoader<String, String>() {
        @Override
        public final String load(final String key) {
            return key.toUpperCase();
        }
    };
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder().maximumSize(3).build(loader);
    cache.getUnchecked("first");
    cache.getUnchecked("second");
    cache.getUnchecked("third");
    cache.getUnchecked("forth");
    assertEquals(3, cache.size());
    assertNull(cache.getIfPresent("first"));
    assertEquals("FORTH", cache.getIfPresent("forth"));
}
 
Example 4  Project: tutorials   File: GuavaCacheUnitTest.java
@Test
public void whenEntryIdle_thenEviction() throws InterruptedException {
    final CacheLoader<String, String> loader = new CacheLoader<String, String>() {
        @Override
        public final String load(final String key) {
            return key.toUpperCase();
        }
    };
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder().expireAfterAccess(2, TimeUnit.MILLISECONDS).build(loader);
    cache.getUnchecked("hello");
    assertEquals(1, cache.size());
    cache.getUnchecked("hello");
    Thread.sleep(3);
    cache.getUnchecked("test");
    assertEquals(1, cache.size());
    assertNull(cache.getIfPresent("hello"));
}
 
Example 5  Project: tutorials   File: GuavaCacheUnitTest.java
@Test
public void whenEntryLiveTimeExpire_thenEviction() throws InterruptedException {
    final CacheLoader<String, String> loader = new CacheLoader<String, String>() {
        @Override
        public final String load(final String key) {
            return key.toUpperCase();
        }
    };
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.MILLISECONDS).build(loader);
    cache.getUnchecked("hello");
    assertEquals(1, cache.size());
    Thread.sleep(3);
    cache.getUnchecked("test");
    assertEquals(1, cache.size());
    assertNull(cache.getIfPresent("hello"));
}
 
Example 6  Project: tutorials   File: GuavaCacheUnitTest.java
@Test
public void whenEntryRemovedFromCache_thenNotify() {
    final CacheLoader<String, String> loader = new CacheLoader<String, String>() {
        @Override
        public final String load(final String key) {
            return key.toUpperCase();
        }
    };
    final RemovalListener<String, String> listener = new RemovalListener<String, String>() {
        @Override
        public void onRemoval(final RemovalNotification<String, String> n) {
            if (n.wasEvicted()) {
                final String cause = n.getCause().name();
                assertEquals(RemovalCause.SIZE.toString(), cause);
            }
        }
    };
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder().maximumSize(3).removalListener(listener).build(loader);
    cache.getUnchecked("first");
    cache.getUnchecked("second");
    cache.getUnchecked("third");
    cache.getUnchecked("last");
    assertEquals(3, cache.size());
}
 
Example 7  Project: presto   File: CachingJdbcClient.java
private static <K, V> V get(LoadingCache<K, V> cache, K key)
{
    try {
        return cache.getUnchecked(key);
    }
    catch (UncheckedExecutionException e) {
        throwIfInstanceOf(e.getCause(), PrestoException.class);
        throw e;
    }
}
 
Example 8  Project: presto   File: CachingHiveMetastore.java
private static <K, V> V get(LoadingCache<K, V> cache, K key)
{
    try {
        return cache.getUnchecked(key);
    }
    catch (UncheckedExecutionException e) {
        throwIfInstanceOf(e.getCause(), PrestoException.class);
        throw e;
    }
}

Example 9
 
@Override
public IntCollection greaterOrEqual(int valueId, double max) {
	if (useCache) {
		GreaterOrEqualCall call = new GreaterOrEqualCall(valueId, max);
		LoadingCache<GreaterOrEqualCall, IntCollection> cache = getCache();
		return cache.getUnchecked(call);
	}
	return greaterOrEqual_(valueId, max);
}
 
Example 10  Project: datacollector   File: JdbcUtil.java
/**
 * Write records to a JDBC destination using the recordWriter specified by key, and handle errors
 *
 * @param recordIterator iterator of SDC records
 * @param key key to select the recordWriter
 * @param recordWriters JDBC record writer cache
 * @param errorRecordHandler error record handler
 * @param perRecord whether to write per record (true) or per batch (false)
 * @throws StageException
 */
public <T> void write(
    Iterator<Record> recordIterator,
    T key,
    LoadingCache<T, JdbcRecordWriter> recordWriters,
    ErrorRecordHandler errorRecordHandler,
    boolean perRecord
) throws StageException {
  final JdbcRecordWriter jdbcRecordWriter;
  try {
    jdbcRecordWriter = recordWriters.getUnchecked(key);
  } catch (UncheckedExecutionException ex) {
    final Throwable throwable = ex.getCause();
    final ErrorCode errorCode;
    final Object[] messageParams;
    if (throwable instanceof StageException) {
      StageException stageEx = (StageException) throwable;
      errorCode = stageEx.getErrorCode();
      messageParams = stageEx.getParams();
    } else {
      errorCode = JdbcErrors.JDBC_301;
      messageParams = new Object[] {ex.getMessage(), ex.getCause()};
    }
    // Failed to create RecordWriter, report all as error records.
    while (recordIterator.hasNext()) {
      Record record = recordIterator.next();
      errorRecordHandler.onError(new OnRecordErrorException(record, errorCode, messageParams));
    }
    return;
  }
  List<OnRecordErrorException> errors = perRecord
      ? jdbcRecordWriter.writePerRecord(recordIterator)
      : jdbcRecordWriter.writeBatch(recordIterator);

  for (OnRecordErrorException error : errors) {
    errorRecordHandler.onError(error);
  }
}

Example 11
 
@Test
public void testSubsetOfMapKeys() {
    final Set<String> validKeys = ImmutableSet.of("a", "b", "c");
    LoadingCache<Map<String, Integer>, Map<Integer, String>> keySubset =
            KeyTransformingLoadingCache.from(keyValueSwapCache, new Function<Map<String, Integer>, Map<String, Integer>>() {
                @Override
                public Map<String, Integer> apply(Map<String, Integer> input) {
                    Map<String, Integer> replacement = Maps.newHashMap(input);
                    replacement.keySet().retainAll(validKeys);
                    return replacement;
                }
            });

    Map<Integer, String> output = keySubset.getUnchecked(ImmutableMap.of("a", 1, "b", 2, "d", 4));
    assertEquals(output, ImmutableMap.of(1, "a", 2, "b"));
    assertEquals(keySubset.size(), 1, "Expected cache to contain one value");
    assertEquals(keySubset.stats().loadCount(), 1, "Expected cache to have loaded one value");

    // Check input with different key reducing to same map gives same output
    Map<Integer, String> output2 = keySubset.getUnchecked(ImmutableMap.of("a", 1, "b", 2, "z", 26));
    assertEquals(output2, output);
    assertEquals(keySubset.size(), 1, "Expected cache to contain one value");
    assertEquals(keySubset.stats().loadCount(), 1, "Expected cache to have loaded one value");

    // A different reduced key loads and caches a second value
    keySubset.getUnchecked(ImmutableMap.of("c", 3));
    assertEquals(keySubset.size(), 2, "Expected cache to contain two values");
    assertEquals(keySubset.stats().loadCount(), 2, "Expected cache to have loaded a second value");
}
 
Example 12  Project: tchannel-java   File: PrefixedHeadersCarrier.java
private static Function<String, String> cachingTransformer(
        Function<String, String> transformer
) {
    final LoadingCache<String, String> cache = CacheBuilder.newBuilder()
            .maximumSize(MAX_CACHE_SIZE)
            .build(CacheLoader.from(transformer));
    return new Function<String, String>() {
        @Override
        public String apply(String key) {
            return cache.getUnchecked(key);
        }
    };
}
 
Example 13  Project: molgenis   File: L3Cache.java
public List<Object> get(Repository<Entity> repository, Query<Entity> query) {
  // Set fetch to null because we are only caching identifiers
  LoadingCache<Query<Entity>, List<Object>> cache = getQueryCache(repository);
  Query<Entity> fetchlessQuery = new QueryImpl<>(query);
  fetchlessQuery.setFetch(null);
  return cache.getUnchecked(fetchlessQuery);
}
 
Example 14  Project: buck   File: LinkableListFilterFactory.java
@Nonnull
private static Map<BuildTarget, String> getCachedBuildTargetToLinkGroupMap(
    ImmutableList<CxxLinkGroupMapping> mapping, TargetGraph targetGraph) {
  LoadingCache<ImmutableList<CxxLinkGroupMapping>, Map<BuildTarget, String>> groupingCache =
      graphCache.getUnchecked(targetGraph);
  return groupingCache.getUnchecked(mapping);
}
 
Example 15  Project: buck   File: TargetSpecResolver.java
/**
 * Creates a {@link TargetSpecResolver} instance using {@link
 * BuildTargetPatternToBuildPackagePathComputation}
 *
 * @param eventBus Event bus to send performance events to
 * @param executor The executor for the {@link GraphTransformationEngine}
 * @param cellProvider Provider to get a cell by path; this is a workaround for the fact that a
 *     cell itself is not really hashable, so we use the cell path instead as a key for the
 *     appropriate caches
 * @param dirListCachePerRoot Global cache that stores a mapping of cell root path to a cache of
 *     all directory structures under that cell
 * @param fileTreeCachePerRoot Global cache that stores a mapping of cell root path to a cache of
 *     all file tree structures under that cell
 */
public static TargetSpecResolver createWithFileSystemCrawler(
    BuckEventBus eventBus,
    DepsAwareExecutor<? super ComputeResult, ?> executor,
    CellProvider cellProvider,
    LoadingCache<Path, DirectoryListCache> dirListCachePerRoot,
    LoadingCache<Path, FileTreeCache> fileTreeCachePerRoot) {
  // For each cell we create a separate graph engine. The purpose of the graph engine is to
  // recursively build a file tree with all files in the appropriate cell for the appropriate path.
  // This file tree is later used to resolve a target pattern to the list of build files where
  // those targets are defined.
  // For example, for a target pattern like //project/folder/... it returns all files and folders
  // under [cellroot]/project/folder recursively as a FileTree object. We then traverse the
  // FileTree object looking for a build file name in all subfolders recursively.
  // The graph engine automatically ensures the right amount of parallelism and caches the data.
  return new TargetSpecResolver(
      eventBus,
      cellProvider,
      (Path cellPath, String buildFileName, ProjectFilesystemView fileSystemView) -> {
        DirectoryListCache dirListCache = dirListCachePerRoot.getUnchecked(cellPath);
        Verify.verifyNotNull(
            dirListCache,
            "Injected directory list cache map does not have cell %s",
            fileSystemView.getRootPath());

        FileTreeCache fileTreeCache = fileTreeCachePerRoot.getUnchecked(cellPath);
        Verify.verifyNotNull(
            fileTreeCache,
            "Injected file tree cache map does not have cell %s",
            fileSystemView.getRootPath());

        return new DefaultGraphTransformationEngine(
            ImmutableList.of(
                new GraphComputationStage<>(
                    BuildTargetPatternToBuildPackagePathComputation.of(
                        buildFileName, fileSystemView)),
                new GraphComputationStage<>(
                    DirectoryListComputation.of(fileSystemView), dirListCache),
                new GraphComputationStage<>(FileTreeComputation.of(), fileTreeCache)),
            16,
            executor);
      });
}