Listed below are example code and usage patterns for the com.google.common.cache.Cache API class; you can also click through to view the source code on GitHub.
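Before the project examples, here is a minimal self-contained sketch of the core API (the class name and the values are illustrative, not taken from any example below):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

public class CacheQuickStart {
    public static void main(String[] args) throws ExecutionException {
        // Bounded cache whose entries expire ten minutes after they are written.
        Cache<String, String> cache = CacheBuilder.newBuilder()
            .maximumSize(100)
            .expireAfterWrite(10, TimeUnit.MINUTES)
            .build();

        cache.put("k", "v");                               // explicit insert
        String hit = cache.getIfPresent("k");              // "v"; returns null on a miss, never computes
        String loaded = cache.get("x", () -> "computed");  // atomic get-or-compute via a Callable
        cache.invalidate("k");                             // explicit removal
        System.out.println(hit + " " + loaded + " " + cache.getIfPresent("k")); // v computed null
    }
}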
public GraphqlHandler(Distributor distributor, URI publicUri) {
this.distributor = Objects.requireNonNull(distributor);
this.publicUri = Objects.requireNonNull(publicUri);
GraphQLSchema schema = new SchemaGenerator()
.makeExecutableSchema(buildTypeDefinitionRegistry(), buildRuntimeWiring());
Cache<String, PreparsedDocumentEntry> cache = CacheBuilder.newBuilder()
.maximumSize(1024)
.build();
graphQl = GraphQL.newGraphQL(schema)
.preparsedDocumentProvider((executionInput, computeFunction) -> {
try {
return cache.get(executionInput.getQuery(), () -> computeFunction.apply(executionInput));
} catch (ExecutionException e) {
if (e.getCause() instanceof RuntimeException) {
throw (RuntimeException) e.getCause();
} else if (e.getCause() != null) {
throw new RuntimeException(e.getCause());
}
throw new RuntimeException(e);
}
})
.build();
}
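The try/catch above reflects the contract of Cache.get(key, Callable): a checked exception thrown by the loader is delivered wrapped in an ExecutionException (Guava reports unchecked loader failures as UncheckedExecutionException instead). If several call sites need it, the unwrapping can be factored out; a hedged sketch, where CacheHelpers.getUnchecked is a hypothetical name rather than anything from the snippet:

import com.google.common.cache.Cache;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

final class CacheHelpers {
    // Hypothetical helper: get-or-compute that rethrows the loader's
    // failure without the checked ExecutionException wrapper.
    static <K, V> V getUnchecked(Cache<K, V> cache, K key, Callable<V> loader) {
        try {
            return cache.get(key, loader);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause; // defensive: the cause is normally a checked exception here
            }
            throw new RuntimeException(cause != null ? cause : e);
        }
    }
}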
/**
* Builds an instance of {@link HMSCache}.
* @return {@link HMSCache}
*/
@SuppressWarnings("unchecked")
public HMSCache build() throws StageException {
Utils.checkArgument(
!cacheTypes.isEmpty(),
"Invalid HMSCache configuration: at least one cache type must be supported"
);
Map<HMSCacheType, Cache<String, Optional<HMSCacheSupport.HMSCacheInfo>>> cacheMap = new HashMap<>();
CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
if (maxCacheSize > 0) {
cacheBuilder.maximumSize(maxCacheSize);
}
for (HMSCacheType type : cacheTypes) {
cacheMap.put(type, cacheBuilder.build());
}
return new HMSCache(cacheMap);
}
@Test
public void testCacheGuava() {
Cache<Object, Integer> cache = CacheBuilder.newBuilder()
.maximumSize(1000)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build();
called = 0;
Supplier<Integer> fn = FluentFunctions.of(this::getOne)
    .name("myFunction")
    .memoize((key, f) -> cache.get(key, () -> f.apply(key)));
fn.get();
fn.get();
fn.get();
assertThat(called, equalTo(1));
}
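A note on the pattern above: memoize delegates result storage to the Guava cache, so the memo table inherits the cache's policies. maximumSize(1000) bounds how many results are retained, and expireAfterWrite means a later call recomputes after expiry; that is why called stays at 1 only across the three back-to-back invocations.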
@Override
public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request,
RpcCallback<ClearTableFromCacheResponse> done) {
byte[] schemaName = request.getSchemaName().toByteArray();
byte[] tableName = request.getTableName().toByteArray();
try {
byte[] tenantId = request.getTenantId().toByteArray();
byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
Cache<ImmutableBytesPtr, PTable> metaDataCache =
GlobalCache.getInstance(this.env).getMetaDataCache();
metaDataCache.invalidate(cacheKey);
} catch (Throwable t) {
logger.error("clearTableFromCache failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
}
public RemoveEntryTask(final Cache<Long, byte[]> payloadCache,
final PublishPayloadLocalPersistence localPersistence,
final BucketLock bucketLock,
final Queue<RemovablePayload> removablePayloads,
final long removeDelay,
final ConcurrentHashMap<Long, AtomicLong> referenceCounter,
final long taskMaxDuration) {
this.payloadCache = payloadCache;
this.localPersistence = localPersistence;
this.bucketLock = bucketLock;
this.removablePayloads = removablePayloads;
this.removeDelay = removeDelay;
this.referenceCounter = referenceCounter;
this.taskMaxDuration = taskMaxDuration;
}
@Test
public void getCache()
{
String cacheName = "test";
final Cache<Object, Object> cache = new NullCache<>();
final CacheProvider virtualHost = mock(CacheProvider.class, withSettings().extraInterfaces(VirtualHost.class));
when(virtualHost.getNamedCache(cacheName)).thenReturn(cache);
final Subject subject = new Subject();
subject.getPrincipals().add(new VirtualHostPrincipal((VirtualHost<?>) virtualHost));
subject.setReadOnly();
Cache<String, String> actualCache = Subject.doAs(subject,
(PrivilegedAction<Cache<String, String>>) () -> CacheFactory.getCache(cacheName,
null));
assertSame(actualCache, cache);
verify(virtualHost).getNamedCache(cacheName);
}
@Override
public Set<ClientData> getListeningClientsData(ConfigMeta meta) {
Cache<String, Long> cache = onlineClients.get(meta);
if (cache == null) {
return ImmutableSet.of();
}
Set<ClientData> clients = Sets.newHashSetWithExpectedSize((int)cache.size());
for (Map.Entry<String, Long> entry : cache.asMap().entrySet()) {
String ip = entry.getKey();
long version = entry.getValue();
ClientData client = new ClientData(ip, version);
clients.add(client);
}
return clients;
}
@Override
public void run() {
try {
String oldName = Thread.currentThread().getName();
Thread.currentThread().setName("qconfig-config-listener-clearUp");
try {
for (Cache<Listener, Listener> cache : listenerMappings.values()) {
cache.cleanUp();
}
} finally {
Thread.currentThread().setName(oldName);
}
} catch (Exception e) {
logger.error("schedule listener clear up error", e);
}
}
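Guava caches do most of their maintenance lazily, piggybacked on reads and writes; the scheduled cleanUp() calls above force expired entries to be processed (and removal listeners to fire) even while a listener cache sits idle.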
@SuppressWarnings("unchecked")
public <T> T get(String cacheName, String key) {
try {
Cache<String, Object> cache = getCacheHolder(cacheName);
if (cache != null) {
Object result = cache.get(key, new Callable<Object>() {
@Override
public Object call() throws Exception {
return _NULL;
}
});
if (result != null && !_NULL.equals(result)) {
return (T) result;
}
}
} catch (Exception e) {
logger.warn("get LEVEL1 cache error", e);
}
return null;
}
@Override
public void processRecord(MetricsRecord record) {
final String[] sources = MetricsUtil.splitRecordSource(record);
if (sources.length > 2) {
final String source = String.format("%s/%s/%s", getTopologyName(), sources[1], sources[2]);
Map<String, Double> sourceCache = metricsCache.getIfPresent(source);
if (sourceCache == null) {
final Cache<String, Double> newSourceCache = createCache();
sourceCache = newSourceCache.asMap();
}
sourceCache.putAll(processMetrics(record.getMetrics()));
metricsCache.put(source, sourceCache);
} else {
LOG.log(Level.SEVERE, "Unexpected metrics source: " + record.getSource());
}
}
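One caveat about the snippet above: getIfPresent followed by put is not atomic, so two concurrent calls can each build a fresh per-source map and one batch of metrics can be lost. Guava's get(key, Callable) performs the lookup-or-create step atomically. A hedged sketch of the same flow, assuming metricsCache is a Cache<String, Map<String, Double>> as the snippet implies (note the original re-put also refreshed the entry's write timestamp, which this version drops):

try {
    // Creates the per-source map at most once, even under concurrent calls.
    Map<String, Double> sourceCache = metricsCache.get(source, () -> createCache().asMap());
    sourceCache.putAll(processMetrics(record.getMetrics()));
} catch (ExecutionException e) {
    throw new IllegalStateException("failed to create per-source metrics map", e);
}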
@Test
public void testCacheNewEntries() throws StageException {
SchemaGeneratorConfig config = new SchemaGeneratorConfig();
config.enableCache = true;
config.cacheKeyExpression = "key"; // Constant
ProcessorRunner runner = new ProcessorRunner.Builder(SchemaGeneratorDProcessor.class, new SchemaGeneratorProcessor(config))
.addOutputLane("a")
.build();
runner.runInit();
// Cache is empty at the beginning
Cache<String, String> cache = (Cache<String, String>) runner.getContext().getStageRunnerSharedMap().get(SchemaGeneratorProcessor.CACHE_KEY);
Assert.assertNotNull(cache);
Assert.assertNull(cache.getIfPresent("key"));
Record record = RecordCreator.create();
record.set(Field.create(Field.Type.LIST_MAP, ImmutableMap.of(
"a", Field.create(Field.Type.STRING, "Arvind")
)));
runner.runProcess(ImmutableList.of(record));
// Now a new key should exist for this record in the cache
Assert.assertNotNull(cache.getIfPresent("key"));
}
public HandlerToPreparePlan(
QueryContext context,
SqlNode sqlNode,
SqlToPlanHandler handler,
Cache<Long, PreparedPlan> planCache,
String sql,
AttemptObserver observer,
SqlHandlerConfig config) {
this.context = context;
this.sqlNode = sqlNode;
this.handler = handler;
this.planCache = planCache;
this.sql = sql;
this.observer = observer;
this.config = config;
}
private CompletableFuture<ResolverResult> resolveWithCache(@Nonnull FDBRecordContext context,
@Nonnull ScopedValue<String> scopedName,
@Nonnull Cache<ScopedValue<String>, ResolverResult> directoryCache,
@Nonnull ResolverCreateHooks hooks) {
ResolverResult value = directoryCache.getIfPresent(scopedName);
if (value != null) {
return CompletableFuture.completedFuture(value);
}
return context.instrument(
FDBStoreTimer.Events.DIRECTORY_READ,
runAsyncBorrowingReadVersion(context, childContext -> readOrCreateValue(childContext, scopedName.getData(), hooks))
).thenApply(fetched -> {
directoryCache.put(scopedName, fetched);
return fetched;
});
}
/**
* Demonstrates lookups against a cache built with weakKeys(), which compares keys by identity.
*/
public static void main(String[] args) {
Cache<String, String> makeMap = CacheBuilder.newBuilder().weakKeys().maximumSize(10).build();
for (int i = 0; i < 7; i++) {
makeMap.put("a" + i, "V" + i);
}
System.out.println(Joiner.on(", ").withKeyValueSeparator("=").join(makeMap.asMap()));
for (int i = 0; i < 1; i++) {
makeMap.put("b" + i, "V" + i);
}
System.out.println(Joiner.on(", ").withKeyValueSeparator("=").join(makeMap.asMap()));
System.out.println(makeMap.asMap().containsKey("a1"));
System.out.println(makeMap.asMap().containsKey("a4"));
System.out.println(makeMap.asMap().containsKey("a5"));
System.out.println(makeMap.asMap().get("a1"));
System.out.println(makeMap.asMap().get("a4"));
System.out.println(makeMap.asMap().get("a5"));
}
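Worth noting about this demo: weakKeys() switches the cache to identity (==) key comparison, so the literal "a1" passed to containsKey is a different object from the "a" + i string built in the loop. The lookups therefore print false/null even before any garbage collection, and because keys are weakly referenced, entries can additionally vanish once nothing else holds the key.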
@Test
public void str8Caching()
{
String testString = "Test";
BBEncoder encoder = new BBEncoder(64);
encoder.writeStr8(testString);
encoder.writeStr8(testString);
ByteBuffer buffer = encoder.buffer();
BBDecoder decoder = new BBDecoder();
decoder.init(buffer);
Cache<Binary, String> original = BBDecoder.getStringCache();
Cache<Binary, String> cache = CacheBuilder.newBuilder().maximumSize(2).build();
try
{
BBDecoder.setStringCache(cache);
String decodedString1 = decoder.readStr8();
String decodedString2 = decoder.readStr8();
assertThat(testString, is(equalTo(decodedString1)));
assertThat(testString, is(equalTo(decodedString2)));
assertSame(decodedString1, decodedString2);
}
finally
{
cache.cleanUp();
BBDecoder.setStringCache(original);
}
}
private static void getNodeHashMap(Long id, GraphDatabaseService gdb, Cache<Long, HashMap<String, Object>> cache) {
Node thisNode = gdb.getNodeById(id);
List<String> keys = new ArrayList<>();
HashMap<String, Object> nodeMap = new HashMap<>();
IteratorUtil.addToCollection(thisNode.getPropertyKeys(), keys)
.stream()
.forEach(n -> nodeMap.put(n, thisNode.getProperty(n)));
nodeMap.put("id", id);
cache.put(id, nodeMap);
}
private <T> Cache<String, T> newCache() {
Cache<String, T> cache = CacheBuilder.newBuilder()
.maximumSize(m_configUtil.getMaxConfigCacheSize())
.expireAfterAccess(m_configUtil.getConfigCacheExpireTime(), m_configUtil.getConfigCacheExpireTimeUnit())
.build();
allCaches.add(cache);
return cache;
}
private ManagedCache(Cache<String, Object> cache, String name, long expiryMs, long maxItems) {
this.cache = cache;
this.name = name;
this.expiry = expiryMs;
this.maxItems = maxItems;
if (counter.incrementAndGet() == 1) {
// clean up cache every 5 minutes to promote garbage collection
CleanUp cleanUp = new CleanUp();
cleanUp.start();
}
}
@Provides
@ElasticsearchScope
public RateLimitedCache<Pair<String, HashCode>> writeCache(final HeroicReporter reporter) {
final Cache<Pair<String, HashCode>, Boolean> cache = CacheBuilder
.newBuilder()
.concurrencyLevel(writeCacheConcurrency)
.maximumSize(writeCacheMaxSize)
.expireAfterWrite(writeCacheDurationMinutes, MINUTES)
.build();
reporter.registerCacheSize("elasticsearch-suggest-write-through", cache::size);
if (writesPerSecond <= 0d) {
return new DisabledRateLimitedCache<>(cache.asMap());
}
if (distributedCacheSrvRecord.length() > 0) {
return new DistributedRateLimitedCache<>(
cache.asMap(),
RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS),
MemcachedConnection.create(distributedCacheSrvRecord),
toIntExact(Duration.of(writeCacheDurationMinutes, MINUTES).convert(SECONDS)),
reporter.newMemcachedReporter("suggest")
);
}
return new DefaultRateLimitedCache<>(cache.asMap(),
RateLimiter.create(writesPerSecond, rateLimitSlowStartSeconds, SECONDS));
}
/**
* Implements a threadsafe addition to the message map
*/
private void addMessageToMap(TransferableChatMessage msg) {
String channelId = msg.getChannelId();
// the Guava cache itself is thread-safe, so this synchronization may not be necessary
synchronized (messageMap){
// get all users (sessions) present in the channel the message is addressed to
Cache<String, TransferableChatMessage> sessionsInChannel = heartbeatMap.getIfPresent(channelId);
if(sessionsInChannel != null) {
for(String sessionId : sessionsInChannel.asMap().keySet()) {
TransferableChatMessage tcm = sessionsInChannel.getIfPresent(sessionId);
String sessionKey = tcm.getId();
try {
Map<String, List<TransferableChatMessage>> channelMap = messageMap.get(sessionKey, () -> {
return new HashMap<String, List<TransferableChatMessage>>();
});
if(channelMap.get(channelId) == null) {
channelMap.put(channelId, new ArrayList<TransferableChatMessage>());
}
channelMap.get(channelId).add(msg);
log.debug("Added chat message to channel={}, sessionKey={}", channelId, sessionKey);
} catch(Exception e){
log.warn("Failed to add chat message to channel={}, sessionKey={}", channelId, sessionKey);
}
}
}
}
}
void prepareForExecution(
Reporter reporter,
Executor executor,
OptionsProvider options,
ActionCacheChecker actionCacheChecker,
TopDownActionCache topDownActionCache,
OutputService outputService) {
this.reporter = Preconditions.checkNotNull(reporter);
this.executorEngine = Preconditions.checkNotNull(executor);
this.progressSuppressingEventHandler = new ProgressSuppressingEventHandler(reporter);
// Start with a new map each build so there's no issue with internal resizing.
this.buildActionMap = Maps.newConcurrentMap();
this.completedAndResetActions = Sets.newConcurrentHashSet();
this.lostDiscoveredInputsMap = Maps.newConcurrentMap();
this.hadExecutionError = false;
this.actionCacheChecker = Preconditions.checkNotNull(actionCacheChecker);
this.topDownActionCache = topDownActionCache;
// Don't cache possibly stale data from the last build.
this.options = options;
// Cache some option values for performance, since we consult them on every action.
this.useAsyncExecution = options.getOptions(BuildRequestOptions.class).useAsyncExecution;
this.finalizeActions = options.getOptions(BuildRequestOptions.class).finalizeActions;
this.replayActionOutErr = options.getOptions(BuildRequestOptions.class).replayActionOutErr;
this.outputService = outputService;
RemoteOptions remoteOptions = options.getOptions(RemoteOptions.class);
this.bazelRemoteExecutionEnabled = remoteOptions != null && remoteOptions.isRemoteEnabled();
Cache<PathFragment, Boolean> cache =
CacheBuilder.from(options.getOptions(BuildRequestOptions.class).directoryCreationCacheSpec)
.concurrencyLevel(Runtime.getRuntime().availableProcessors())
.build();
this.knownRegularDirectories = Collections.newSetFromMap(cache.asMap());
}
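The last two lines above use a pattern worth calling out: Collections.newSetFromMap over cache.asMap() turns a Guava cache into a bounded concurrent set. A standalone hedged sketch, with all names illustrative:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.Collections;
import java.util.Set;

public class BoundedSetDemo {
    public static void main(String[] args) {
        // The cache enforces the size bound; the set view adds and queries through it.
        Cache<String, Boolean> backing = CacheBuilder.newBuilder()
            .maximumSize(10_000)
            .concurrencyLevel(Runtime.getRuntime().availableProcessors())
            .build();
        Set<String> knownDirectories = Collections.newSetFromMap(backing.asMap());
        knownDirectories.add("/tmp/build");                           // writes through to the cache
        System.out.println(knownDirectories.contains("/tmp/build"));  // true until evicted
    }
}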
public static BzlLoadFunction create(
RuleClassProvider ruleClassProvider,
PackageFactory packageFactory,
DigestHashFunction digestHashFunction,
Cache<Label, ASTFileLookupValue> astFileLookupValueCache) {
return new BzlLoadFunction(
ruleClassProvider,
packageFactory,
// When we are not inlining BzlLoadValue nodes, there is no need to have separate
// ASTFileLookupValue nodes for bzl files. Instead we inline ASTFileLookupFunction for a
// strict memory win, at a small code complexity cost.
//
// Detailed explanation:
// (1) The ASTFileLookupValue node for a bzl file is used only for the computation of
// that file's BzlLoadValue node. So there's no concern about duplicate work that would
// otherwise get deduped by Skyframe.
// (2) ASTFileLookupValue doesn't have an interesting equality relation, so we have no
// hope of getting any interesting change-pruning of ASTFileLookupValue nodes. If we
// had an interesting equality relation that was e.g. able to ignore benign
// whitespace, then there would be a hypothetical benefit to having separate
// ASTFileLookupValue nodes (e.g. on incremental builds we'd be able to not re-execute
// top-level code in bzl files if the file were reparsed to an equivalent AST).
// (3) An ASTFileLookupValue node lets us avoid redoing work on a BzlLoadFunction Skyframe
// restart, but we can also achieve that result ourselves with a cache that persists between
// Skyframe restarts.
//
// Therefore, ASTFileLookupValue nodes are wasteful from two perspectives:
// (a) ASTFileLookupValue contains a StarlarkFile, and that business object is really
// just a temporary thing for bzl execution. Retaining it forever is pure waste.
// (b) The memory overhead of the extra Skyframe node and edge per bzl file is pure
// waste.
new InliningAndCachingASTManager(
ruleClassProvider, digestHashFunction, astFileLookupValueCache),
/*cachedBzlLoadDataManager=*/ null);
}
private ConcurrentMap<T, Result> createCache(int maxCacheSize) {
final CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
cacheBuilder.concurrencyLevel(64);
cacheBuilder.initialCapacity(maxCacheSize);
cacheBuilder.maximumSize(maxCacheSize);
Cache<T, Result> localCache = cacheBuilder.build();
ConcurrentMap<T, Result> cache = localCache.asMap();
return cache;
}
@Test
public void testIsCached() {
final Cache<String, Boolean> cache =
CacheBuilder.newBuilder().expireAfterWrite(Duration.ofMinutes(1)).build();
final WriteCache expiringCache = new ExpiringCache(cache);
assertFalse(expiringCache.checkCacheOrSet(Utils.makeMetric()));
assertTrue(expiringCache.checkCacheOrSet(Utils.makeMetric()));
}
@Override
public Set<String> getListeningClients(ConfigMeta meta) {
Cache<String, Long> cache = onlineClients.get(meta);
if (cache == null) {
return ImmutableSet.of();
}
Set<String> clients = Sets.newHashSetWithExpectedSize((int) cache.size());
for (String ip : cache.asMap().keySet()) {
clients.add(ip);
}
return clients;
}
@Override
public void destroy() {
if (sshTunnelService != null){
sshTunnelService.stop();
}
boolean interrupted = shutdownExecutorIfNeeded();
// Invalidate all the thread caches so that all statements/result sets are properly closed.
toBeInvalidatedThreadCaches.forEach(Cache::invalidateAll);
// Close all connections.
Optional.ofNullable(connectionManager).ifPresent(ConnectionManager::closeAll);
jdbcUtil.closeQuietly(hikariDataSource);
if (interrupted) {
Thread.currentThread().interrupt();
}
}
private <K, V> Response<V> getResponse(Key<K> key, Cache<Key<K>, Response<V>> cache, Factory<Response<V>> responseFactory, Transformer<Key<K>, ? super Response<V>> keyGenerator) {
Response<V> response = key == null ? null : cache.getIfPresent(key);
if (response != null) {
return response;
} else {
response = responseFactory.create();
if (!response.isError()) {
Key<K> actualKey = keyGenerator.transform(response);
cache.put(actualKey, response);
}
return response;
}
}
@Modified
public void modified(ComponentContext context) {
readComponentConfiguration(context);
// Reset Cache and copy all.
Cache<Long, SettableFuture<CompletedBatchOperation>> prevFutures = pendingFutures;
pendingFutures = CacheBuilder.newBuilder()
.expireAfterWrite(pendingFutureTimeoutMinutes, TimeUnit.MINUTES)
.removalListener(new TimeoutFuture())
.build();
pendingFutures.putAll(prevFutures.asMap());
}
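One subtlety in this reset: putAll inserts the copied futures with a fresh write timestamp, so every surviving entry gets a full pendingFutureTimeoutMinutes window in the new cache rather than carrying over its remaining time.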
private Schema<?> get(final Class<?> cls, Cache<Class<?>, Schema<?>> cache) {
try {
return cache.get(cls, new Callable<Schema<?>>() {
@Override
public Schema<?> call() throws Exception {
return RuntimeSchema.createFrom(cls);
}
});
} catch (ExecutionException e) {
return null;
}
}
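A design note on this getter: catching ExecutionException and returning null hides the reason RuntimeSchema.createFrom(cls) failed. Rethrowing the cause, as the GraphqlHandler example near the top of this page does, keeps the failure visible to callers.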
/**
* Obtain a ManagedCache instance
*
* @param name of cache store
* @param expiryMs in milliseconds
* @param maxItems maximum number of cached objects
* @return cache instance
*/
public synchronized static ManagedCache createCache(String name, long expiryMs, long maxItems) {
ManagedCache managedCache = getInstance(name);
if (managedCache != null) {
return managedCache;
}
long expiryTimer = Math.max(expiryMs, MIN_EXPIRY);
Cache<String, Object> cache = CacheBuilder.newBuilder().maximumSize(maxItems).expireAfterWrite(expiryTimer, TimeUnit.MILLISECONDS).build();
// create cache
managedCache = new ManagedCache(cache, name, expiryTimer, maxItems);
cacheCollection.put(name, managedCache);
log.info("Created cache ({}), expiry {} ms, maxItems={}", name, expiryTimer, maxItems);
return managedCache;
}