下面列出了 com.google.common.base.Suppliers 的实例代码；也可以点击链接到 GitHub 查看源代码，或在右侧发表评论。
@Override
public void refresh() {
    // Rebuild the memoized cache; each computed value expires 10 minutes after creation.
    _cache = Suppliers.memoizeWithExpiration(() -> {
        Map<String, DataCenter> dataCenters = _dataCenterDao.loadAll();
        if (!_ignoredDataCenters.isEmpty()) {
            // Filter out ignored data centers, keeping everything else.
            ImmutableMap.Builder<String, DataCenter> filtered = ImmutableMap.builder();
            for (Map.Entry<String, DataCenter> entry : dataCenters.entrySet()) {
                if (!_ignoredDataCenters.contains(entry.getKey())) {
                    filtered.put(entry);
                } else if (_loggedIgnored.add(entry.getKey())) {
                    // Only want to log that we're ignoring the data center once
                    _log.info("Ignoring data center: {}", entry.getKey());
                }
            }
            dataCenters = filtered.build();
        }
        return new CachedInfo(dataCenters);
    }, 10, TimeUnit.MINUTES);
}
public void testRhombus() throws Exception {
    // Disable automatic rescheduling so that SchedulerImpl alone drives the dates.
    getTaskManager().getAlgorithmCollection().getRecalculateTaskScheduleAlgorithm().setEnabled(false);
    // Five tasks all starting on Monday, linked in a rhombus: 4 -> {3, 2}, 2 -> 1, {1, 3} -> 0.
    Task[] rhombusTasks = new Task[] {
        createTask(TestSetupHelper.newMonday()),
        createTask(TestSetupHelper.newMonday()),
        createTask(TestSetupHelper.newMonday()),
        createTask(TestSetupHelper.newMonday()),
        createTask(TestSetupHelper.newMonday())
    };
    TaskDependency[] links = new TaskDependency[] {
        createDependency(rhombusTasks[4], rhombusTasks[3]),
        createDependency(rhombusTasks[4], rhombusTasks[2]),
        createDependency(rhombusTasks[2], rhombusTasks[1]),
        createDependency(rhombusTasks[1], rhombusTasks[0]),
        createDependency(rhombusTasks[3], rhombusTasks[0])
    };
    DependencyGraph dependencyGraph = createGraph(rhombusTasks, links);
    new SchedulerImpl(dependencyGraph, Suppliers.ofInstance(getTaskManager().getTaskHierarchy())).run();
    // Expected schedule: task 0 stays on Monday; its direct successors 1 and 3 land on
    // Tuesday; 2 follows 1 on Wednesday; 4 follows both 2 and 3 on Thursday.
    assertEquals(TestSetupHelper.newMonday(), rhombusTasks[0].getStart());
    assertEquals(TestSetupHelper.newTuesday(), rhombusTasks[1].getStart());
    assertEquals(TestSetupHelper.newTuesday(), rhombusTasks[3].getStart());
    assertEquals(TestSetupHelper.newWendesday(), rhombusTasks[2].getStart());
    assertEquals(TestSetupHelper.newThursday(), rhombusTasks[4].getStart());
}
public void testEarliestStartEarlierThanStartDate() {
    // Disable automatic rescheduling so that SchedulerImpl alone drives the dates.
    getTaskManager().getAlgorithmCollection().getRecalculateTaskScheduleAlgorithm().setEnabled(false);
    // Start date is Monday, but the earliest-begin constraint points at Friday of the
    // previous week. With no other constraints the task should be shifted backwards.
    Task constrainedTask = createTask(TestSetupHelper.newMonday());
    constrainedTask.setThirdDateConstraint(TaskImpl.EARLIESTBEGIN);
    constrainedTask.setThirdDate(TestSetupHelper.newFriday());
    DependencyGraph dependencyGraph =
        createGraph(new Task[] {constrainedTask}, new TaskDependency[0]);
    new SchedulerImpl(dependencyGraph, Suppliers.ofInstance(getTaskManager().getTaskHierarchy())).run();
    assertEquals(TestSetupHelper.newFriday(), constrainedTask.getStart());
}
public HybridEngineFactory(List<String> wfPackges) {
    super(wfPackges);
    // Both collaborators are created lazily, at most once, on first access.
    timeoutManager = Suppliers.memoize(() -> {
        logger.info("Creating TimeoutManager...");
        return createTimeoutManager();
    });
    storage = Suppliers.memoize(() -> {
        logger.info("Creating Storage...");
        return createStorage();
    });
}
@Test
public void testPollSkipsEmptyChannels() {
// Verifies that a poll on an empty queue consults the "read" channel, falls through to
// the "write" channel, and that an immediately-following poll performs no channel reads
// at all (the emptiness appears to be cached for a short window — see comments below).
EventReaderDAO readerDao = mock(EventReaderDAO.class);
EventStore eventStore = new DefaultEventStore(readerDao, mock(EventWriterDAO.class), new AstyanaxEventIdSerializer(), new MockClaimStore());
DedupQueue q = new DedupQueue("test-queue", "read", "write",
mock(QueueDAO.class), eventStore, Suppliers.ofInstance(true), mock(ScheduledExecutorService.class), getPersistentSortedQueueFactory(),
mock(MetricRegistry.class));
q.startAndWait();
// The first poll checks the read channel, find it empty, checks the write channel.
q.poll(Duration.ofSeconds(30), new SimpleEventSink(10));
verify(readerDao).readNewer(eq("read"), Matchers.<EventSink>any());
verify(readerDao).readNewer(eq("write"), Matchers.<EventSink>any());
verifyNoMoreInteractions(readerDao);
// Clear the recorded interactions so the second poll is verified in isolation.
reset(readerDao);
// Subsequent polls w/in a short window skips the poll operations.
q.poll(Duration.ofSeconds(30), new SimpleEventSink(10));
verifyNoMoreInteractions(readerDao);
}
/**
 * Creates a template context that can expand location expressions for the given rule.
 *
 * @param delegate    context consulted for all other template lookups
 * @param ruleContext rule whose label, package and repository mapping are used
 * @param labelMap    optional precomputed label-to-artifacts map, forwarded to
 *                    {@code LocationExpander.buildLocationMap}; may be null
 * @param execPaths   forwarded to the delegating constructor — presumably selects
 *                    exec-path expansion; TODO confirm against that constructor
 * @param allowData   forwarded to {@code LocationExpander.buildLocationMap}
 * @param windowsPath forwarded to the delegating constructor (Windows-style paths)
 */
public LocationTemplateContext(
TemplateContext delegate,
RuleContext ruleContext,
@Nullable ImmutableMap<Label, ImmutableCollection<Artifact>> labelMap,
boolean execPaths,
boolean allowData,
boolean windowsPath) {
this(
delegate,
ruleContext.getLabel(),
// Use a memoizing supplier to avoid eagerly building the location map.
Suppliers.memoize(
() -> LocationExpander.buildLocationMap(ruleContext, labelMap, allowData)),
execPaths,
ruleContext.getRule().getPackage().getRepositoryMapping(),
windowsPath);
}
public UnitTestCassandraEngineFactory(boolean truncate) {
    super(Arrays.asList("org.copperengine.core.persistent.cassandra.workflows"));
    this.truncate = truncate;
    // Lazily create the backchannel and the dummy response sender on first use.
    backchannel = Suppliers.memoize(BackchannelDefaultImpl::new);
    dummyResponseSender = Suppliers.memoize(
            () -> new DummyResponseSender(scheduledExecutorService.get(), engine.get()));
    // Expose both suppliers to workflows through the dependency injector.
    dependencyInjector.get().register("dummyResponseSender", new Supplier2Provider<>(dummyResponseSender));
    dependencyInjector.get().register("backchannel", new Supplier2Provider<>(backchannel));
}
protected ZabbixFeed(final Builder<? extends ZabbixFeed, ?> builder) {
// NOTE(review): this first setConfig is overwritten by both branches of the if/else
// below, so it looks redundant — confirm setConfig has no side effects before removing.
setConfig(BASE_URI_PROVIDER, builder.baseUriProvider);
// Exactly one of baseUri / baseUriProvider must be supplied by the builder.
if (builder.baseUri != null) {
if (builder.baseUriProvider != null) {
throw new IllegalStateException("Not permitted to supply baseUri and baseUriProvider");
}
// Wrap the fixed URI in a constant supplier so both cases look identical downstream.
setConfig(BASE_URI_PROVIDER, Suppliers.ofInstance(builder.baseUri));
} else {
setConfig(BASE_URI_PROVIDER, checkNotNull(builder.baseUriProvider, "baseUriProvider and baseUri"));
}
setConfig(GROUP_ID, checkNotNull(builder.groupId, "Zabbix groupId must be set"));
setConfig(TEMPLATE_ID, checkNotNull(builder.templateId, "Zabbix templateId must be set"));
setConfig(UNIQUE_HOSTNAME_GENERATOR, checkNotNull(builder.uniqueHostnameGenerator, "uniqueHostnameGenerator"));
// Copy each enabled poll config (insertion order preserved), filling in the
// builder-wide default period where the copy has none (period < 0).
Set<ZabbixPollConfig<?>> polls = Sets.newLinkedHashSet();
for (ZabbixPollConfig<?> config : builder.polls) {
if (!config.isEnabled()) continue;
@SuppressWarnings({ "unchecked", "rawtypes" })
ZabbixPollConfig<?> configCopy = new ZabbixPollConfig(config);
if (configCopy.getPeriod() < 0) configCopy.period(builder.period, builder.periodUnits);
polls.add(configCopy);
}
setConfig(POLLS, polls);
initUniqueTag(builder.uniqueTag, polls);
}
public void testFindHighestRevision_nonExistentHashThrows() throws Exception {
// A failing 'git log' on an unknown hash must surface to the caller as a MoeProblem.
GitClonedRepository mockRepo = mockClonedRepo(repositoryName);
expectLogCommand(mockRepo, LOG_FORMAT_COMMIT_ID, "bogusHash")
.andThrow(
new CommandException(
"git",
ImmutableList.<String>of("mock args"),
"mock stdout",
"mock stderr: unknown revision",
1));
control.replay();
try {
GitRevisionHistory rh = new GitRevisionHistory(Suppliers.ofInstance(mockRepo));
rh.findHighestRevision("bogusHash");
fail("'git log' didn't fail on bogus hash ID");
} catch (MoeProblem expected) {
// expected: the CommandException stubbed above is translated into a MoeProblem
}
control.verify();
}
public LoadTestCassandraEngineFactory() {
    super(Arrays.asList("org.copperengine.core.persistent.cassandra.loadtest.workflows"));
    super.setCassandraHosts(Arrays.asList("nuc1.scoop-gmbh.de"));
    // Lazily create the backchannel and the dummy response sender on first use.
    backchannel = Suppliers.memoize(BackchannelDefaultImpl::new);
    dummyResponseSender = Suppliers.memoize(
            () -> new DummyResponseSender(scheduledExecutorService.get(), engine.get()));
    // Expose both suppliers to workflows through the dependency injector.
    dependencyInjector.get().register("dummyResponseSender", new Supplier2Provider<>(dummyResponseSender));
    dependencyInjector.get().register("backchannel", new Supplier2Provider<>(backchannel));
}
@Override
protected EntitySpec<?> getFirstMemberSpec() {
    // Prefer an explicitly configured first-member spec, then the generic member spec.
    EntitySpec<?> configuredSpec = super.getFirstMemberSpec();
    if (configuredSpec == null) {
        configuredSpec = super.getMemberSpec();
    }
    if (configuredSpec != null) {
        return applyDefaults(configuredSpec, Suppliers.ofInstance(MASTER_SERVER_ID), MASTER_CONFIG_URL);
    }
    // Neither spec is configured: fall back to a default MySql master node.
    return EntitySpec.create(MySqlNode.class)
            .displayName("MySql Master")
            .configure(MySqlNode.MYSQL_SERVER_ID, MASTER_SERVER_ID)
            .configure(MySqlNode.TEMPLATE_CONFIGURATION_URL, MASTER_CONFIG_URL);
}
@Autowired
public ItemController(ItemService itemService)
{
this.itemService = itemService;
// Snapshot of all item prices converted to the API model. The supplier refetches from
// ItemService at most once every 30 minutes; callers in between receive the memoized
// array.
memoizedPrices = Suppliers.memoizeWithExpiration(() -> new MemoizedPrices(itemService.fetchPrices().stream()
.map(priceEntry ->
{
// Copy each fetched price entry field-by-field into the API representation.
ItemPrice itemPrice = new ItemPrice();
itemPrice.setId(priceEntry.getItem());
itemPrice.setName(priceEntry.getName());
itemPrice.setPrice(priceEntry.getPrice());
itemPrice.setTime(priceEntry.getTime());
return itemPrice;
})
.toArray(ItemPrice[]::new)), 30, TimeUnit.MINUTES);
}
// Builds a Write backed by a StubWriteOutputStream. The first supplier creates a
// deadlined blocking stub with the request metadata attached; the second, memoized,
// supplier creates an async stub that deliberately carries no deadline (see the
// original inline comment about client cancellation determination).
Write getWrite(
String resourceName, long expectedSize, boolean autoflush, RequestMetadata requestMetadata) {
return new StubWriteOutputStream(
() ->
deadlined(bsBlockingStub).withInterceptors(attachMetadataInterceptor(requestMetadata)),
Suppliers.memoize(
() ->
ByteStreamGrpc.newStub(channel)
.withInterceptors(
attachMetadataInterceptor(
requestMetadata))), // explicitly avoiding deadline due to client
// cancellation determination
resourceName,
expectedSize,
autoflush);
}
@Override
protected void configure() {
bind(CassandraFactory.class).asEagerSingleton();
// Event Store
// Map the event-store annotations onto the queue-specific ZooKeeper and host-discovery
// bindings, and keep dedup read/write channels isolated under distinct prefixes.
bind(ChannelConfiguration.class).to(QueueChannelConfiguration.class).asEagerSingleton();
bind(CuratorFramework.class).annotatedWith(EventStoreZooKeeper.class).to(Key.get(CuratorFramework.class, QueueZooKeeper.class));
bind(HostDiscovery.class).annotatedWith(EventStoreHostDiscovery.class).to(Key.get(HostDiscovery.class, DedupQueueHostDiscovery.class));
bind(DedupEventStoreChannels.class).toInstance(DedupEventStoreChannels.isolated("__dedupq_write:", "__dedupq_read:"));
// Dedup is unconditionally enabled in this module; bound as a Supplier so consumers
// that expect a dynamic flag still work.
bind(new TypeLiteral<Supplier<Boolean>>() {}).annotatedWith(DedupEnabled.class).toInstance(Suppliers.ofInstance(true));
install(new EventStoreModule("bv.emodb.queue", _metricRegistry));
// Bind the Queue instance that the rest of the application will consume
bind(QueueService.class).to(DefaultQueueService.class).asEagerSingleton();
expose(QueueService.class);
// Bind the DedupQueue instance that the rest of the application will consume
bind(DedupQueueService.class).to(DefaultDedupQueueService.class).asEagerSingleton();
expose(DedupQueueService.class);
}
VHUPackingInfo(final I_M_HU vhu)
{
    Check.assumeNotNull(vhu, "Parameter vhu is not null");
    // Lazily resolve the product storage of the given VHU; yields null unless the
    // VHU has exactly one product storage.
    huProductStorageSupplier = Suppliers.memoize(() -> {
        final List<IHUProductStorage> storages = Services.get(IHandlingUnitsBL.class)
                .getStorageFactory()
                .getStorage(vhu)
                .getProductStorages();
        return storages.size() == 1 ? storages.get(0) : null;
    });
}
public DashboardResource(String defaultDashboard,
                         HostAndPort breakerboxHostAndPort,
                         Set<String> specifiedMetaClusters) {
    this.defaultDashboard = defaultDashboard;
    this.breakerboxHostAndPort = breakerboxHostAndPort;
    this.specifiedMetaClusters = specifiedMetaClusters;
    // Load index.html from the classpath once, on first request. A missing or
    // unreadable resource is a deployment error, hence IllegalStateException.
    indexSupplier = Suppliers.memoize(() -> {
        try {
            byte[] page = Resources.asByteSource(Resources.getResource("index.html")).read();
            return page;
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    });
}
public void testFindHeadRevisions() throws Exception {
// 'hg heads' reports three heads; the history must expose them, tagged with this
// repository's name, in the order hg printed them. Only the first two are asserted on.
HgClonedRepository mockRepo = mockClonedRepo(MOCK_REPO_NAME);
expect(
cmd.runCommand(
CLONE_TEMP_DIR,
"hg",
ImmutableList.of("heads", "mybranch", "--template={node} {branch}\n")))
.andReturn("mockChangesetID1 branch1\nmockChangesetID2 branch2\nmockChangesetID3 unused");
control.replay();
HgRevisionHistory rh = new HgRevisionHistory(cmd, HG_CMD, Suppliers.ofInstance(mockRepo));
ImmutableList<Revision> revs = ImmutableList.copyOf(rh.findHeadRevisions());
assertEquals(MOCK_REPO_NAME, revs.get(0).repositoryName());
assertEquals("mockChangesetID1", revs.get(0).revId());
assertEquals(MOCK_REPO_NAME, revs.get(1).repositoryName());
assertEquals("mockChangesetID2", revs.get(1).revId());
control.verify();
}
@Override
public Supplier<T> get()
{
// Returns a constant supplier of the configured object, building and caching it on
// the first call. NOTE(review): the retVal check is not synchronized — confirm this
// provider is only invoked single-threaded or under the injector's own locking.
if (retVal != null) {
return retVal;
}
try {
final T config = configurator.configurate(props, propertyBase, classToProvide);
retVal = Suppliers.ofInstance(config);
}
catch (RuntimeException e) {
// When a runtime exception gets thrown out, this provider will get called again if the object is asked for again.
// This will have the same failed result, 'cause when it's called no parameters will have actually changed.
// Guice will then report the same error multiple times, which is pretty annoying. Cache a null supplier and
// return that instead. This is technically enforcing a singleton, but such is life.
retVal = Suppliers.ofInstance(null);
throw e;
}
return retVal;
}
@Override
public Iterator<Map.Entry<String, MaintenanceOp>> listMaintenanceOps() {
// Lazily walks the system table and yields a (table name, maintenance op) entry for
// every table that currently has a pending maintenance op.
final Iterator<Map<String, Object>> tableIter =
_backingStore.scan(_systemTable, null, LimitCounter.max(), ReadConsistency.STRONG);
// Memoized so getTableEventDatacenters() runs at most once per returned iterator,
// however many ops end up consulting it.
final Supplier<List<TableEventDatacenter>> tableEventDatacenterSupplier = Suppliers.memoize(this::getTableEventDatacenters);
return new AbstractIterator<Map.Entry<String, MaintenanceOp>>() {
@Override
protected Map.Entry<String, MaintenanceOp> computeNext() {
// Skip over tables that have no pending op (getNextMaintenanceOp returns null).
while (tableIter.hasNext()) {
TableJson json = new TableJson(tableIter.next());
MaintenanceOp op = getNextMaintenanceOp(json, false/*don't expose task outside this class*/, tableEventDatacenterSupplier);
if (op != null) {
return Maps.immutableEntry(json.getTable(), op);
}
}
return endOfData();
}
};
}
@Nonnull
public Collection<VcsRef> getRefs() {
// Returns a collection view whose contents are decompressed from this CompressedRefs
// lazily, at most once, when the collection is first iterated or sized.
return new AbstractCollection<VcsRef>() {
private final Supplier<Collection<VcsRef>> myLoadedRefs =
Suppliers.memoize(() -> CompressedRefs.this.stream().collect(Collectors.toList()));
@Nonnull
@Override
public Iterator<VcsRef> iterator() {
return myLoadedRefs.get().iterator();
}
@Override
public int size() {
return myLoadedRefs.get().size();
}
};
}
@Before
public void setUp() {
// TODO fake supplier
// Wire the controller under test with an empty, already-resolved device list and a
// settable model source so individual tests can push state changes in.
devices = CachedAddressableListSource.get(
ImmutableList.<String>of(),
Suppliers.ofInstance(
Futures.<List<DeviceModel>>succeededFuture(ImmutableList.<DeviceModel>of())
)
);
source = new SettableModelSource<>();
controller = new SecurityDeviceStatusController(SecuritySubsystem.ALARMMODE_ON, source, devices);
}
private Supplier<byte[]> initializeAlignmentSupplier(){
    // Lazily parse the alignment string (numbers separated by '-' or whitespace) into
    // a byte array; yields null when no alignment string is present.
    return Suppliers.memoize(() -> {
        String alignmentString = getAlignmentString();
        if (alignmentString == null) {
            return null;
        }
        String[] tokens = alignmentString.split("[-\\s]+");
        byte[] alignment = new byte[tokens.length];
        for (int i = 0; i < tokens.length; i++) {
            // parsed as short first, then narrowed to byte
            alignment[i] = (byte) Short.parseShort(tokens[i]);
        }
        return alignment;
    });
}
public void testFindHighestRevision() throws Exception {
// A null revision id means "highest revision", which maps to asking git log for HEAD.
GitClonedRepository mockRepo = mockClonedRepo(repositoryName);
expectLogCommand(mockRepo, LOG_FORMAT_COMMIT_ID, "HEAD").andReturn("mockHashID");
control.replay();
GitRevisionHistory rh = new GitRevisionHistory(Suppliers.ofInstance(mockRepo));
Revision rev = rh.findHighestRevision(null);
assertEquals(repositoryName, rev.repositoryName());
assertEquals("mockHashID", rev.revId());
control.verify();
}
/**
 * A test for finding the last equivalence for the following history starting
 * with repo2{4} and <em>only searching linear history</em> instead of following multi-parent
 * commits:<pre>
 *
 *                                         _____
 *                                        |     |
 *                                        |  4  |
 *                                        |_____|
 *                                           |  \
 *                                           |   \
 *                                           |    \
 *                                         __|__   \_____
 *                                        |     |  |     |
 *                                        | 3a  |  | 3b  |
 *                                        |_____|  |_____|
 *                                           |       /
 *                                           |      /
 *                                           |     /
 *              ____                       __|__  /
 *             |    |                     |     |/
 *             |1002|=====================|  2  |
 *             |____|                     |_____|
 *
 *              repo1                      repo2
 * </pre>
 *
 * @throws Exception if the mocked git commands or the history search fail unexpectedly
 */
public void testFindLastEquivalence_linearSearch() throws Exception {
GitClonedRepository mockRepo = mockClonedRepo("repo2");
// Only the first-parent chain 4 -> 3a -> 2 is expected to be read.
expectLogCommandIgnoringMissing(mockRepo, LOG_FORMAT_ALL_METADATA, "4")
.andReturn(METADATA_JOINER.join("4", "author", GIT_COMMIT_DATE, "3a 3b", "description"));
expectLogCommandIgnoringMissing(mockRepo, LOG_FORMAT_ALL_METADATA, "3a")
.andReturn(METADATA_JOINER.join("3a", "author", GIT_COMMIT_DATE, "2", "description"));
// Note revision 3b is <em>not</em> expected here for a linear history search.
control.replay();
// testDb1 records the repo1{1002} == repo2{2} equivalence the search should find.
FileDb database =
new FileDb(null, GsonModule.provideGson().fromJson(testDb1, DbStorage.class), null);
GitRevisionHistory history = new GitRevisionHistory(Suppliers.ofInstance(mockRepo));
Result result =
history.findRevisions(
Revision.create(4, "repo2"),
new RepositoryEquivalenceMatcher("repo1", database),
SearchType.LINEAR);
RepositoryEquivalence expectedEq =
RepositoryEquivalence.create(
Revision.create(1002, "repo1"), Revision.create(2, "repo2"));
assertThat(result.getRevisionsSinceEquivalence().getBreadthFirstHistory())
.containsExactly(Revision.create(4, "repo2"), Revision.create("3a", "repo2"))
.inOrder();
assertThat(result.getEquivalences()).containsExactly(expectedEq);
control.verify();
}
/**
 * {@inheritDoc}
 *
 * <p>Invokes the configured effector on the resolved target entity (retrying up to
 * MAX_ATTEMPTS within TIMEOUT), optionally checks the configured assertions against the
 * result, publishes the result as the EFFECTOR_RESULT sensor, and transitions this
 * entity to RUNNING — or to ON_FIRE if any step fails.
 */
@Override
public void start(Collection<? extends Location> locations) {
ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
try {
final Entity targetEntity = resolveTarget();
final String effectorName = getRequiredConfig(EFFECTOR_NAME);
final Map<String, ?> effectorParams = getConfig(EFFECTOR_PARAMS);
final Duration timeout = getConfig(TIMEOUT);
final Integer maxAttempts = getConfig(MAX_ATTEMPTS);
final Duration backoffToPeriod = getConfig(BACKOFF_TO_PERIOD);
// This test entity is a leaf; children are a configuration error.
if (!getChildren().isEmpty()) {
throw new RuntimeException(String.format("The entity [%s] cannot have child entities", getClass().getName()));
}
Maybe<Effector<?>> effector = EffectorUtils.findEffectorDeclared(targetEntity, effectorName);
if (effector.isAbsentOrNull()) {
throw new AssertionError(String.format("No effector with name [%s]", effectorName));
}
Object effectorResult = invokeEffector(targetEntity, effector.get(), effectorParams, maxAttempts, timeout, backoffToPeriod);
final List<Map<String, Object>> assertions = getAssertions(this, ASSERTIONS);
if(assertions != null && !assertions.isEmpty()){
// The effector already ran, so the assertions get exactly one attempt against
// its (constant) result.
Supplier<?> supplier = Suppliers.ofInstance(effectorResult);
TestFrameworkAssertions.checkAssertionsEventually(new AssertionOptions(effectorName, supplier)
.maxAttempts(1)
.timeout(timeout)
.assertions(assertions));
}
//Add result of effector to sensor
sensors().set(EFFECTOR_RESULT, effectorResult);
setUpAndRunState(true, Lifecycle.RUNNING);
} catch (Throwable t) {
// Any failure (resolution, invocation, assertion) marks the entity ON_FIRE.
setUpAndRunState(false, Lifecycle.ON_FIRE);
throw Exceptions.propagate(t);
}
}
@JsonCreator
public Route6FilterList(
    @JsonProperty(PROP_NAME) String name,
    @JsonProperty(PROP_LINES) List<Route6FilterLine> lines) {
    super(name);
    // Null lines from JSON are treated as an empty list.
    _lines = firstNonNull(lines, Collections.emptyList());
    // Each cache is computed lazily on first use and then reused.
    _permittedCache = Suppliers.memoize(new Route6FilterList.CacheSupplier());
    _deniedCache = Suppliers.memoize(new Route6FilterList.CacheSupplier());
}
@Test
public void testPermanentRedirectNoFollow() throws Exception {
    // Make the proxy answer with 301 and disable redirect following on the client:
    // the 301 must then be surfaced to the caller instead of being chased.
    proxy.setFailureSupplier(Suppliers.ofInstance(Failure.HTTP_301));
    client.setFollowRedirects(false);
    int status = client.GET(httpBinEndpoint + "/status/200").getStatus();
    assertThat(status).as("status").isEqualTo(301);
}
@Test
public void testMemoize() {
    // Two memoized suppliers that resolve each other through the map: evaluating either
    // one must be detected as recursion rather than overflowing the stack.
    Map<String, Supplier<String>> suppliers = new HashMap<>();
    Supplier<String> fooSupplier =
        Suppliers.memoize(new NonRecursiveSupplier<>(() -> suppliers.get("bar").get()));
    Supplier<String> barSupplier =
        Suppliers.memoize(new NonRecursiveSupplier<>(() -> suppliers.get("foo").get()));
    suppliers.put("foo", fooSupplier);
    suppliers.put("bar", barSupplier);
    exception.expect(NonRecursiveSupplierException.class);
    suppliers.get("foo").get();
}
/**
 * Constructs a CommunityList with the given name for {@link #_name}, and lines for {@link
 * #_lines}
 *
 * @param name The name of the structure
 * @param lines The lines in the list
 * @param invertMatch Stored in {@link #_invertMatch}; presumably inverts the match
 *     semantics of {@code lines} — confirm against the matching logic
 */
public CommunityList(
@Nonnull String name, @Nonnull List<CommunityListLine> lines, boolean invertMatch) {
_name = name;
_lines = lines;
_invertMatch = invertMatch;
// Computed lazily, at most once, on first use.
_communityCache = Suppliers.memoize(new CommunityCacheSupplier());
}
@Override
public Path<V, E> next() {
    // The memoized supplier returns the same Path instance on every get() call,
    // and null once the path space is exhausted.
    Path<V, E> current = next.get();
    if (current == null) {
        throw new NoSuchElementException("No more path between " + src + "-" + dst);
    }
    resultPaths.add(current);
    // Defer computing the successor path until the iterator is advanced again.
    next = Suppliers.memoize(() -> computeNext(current));
    return current;
}