The following are example usages of com.google.common.base.Supplier, collected from various open-source projects; the full source of each snippet is available on GitHub.
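Before the project-specific snippets, here is a minimal standalone sketch of the com.google.common.base.Supplier contract and Suppliers.memoize, which several of the examples below rely on (the class name and values are illustrative only):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class SupplierDemo {
  public static void main(String[] args) {
    // A Supplier defers a computation behind a no-argument get().
    Supplier<String> expensive = () -> {
      System.out.println("computing...");
      return "value";
    };
    // memoize runs the delegate at most once and caches its result.
    Supplier<String> cached = Suppliers.memoize(expensive);
    System.out.println(cached.get()); // prints "computing..." then "value"
    System.out.println(cached.get()); // prints "value" only
  }
}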
Solution(File inputSpecFile) {
name = relativePathFunction(ROOT, inputSpecFile);
solutionFolder =
new File(TESTDATA_PATH + relativePathFunction(ROOT, inputSpecFile.getParentFile()));
goldenFolder = new File(solutionFolder, "golden");
solutionPackage =
Suppliers.memoize(
new Supplier<SolutionPackage>() {
@Override
public SolutionPackage get() {
try {
DeploymentPackageInput.Builder input = DeploymentPackageInput.newBuilder();
TextFormat.getParser()
.merge(
Files.asCharSource(inputSpecFile, StandardCharsets.UTF_8).read(),
input);
return Autogen.getInstance()
.generateDeploymentPackage(
input.build(), SharedSupportFilesStrategy.INCLUDED);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
}
/**
* Wait for the specified test to return true. The test will be performed
* initially and then every {@code checkEveryMillis} until at least
* {@code waitForMillis} time has expired. If {@code check} is null or
* {@code waitForMillis} is less than {@code checkEveryMillis} this method
* will throw an {@link IllegalArgumentException}.
*
* @param check the test to perform
* @param checkEveryMillis how often to perform the test
* @param waitForMillis the amount of time after which no more tests will be
*                      performed
* @throws TimeoutException if the test does not return true in the allotted
*                          time
* @throws InterruptedException if the method is interrupted while waiting
*/
public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
int waitForMillis) throws TimeoutException, InterruptedException {
Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
ERROR_INVALID_ARGUMENT);
long st = monotonicNow();
boolean result = check.get();
while (!result && (monotonicNow() - st < waitForMillis)) {
Thread.sleep(checkEveryMillis);
result = check.get();
}
if (!result) {
throw new TimeoutException("Timed out waiting for condition. " +
"Thread diagnostics:\n" +
TimedOutTestsListener.buildThreadDiagnosticString());
}
}
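A minimal usage sketch for waitFor, assuming it is called from a test method that declares the checked TimeoutException and InterruptedException; the scheduled flag flip stands in for real asynchronous work:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Poll a completion flag every 100 ms, giving up after 5 seconds.
AtomicBoolean done = new AtomicBoolean(false);
ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
exec.schedule(() -> done.set(true), 1, TimeUnit.SECONDS); // flips the flag after ~1 s
try {
  waitFor(done::get, 100, 5000); // done::get satisfies the Supplier<Boolean> parameter
} finally {
  exec.shutdownNow();
}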
private CompletableFuture<RaftClientReply> sendRequestAsync(
ContainerCommandRequestProto request) {
return TracingUtil.executeInNewSpan(
"XceiverClientRatis." + request.getCmdType().name(),
(Supplier<CompletableFuture<RaftClientReply>>) () -> {
final ContainerCommandRequestMessage message
= ContainerCommandRequestMessage.toMessage(
request, TracingUtil.exportCurrentSpan());
if (HddsUtils.isReadOnly(request)) {
if (LOG.isDebugEnabled()) {
LOG.debug("sendCommandAsync ReadOnly {}", message);
}
return getClient().sendReadOnlyAsync(message);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("sendCommandAsync {}", message);
}
return getClient().sendAsync(message);
}
}
);
}
/**
* Transforms a set of input observables with a function.
*
* @param inputs The input observables.
* @param function The function to apply to all of the inputs.
* @param <I> The type of all input values.
* @param <O> The type of the output values.
* @return An observable which will reflect the combination of all inputs
* with the given function. Changes in the output value will result
* in calls to any callbacks registered with the output.
*/
static <I, O> Observable<O> transform(final List<? extends Observable<I>> inputs,
final Function<List<I>, O> function)
{
return new ObservableCombiner<>(inputs, new Supplier<O>()
{
@Override
public O get()
{
ArrayList<I> deps = new ArrayList<>();
for (Observable<? extends I> dependency : inputs)
{
deps.add(dependency.get());
}
return function.apply(deps);
}
});
}
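A hedged usage sketch for transform; widthObservable and heightObservable are hypothetical inputs, and only Observable.get() (which the implementation above already calls) is assumed to exist:

// Combine two hypothetical dimension observables into a derived "area" observable.
List<Observable<Integer>> dimensions = Arrays.asList(widthObservable, heightObservable);
Observable<Integer> area = transform(dimensions, dims -> dims.get(0) * dims.get(1));
// area.get() recomputes the product from the inputs' current values.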
@Before
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
proxySelectorSupplier = new Supplier<ProxySelector>() {
@Override
public ProxySelector get() {
return proxySelector;
}
};
proxyDetector = new ProxyDetectorImpl(proxySelectorSupplier, authenticator, null);
int proxyPort = 1234;
unresolvedProxy = InetSocketAddress.createUnresolved("10.0.0.1", proxyPort);
proxyParmeters = new ProxyParameters(
new InetSocketAddress(InetAddress.getByName(unresolvedProxy.getHostName()), proxyPort),
NO_USER,
NO_PW);
}
@PostConstruct
private void init() {
configCache = CacheBuilder.newBuilder()
.maximumSize(5000)
.expireAfterAccess(10, TimeUnit.SECONDS)
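// recordStats() below enables the CacheStats that the hitRate gauge further down reads; without it the stats stay at their defaults.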
.recordStats()
.build(new CacheLoader<VersionData<ConfigMeta>, ChecksumData<String>>() {
@Override
public ChecksumData<String> load(VersionData<ConfigMeta> configId) throws ConfigNotFoundException {
return loadConfig(configId);
}
});
Metrics.gauge("configFile_notFound_cache_hitRate", new Supplier<Double>() {
@Override
public Double get() {
return configCache.stats().hitRate();
}
});
}
@Test
public void testQueueDrops() throws Exception {
final int TEST_SIZE = 1000000;
Supplier<DropCounter> dropSupplier = Suppliers.memoize(DropCounterSupplier.INSTANCE);
DropCounter drops = dropSupplier.get();
T scheduler = createPacketScheduler()
.dropOnQueueFull()
.setDropHandler(dropSupplier)
.build();
PacketScheduler.Producer<Packet> producer = scheduler.attach();
for (int i = 0; i < TEST_SIZE; ++i) {
Packet sent1 = new Packet(i);
producer.send(sent1);
Packet sent2 = new Packet(TEST_SIZE+i);
producer.send(sent2);
Packet recv = scheduler.poll();
assertSame("received an unexpected packet", sent1, recv);
}
assertEquals("queue dropped packets", TEST_SIZE, drops.get());
}
/**
* Returns elements of {@code outer} for which there is (semi-join) / is not (anti-semi-join)
* a member of {@code inner} with a matching key. A specified
* {@code EqualityComparer<TSource>} is used to compare keys.
*/
private static <TSource, TInner, TKey> Enumerable<TSource> semiEquiJoin_(
final Enumerable<TSource> outer, final Enumerable<TInner> inner,
final Function1<TSource, TKey> outerKeySelector,
final Function1<TInner, TKey> innerKeySelector,
final EqualityComparer<TKey> comparer,
final boolean anti) {
return new AbstractEnumerable<TSource>() {
public Enumerator<TSource> enumerator() {
// CALCITE-2909 Delay the computation of the innerLookup until the moment when we are sure
// that it will be really needed, i.e. when the first outer enumerator item is processed
final Supplier<Enumerable<TKey>> innerLookup = Suppliers.memoize(() ->
comparer == null
? inner.select(innerKeySelector).distinct()
: inner.select(innerKeySelector).distinct(comparer));
final Predicate1<TSource> predicate = anti
? v0 -> !innerLookup.get().contains(outerKeySelector.apply(v0))
: v0 -> innerLookup.get().contains(outerKeySelector.apply(v0));
return EnumerableDefaults.where(outer.enumerator(), predicate);
}
};
}
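The predicate above is the standard semi-/anti-semi-join membership test; a minimal sketch of the same logic over plain collections (illustrative names, not Calcite's Enumerable API):

import java.util.List;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

final class SemiJoinSketch {
  /** Keeps the outer rows whose key is (semi-join) or is not (anti-semi-join) among the inner keys. */
  static <T, K> List<T> semiJoin(List<T> outer, Set<K> innerKeys,
      Function<T, K> outerKey, boolean anti) {
    return outer.stream()
        .filter(row -> innerKeys.contains(outerKey.apply(row)) != anti)
        .collect(Collectors.toList());
  }
}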
/** Creates an OLAPValuesRel. */
public static OLAPValuesRel create(RelOptCluster cluster, final RelDataType rowType,
final ImmutableList<ImmutableList<RexLiteral>> tuples) {
final RelMetadataQuery mq = cluster.getMetadataQuery();
final RelTraitSet traitSet = cluster.traitSetOf(OLAPRel.CONVENTION)
.replaceIfs(RelCollationTraitDef.INSTANCE, new Supplier<List<RelCollation>>() {
public List<RelCollation> get() {
return RelMdCollation.values(mq, rowType, tuples);
}
}).replaceIf(RelDistributionTraitDef.INSTANCE, new Supplier<RelDistribution>() {
public RelDistribution get() {
return RelMdDistribution.values(rowType, tuples);
}
});
return new OLAPValuesRel(cluster, rowType, tuples, traitSet);
}
@Override
public RelNode convert(final RelNode rel) {
// KYLIN-3281
// OLAPProjectRule can't work properly with projectRel[input=sortRel]
final LogicalProject project = (LogicalProject) rel;
final RelNode convertChild = convert(project.getInput(),
project.getInput().getTraitSet().replace(OLAPRel.CONVENTION));
final RelOptCluster cluster = convertChild.getCluster();
final RelTraitSet traitSet = cluster.traitSet().replace(OLAPRel.CONVENTION)
.replaceIfs(RelCollationTraitDef.INSTANCE, new Supplier<List<RelCollation>>() {
public List<RelCollation> get() {
// CALCITE-88
return RelMdCollation.project(cluster.getMetadataQuery(), convertChild, project.getProjects());
}
});
return new OLAPProjectRel(convertChild.getCluster(), traitSet, convertChild, project.getProjects(),
project.getRowType());
}
/**
* Save an asset and create a blob.
*
* @return blob content
*/
@Override
@Nullable
public Content saveAsset(final StorageTx tx,
final Asset asset,
final Supplier<InputStream> contentSupplier,
final Payload payload)
{
try {
if (payload instanceof Content) {
AttributesMap contentAttributes = ((Content) payload).getAttributes();
String contentType = payload.getContentType();
return saveAsset(tx, asset, contentSupplier, contentType, contentAttributes);
}
return saveAsset(tx, asset, contentSupplier, null, null);
}
catch (IOException ex) {
log.warn("Could not set blob {}", ex.getMessage(), ex);
return null;
}
}
public static Provider create() {
Supplier<String> nettyProvider = ConfigService.supplier("iris.gateway.provider", String.class, "");
switch (nettyProvider.get()) {
case "epoll":
if (Epoll.isAvailable()) {
log.debug("using netty epoll provider for gateway connection");
return epoll();
} else {
if (!"".equals(nettyProvider.get())) {
log.warn("netty epoll provider requested but not available, using nio for gateway connection:", Epoll.unavailabilityCause());
} else {
log.debug("using netty nio provider for gateway connection");
}
return nio();
}
case "":
case "nio":
log.debug("using netty nio provider for gateway connection");
return nio();
default:
log.warn("unknown netty provider, using nio by default");
return nio();
}
}
public FairQueuingPacketScheduler(
NetworkClock clock,
RateLimiter outputRateLimit,
RateLimiters.Builder<? extends RateLimiter> queueRateLimiter,
Supplier<? extends BlockingQueue<T>> queueSupplier,
Supplier<? extends PacketScheduler.PacketDropHandler<? super T>> dropHandler,
Supplier<? extends PacketScheduler.QueueStateHandler<? super T>> queueHandler,
double lowWatermarkPercent,
double highWatermarkPercent,
boolean blocking
) {
this.clock = clock;
this.outputRateLimit = outputRateLimit;
this.queueRateLimiter = queueRateLimiter;
this.queueSupplier = queueSupplier;
this.dropHandler = dropHandler;
this.queueHandler = queueHandler;
this.lowWatermarkPercent = lowWatermarkPercent;
this.highWatermarkPercent = highWatermarkPercent;
this.blocking = blocking;
this.next = new AtomicInteger(0);
this.available = new Semaphore(0);
this.producers = new ArrayList<>();
}
public ControlModeSelector(
Supplier<Boolean> hdrSetting,
Supplier<FaceDetectMode> faceDetectMode,
SupportedHardwareLevel supportedHardwareLevel)
{
mHdrSetting = hdrSetting;
mFaceDetectMode = faceDetectMode;
mSupportedHardwareLevel = supportedHardwareLevel;
}
/**
* Save an asset and create a blob.
*
* @return blob content
*/
@Override
public Content saveAsset(final StorageTx tx,
final Asset asset,
final Supplier<InputStream> contentSupplier,
final Payload payload) throws IOException
{
AttributesMap contentAttributes = null;
String contentType = null;
if (payload instanceof Content) {
contentAttributes = ((Content) payload).getAttributes();
contentType = payload.getContentType();
}
return saveAsset(tx, asset, contentSupplier, contentType, contentAttributes);
}
PushConsumerImpl(String subject, String group, RegistParam param) {
super(param.getExecutor(), param.getMessageListener());
this.consumeParam = new ConsumeParam(subject, group, param);
String[] values = {subject, group};
Metrics.gauge("qmq_pull_buffer_size", SUBJECT_GROUP_ARRAY, values, new Supplier<Double>() {
@Override
public Double get() {
return (double) messageBuffer.size();
}
});
this.createToHandleTimer = Metrics.timer("qmq_pull_createToHandle_timer", SUBJECT_GROUP_ARRAY, values);
this.handleTimer = Metrics.timer("qmq_pull_handle_timer", SUBJECT_GROUP_ARRAY, values);
this.handleFailCounter = Metrics.counter("qmq_pull_handleFail_count", SUBJECT_GROUP_ARRAY, values);
}
/**
* Create a transport connected to a fake peer for test.
*/
@VisibleForTesting
OkHttpClientTransport(
String userAgent,
Executor executor,
FrameReader frameReader,
FrameWriter testFrameWriter,
int nextStreamId,
Socket socket,
Supplier<Stopwatch> stopwatchFactory,
@Nullable Runnable connectingCallback,
SettableFuture<Void> connectedFuture,
int maxMessageSize,
int initialWindowSize,
Runnable tooManyPingsRunnable,
TransportTracer transportTracer) {
address = null;
this.maxMessageSize = maxMessageSize;
this.initialWindowSize = initialWindowSize;
defaultAuthority = "notarealauthority:80";
this.userAgent = GrpcUtil.getGrpcUserAgent("okhttp", userAgent);
this.executor = Preconditions.checkNotNull(executor, "executor");
serializingExecutor = new SerializingExecutor(executor);
this.testFrameReader = Preconditions.checkNotNull(frameReader, "frameReader");
this.testFrameWriter = Preconditions.checkNotNull(testFrameWriter, "testFrameWriter");
this.socket = Preconditions.checkNotNull(socket, "socket");
this.nextStreamId = nextStreamId;
this.stopwatchFactory = stopwatchFactory;
this.connectionSpec = null;
this.connectingCallback = connectingCallback;
this.connectedFuture = Preconditions.checkNotNull(connectedFuture, "connectedFuture");
this.proxy = null;
this.tooManyPingsRunnable =
Preconditions.checkNotNull(tooManyPingsRunnable, "tooManyPingsRunnable");
this.maxInboundMetadataSize = Integer.MAX_VALUE;
this.transportTracer = Preconditions.checkNotNull(transportTracer, "transportTracer");
initTransportTracer();
}
@Test
public void testRateLimitsOutput() throws Exception {
final int TEST_SIZE = 20;
final double RATE_LIMIT = 10.0;
final double EXPECTED_TIME = (double)TEST_SIZE / RATE_LIMIT;
final double EPSILON = 0.100;
Supplier<DropCounter> dropSupplier = Suppliers.memoize(DropCounterSupplier.INSTANCE);
DropCounter drops = dropSupplier.get();
T scheduler = createPacketScheduler()
.setDropHandler(dropSupplier)
.setRateLimiter(RateLimiters.tokenBucket(1, RATE_LIMIT).setInitiallyEmpty())
.useArrayQueue(TEST_SIZE)
.build();
PacketScheduler.Producer<Packet> producer = scheduler.attach();
for (int i = 0; i < TEST_SIZE; ++i) {
Packet sent = new Packet(i);
producer.send(sent);
}
long start = System.nanoTime();
for (int i = 0; i < TEST_SIZE; ++i) {
scheduler.take();
}
long elapsed = System.nanoTime() - start;
double seconds = (double)elapsed / 1000000000.0;
double epsilon = Math.abs(seconds - EXPECTED_TIME);
assertEquals("queue dropped packets", 0, drops.get());
assertTrue("execution time was different than expected: expected=" + EXPECTED_TIME + ", actual=" + seconds, epsilon < EPSILON);
}
/**
* Creates a {@link CensusStatsModule} with the default OpenCensus implementation.
*/
CensusStatsModule(Supplier<Stopwatch> stopwatchSupplier, boolean propagateTags) {
this(
Tags.getTagger(),
Tags.getTagPropagationComponent().getBinarySerializer(),
Stats.getStatsRecorder(),
stopwatchSupplier,
propagateTags);
}
@Test
public void testRateLimitsProducers() throws Exception {
final int TEST_SIZE = 20;
final double RATE_LIMIT = 10.0;
final double EXPECTED_TIME = (double)TEST_SIZE / RATE_LIMIT;
final double EPSILON = 0.100;
Supplier<DropCounter> dropSupplier = Suppliers.memoize(DropCounterSupplier.INSTANCE);
DropCounter drops = dropSupplier.get();
T scheduler = createPacketScheduler()
.setDropHandler(dropSupplier)
.setProducerRateLimiter(RateLimiters.tokenBucket(1, RATE_LIMIT))
//.useArrayQueue(TEST_SIZE)
.build();
PacketScheduler.Producer<Packet> producer = scheduler.attach();
long start = System.nanoTime();
for (int i = 0; i < TEST_SIZE; ++i) {
Packet sent = new Packet(i);
producer.send(sent);
scheduler.take();
}
long elapsed = System.nanoTime() - start;
double seconds = (double)elapsed / 1000000000.0;
double epsilon = Math.abs(seconds - EXPECTED_TIME);
assertEquals("queue dropped packets", 0, drops.get());
assertTrue("execution time was different than expected: expected=" + EXPECTED_TIME + ", actual=" + seconds, epsilon < EPSILON);
}
/**
* Attaches the given value to all derived RequestBuilders. Note that the
* value is polled when each new RequestBuilder is created.
*/
public <T> RequestTemplate setParam(CaptureRequest.Key<T> key,
Supplier<T> value)
{
mParameters.add(new Parameter<T>(key, value));
return this;
}
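A hedged usage sketch for setParam; template is a hypothetical, already-built RequestTemplate, and CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION is a standard camera2 Key<Integer>:

// Because the supplier is polled for each new RequestBuilder, changing the setting
// later affects subsequent requests without re-registering the parameter.
AtomicInteger exposureIndex = new AtomicInteger(0); // hypothetical app-level setting
template.setParam(CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION, exposureIndex::get);
exposureIndex.set(2); // picked up by the next RequestBuilder created from the template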
private static Supplier<Map<String, Object>> metricsSupplier(final JsonCodec<Map<String, Object>> metricsCodec, final URI metadataUri)
{
return () -> {
try {
byte[] json = getHttpResponse(metadataUri).bytes();
Map<String, Object> metrics = metricsCodec.fromJson(json);
return metrics;
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
catch (URISyntaxException e) {
// URISyntaxException is not an IOException; wrap it rather than casting.
throw new UncheckedIOException(new IOException(e));
}
};
}
private static Supplier<Map<String, Map<String, ExampleTable>>> schemasSupplier(final JsonCodec<Map<String, List<ExampleTable>>> catalogCodec, final URI metadataUri)
{
return () -> {
try {
return lookupSchemas(metadataUri, catalogCodec);
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
};
}
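Suppliers produced by factories like these are typically wrapped with Guava memoization so the remote metadata is fetched at most once; a hedged sketch of that wiring (the field name is illustrative, and ::get is used so it compiles whether the factory returns a Guava or a java.util.function Supplier):

// Lazily fetch and cache the schema map on first access.
private final com.google.common.base.Supplier<Map<String, Map<String, ExampleTable>>> schemas =
    Suppliers.memoize(schemasSupplier(catalogCodec, metadataUri)::get);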
FlashBasedPhotoCommand(Logger.Factory logFactory,
Supplier<OneCamera.PhotoCaptureParameters.Flash> flashMode,
ImageCaptureCommand flashOnCommand,
ImageCaptureCommand flashAutoCommand,
ImageCaptureCommand flashOffCommand)
{
mLog = logFactory.create(new Log.Tag("FlashBasedPhotoCmd"));
mFlashMode = flashMode;
mFlashOnCommand = flashOnCommand;
mFlashAutoCommand = flashAutoCommand;
mFlashOffCommand = flashOffCommand;
}
/** Creates an ArrayTable. */
ArrayTable(Type elementType, RelProtoDataType protoRowType,
Supplier<Content> supplier) {
super(elementType);
this.protoRowType = protoRowType;
this.supplier = supplier;
}
/**
* Call the extension point(s) corresponding to the given id
* <p>
* This iteration is isolated here and guarded by PluginRegistry's lock to protect against ConcurrentModificationException
*
* @param log log channel to pass to extension point call
* @param id the id of the extension point interface
* @param object object to pass to extension point call
*/
public void callExtensionPoint( ILogChannel log, String id, Object object ) throws HopException {
lock.readLock().lock();
try {
if ( extensionPointPluginMap.containsRow( id ) && !extensionPointPluginMap.rowMap().get( id ).values().isEmpty() ) {
for ( Supplier<IExtensionPoint> extensionPoint : extensionPointPluginMap.row( id ).values() ) {
extensionPoint.get().callExtensionPoint( log, object );
}
}
} finally {
lock.readLock().unlock();
}
}
PlaceExecutorRegistryMetrics(IrisMetricSet metrics) {
metrics.gauge("places", (Supplier<Integer>) () -> sample());
metrics.monitor("executor.cache", executors);
// explicitly specify reset on snapshot reservoir because this is really a gauge, so
// accumulating samples across snapshots doesn't make a lot of sense
this.rulesPerPlace = metrics.histogram("rules.total", IrisMetrics.hdrHistogramResetOnSnapshotReservoir());
this.activeRulesPerPlace = metrics.histogram("rules.active", IrisMetrics.hdrHistogramResetOnSnapshotReservoir());
this.scenesPerPlace = metrics.histogram("scenes.total", IrisMetrics.hdrHistogramResetOnSnapshotReservoir());
this.activeScenesPerPlace = metrics.histogram("scenes.active", IrisMetrics.hdrHistogramResetOnSnapshotReservoir());
}
private SchedulerMetrics() {
IrisMetricSet metrics = IrisMetrics.metrics("scheduler");
this.partitionScheduled = metrics.counter("partition.scheduled");
this.partitionCompleted = metrics.counter("partition.completed");
this.partitionErrors = metrics.counter("partition.errors");
this.partitionSchedulingTime = metrics.timer("partition.schedule.time");
this.commandScheduled = metrics.counter("command.scheduled");
this.commandFired = metrics.counter("command.sent");
this.commandExpired = metrics.counter("command.expired");
this.commandError = metrics.counter("command.error");
this.commandRescheduled = metrics.counter("command.rescheduled");
metrics.gauge("partition.count", (Supplier<Integer>) () -> partitions.size());
metrics.gauge("partition.pending", (Supplier<Integer>) () -> activeJobs.size());
}
public List<StorageCredentials> getStorageAzureAccounts() {
List<StorageCredentials> result = new ArrayList<>();
for (int i = 1; true; ++i) {
String rawAccount = "previews.storage.azure.account" + i;
ConfigurationKey confAccount = new ConfigurationKey(rawAccount, KeyParser.parse(rawAccount));
Supplier<String> supAccount = configProvider.getStringSupplier(confAccount, null);
String account = (supAccount == null) ? null : supAccount.get();
if (account == null || account.trim().isEmpty()) {
break;
}
try {
StorageCredentials creds = StorageCredentials.tryParseCredentials(account);
if (creds == null) {
throw new RuntimeException("invalid azure storage credentials");
}
result.add(creds);
} catch (InvalidKeyException ex) {
throw new RuntimeException(ex);
}
}
return result;
}
@Override
public <I> ScheduledTask scheduleDelayed(
Function<I,?> task,
Supplier<I> input,
long timeout,
TimeUnit unit
) {
return scheduleDelayed(new ProdConRunner<I>(task, input), timeout, unit);
}