The examples below show how to use the com.google.protobuf.Duration API class; you can also follow the links to view the source code on GitHub.
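Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of how a com.google.protobuf.Duration is typically built and converted with the com.google.protobuf.util.Durations helpers; the class and variable names are illustrative only:
import com.google.protobuf.Duration;
import com.google.protobuf.util.Durations;

public class DurationSketch {
  public static void main(String[] args) throws Exception {
    // Build a Duration directly; nanos must be in [0, 999_999_999].
    Duration d = Duration.newBuilder().setSeconds(90).setNanos(500_000_000).build();
    System.out.println(Durations.toString(d)); // "90.500s"
    System.out.println(Durations.toMillis(d)); // 90500
    // Convert from other representations.
    Duration fromMillis = Durations.fromMillis(1500);   // 1.5 s
    Duration parsed = Durations.parse("3.000000001s");  // throws ParseException on bad input
    System.out.println(Durations.toString(fromMillis) + " " + Durations.toString(parsed));
  }
}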
@Before
public void setUp() throws Exception {
Instant now = Instant.now();
createdAt = Timestamp.newBuilder().setSeconds(now.getEpochSecond()).setNanos(now.getNano()).build();
message = TestMessage.newBuilder()
.setOrderNumber("order-1")
.setOrderUrl("order-url")
.setOrderDetails("order-details")
.setCreatedAt(createdAt)
.setStatus(Status.COMPLETED)
.putCurrentState("payment", "cash")
.setUserToken(ByteString.copyFrom("token".getBytes()))
.setTripDuration(Duration.newBuilder().setSeconds(2).setNanos(0).build()) // 2 s total; Duration nanos must lie in [0, 999999999]
.addAliases("alias1").addAliases("alias2").addAliases("alias3")
.setOrderDate(com.google.type.Date.newBuilder().setYear(1996).setMonth(11).setDay(11))
.build();
}
@Test
public void testRetryableExceptionWithDelay() {
RetryInfo retryInfo =
RetryInfo.newBuilder()
.setRetryDelay(Duration.newBuilder().setSeconds(22L))
.build();
Metadata errorMetadata = new Metadata();
errorMetadata.put(ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()), retryInfo);
StatusRuntimeException retryableException =
new StatusRuntimeException(Status.RESOURCE_EXHAUSTED, errorMetadata);
assertThat(createR2dbcException(retryableException))
.isInstanceOf(R2dbcTransientResourceException.class);
}
/**
* Determine whether the feature data in the given feature row is outside maxAge. Data is outside
* maxAge when the difference between the ingestion time set in the feature row and the retrieval
* time set in the entity row exceeds the feature set's max age.
*
* @param featureSetRequest contains the spec from which the feature's max age is extracted.
* @param entityRow contains the retrieval time at which features are pulled.
* @param featureRow contains the ingestion timing and feature data.
*/
private static boolean checkOutsideMaxAge(
FeatureSetRequest featureSetRequest, EntityRow entityRow, Optional<FeatureRow> featureRow) {
Duration maxAge = featureSetRequest.getSpec().getMaxAge();
if (featureRow.isEmpty()) { // no data to consider
return false;
}
if (maxAge.equals(Duration.getDefaultInstance())) { // max age is not set
return false;
}
long givenTimestamp = entityRow.getEntityTimestamp().getSeconds();
if (givenTimestamp == 0) {
givenTimestamp = System.currentTimeMillis() / 1000;
}
long timeDifference = givenTimestamp - featureRow.get().getEventTimestamp().getSeconds();
return timeDifference > maxAge.getSeconds();
}
/**
* Generate the information necessary for the SQL templating of the point-in-time correctness join
* against the entity dataset, for each feature set requested.
*
* @param featureSetRequests List of {@link FeatureSetRequest} containing a {@link FeatureSetSpec}
* and its corresponding {@link FeatureReference}s provided by the user.
* @return List of {@link FeatureSetQueryInfo}s
*/
public static List<FeatureSetQueryInfo> getFeatureSetInfos(
List<FeatureSetRequest> featureSetRequests) throws IllegalArgumentException {
List<FeatureSetQueryInfo> featureSetInfos = new ArrayList<>();
for (FeatureSetRequest featureSetRequest : featureSetRequests) {
FeatureSetSpec spec = featureSetRequest.getSpec();
Duration maxAge = spec.getMaxAge();
List<String> fsEntities =
spec.getEntitiesList().stream().map(EntitySpec::getName).collect(Collectors.toList());
List<FeatureReference> features = featureSetRequest.getFeatureReferences().asList();
featureSetInfos.add(
new FeatureSetQueryInfo(
spec.getProject(), spec.getName(), maxAge.getSeconds(), fsEntities, features, ""));
}
return featureSetInfos;
}
@Override
protected void validateAction(
String operationName,
Action action,
PreconditionFailure.Builder preconditionFailure,
RequestMetadata requestMetadata)
throws InterruptedException, StatusException {
if (action.hasTimeout() && config.hasMaximumActionTimeout()) {
Duration timeout = action.getTimeout();
Duration maximum = config.getMaximumActionTimeout();
if (timeout.getSeconds() > maximum.getSeconds()
|| (timeout.getSeconds() == maximum.getSeconds()
&& timeout.getNanos() > maximum.getNanos())) {
preconditionFailure
.addViolationsBuilder()
.setType(VIOLATION_TYPE_INVALID)
.setSubject(Durations.toString(timeout) + " > " + Durations.toString(maximum))
.setDescription(TIMEOUT_OUT_OF_BOUNDS);
}
}
super.validateAction(operationName, action, preconditionFailure, requestMetadata);
}
@Override
protected void validateAction(
Action action,
@Nullable Command command,
Map<Digest, Directory> directoriesIndex,
Consumer<Digest> onInputDigest,
PreconditionFailure.Builder preconditionFailure) {
if (action.hasTimeout() && hasMaxActionTimeout()) {
Duration timeout = action.getTimeout();
if (timeout.getSeconds() > maxActionTimeout.getSeconds()
|| (timeout.getSeconds() == maxActionTimeout.getSeconds()
&& timeout.getNanos() > maxActionTimeout.getNanos())) {
preconditionFailure
.addViolationsBuilder()
.setType(VIOLATION_TYPE_INVALID)
.setSubject(Durations.toString(timeout) + " > " + Durations.toString(maxActionTimeout))
.setDescription(TIMEOUT_OUT_OF_BOUNDS);
}
}
super.validateAction(action, command, directoriesIndex, onInputDigest, preconditionFailure);
}
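The two validateAction overloads above compare timeouts field by field (seconds first, then nanos). As a hedged alternative, and assuming both values are well-formed, the same ordering check can be written with Durations.compare from protobuf-java-util; the variable names below simply mirror the first snippet:
// Equivalent to: timeout.getSeconds() > maximum.getSeconds()
//   || (timeout.getSeconds() == maximum.getSeconds() && timeout.getNanos() > maximum.getNanos())
if (Durations.compare(timeout, maximum) > 0) {
  preconditionFailure
      .addViolationsBuilder()
      .setType(VIOLATION_TYPE_INVALID)
      .setSubject(Durations.toString(timeout) + " > " + Durations.toString(maximum))
      .setDescription(TIMEOUT_OUT_OF_BOUNDS);
}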
private void removeStaleData(Timestamp now) {
// currentSlot != lastUsedSlot means stepping over to a new bucket.
// The data in the new bucket should be thrown away before storing new data.
int currentSlot = getCurrentSlot(now);
if (lastOperationCompleteTime != null && lastUsedSlot >= 0) {
Duration duration = Timestamps.between(lastOperationCompleteTime, now);
// Reset all buckets if either 1) the duration between the newly added operation and the last
// one is longer than the period, or 2) the duration is shorter than the period but longer than
// the time range of a single bucket while currentSlot == lastUsedSlot.
if ((Durations.toMillis(duration) >= Durations.toMillis(period))
|| (lastUsedSlot == currentSlot
&& Durations.toMillis(duration) > (Durations.toMillis(period) / buckets.length))) {
for (OperationStageDurations bucket : buckets) {
bucket.reset();
}
} else if (lastUsedSlot != currentSlot) {
// currentSlot < lastUsedSlot means wrap around happened
currentSlot = currentSlot < lastUsedSlot ? currentSlot + NumOfSlots : currentSlot;
for (int i = lastUsedSlot + 1; i <= currentSlot; i++) {
buckets[i % buckets.length].reset();
}
}
}
}
WorkerContext createTestContext(Platform platform, Iterable<ExecutionPolicy> policies) {
return new ShardWorkerContext(
"test",
platform,
/* operationPollPeriod=*/ Duration.getDefaultInstance(),
/* operationPoller=*/ (queueEntry, stage, requeueAt) -> {
return false;
},
/* inlineContentLimit=*/ 0,
/* inputFetchStageWidth=*/ 0,
/* executeStageWidth=*/ 0,
backplane,
execFileSystem,
inputStreamFactory,
policies,
instance,
/* deadlineAfter=*/ 0,
/* deadlineAfterUnits=*/ SECONDS,
/* defaultActionTimeout=*/ Duration.getDefaultInstance(),
/* maximumActionTimeout=*/ Duration.getDefaultInstance(),
/* limitExecution=*/ false,
/* limitGlobalExecution=*/ false,
/* onlyMulticoreTests=*/ false);
}
private static void createUptimeCheck(
String projectId, String displayName, String hostName, String pathName) throws IOException {
CreateUptimeCheckConfigRequest request =
CreateUptimeCheckConfigRequest.newBuilder()
.setParent(ProjectName.format(projectId))
.setUptimeCheckConfig(
UptimeCheckConfig.newBuilder()
.setDisplayName(displayName)
.setMonitoredResource(
MonitoredResource.newBuilder()
.setType("uptime_url")
.putLabels("host", hostName))
.setHttpCheck(HttpCheck.newBuilder().setPath(pathName).setPort(80))
.setTimeout(Duration.newBuilder().setSeconds(10))
.setPeriod(Duration.newBuilder().setSeconds(300)))
.build();
try (UptimeCheckServiceClient client = UptimeCheckServiceClient.create()) {
UptimeCheckConfig config = client.createUptimeCheckConfig(request);
System.out.println("Uptime check created: " + config.getDisplayName());
} catch (Exception e) {
usage("Exception creating uptime check: " + e.toString());
throw e;
}
}
private static void updateUptimeCheck(
String projectId, String displayName, String hostName, String pathName) throws IOException {
String fullCheckName = UptimeCheckConfigName.format(projectId, displayName);
UpdateUptimeCheckConfigRequest request =
UpdateUptimeCheckConfigRequest.newBuilder()
.setUpdateMask(FieldMask.newBuilder().addPaths("http_check.path"))
.setUptimeCheckConfig(
UptimeCheckConfig.newBuilder()
.setName(fullCheckName)
.setMonitoredResource(
MonitoredResource.newBuilder()
.setType("uptime_url")
.putLabels("host", hostName))
.setHttpCheck(HttpCheck.newBuilder().setPath(pathName).setPort(80))
.setTimeout(Duration.newBuilder().setSeconds(10))
.setPeriod(Duration.newBuilder().setSeconds(300)))
.build();
try (UptimeCheckServiceClient client = UptimeCheckServiceClient.create()) {
UptimeCheckConfig config = client.updateUptimeCheckConfig(request);
System.out.println("Uptime check updated: \n" + config.toString());
} catch (Exception e) {
usage("Exception updating uptime check: " + e.toString());
throw e;
}
}
/**
* Logs the client header. This method logs the appropriate number of bytes
* as determined by the binary logging configuration.
*/
abstract void logClientHeader(
long seq,
String methodName,
// not all transports have the concept of authority
@Nullable String authority,
@Nullable Duration timeout,
Metadata metadata,
GrpcLogEntry.Logger logger,
long callId,
// null on client side
@Nullable SocketAddress peerAddress);
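The timeout parameter of logClientHeader is a protobuf Duration rather than a gRPC Deadline. Purely as an illustration of how a caller might obtain such a value (an assumption, not the library's actual wiring), the remaining time on the current io.grpc.Context deadline can be converted as follows; timeoutFromContext is a hypothetical helper name:
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import com.google.protobuf.Duration;
import com.google.protobuf.util.Durations;
import io.grpc.Context;
import io.grpc.Deadline;

// Hypothetical helper: derive the @Nullable Duration expected by logClientHeader from the
// calling context's deadline; returns null when no deadline is set.
@Nullable
static Duration timeoutFromContext() {
  Deadline deadline = Context.current().getDeadline();
  if (deadline == null) {
    return null;
  }
  return Durations.fromNanos(deadline.timeRemaining(TimeUnit.NANOSECONDS));
}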
private FeatureSetSpec getFeatureSetSpec() {
return FeatureSetSpec.newBuilder()
.setProject("project")
.setName("featureSet")
.addEntities(EntitySpec.newBuilder().setName("entity1"))
.addEntities(EntitySpec.newBuilder().setName("entity2"))
.addFeatures(FeatureSpec.newBuilder().setName("feature1"))
.addFeatures(FeatureSpec.newBuilder().setName("feature2"))
.setMaxAge(Duration.newBuilder().setSeconds(30)) // default
.build();
}
@Before
public void setUp() {
SourceProto.Source oldSource =
SourceProto.Source.newBuilder()
.setType(SourceType.KAFKA)
.setKafkaSourceConfig(
KafkaSourceConfig.newBuilder()
.setBootstrapServers("kafka:9092")
.setTopic("mytopic"))
.build();
oldFeatureSetProto =
FeatureSetProto.FeatureSet.newBuilder()
.setSpec(
FeatureSetSpec.newBuilder()
.setName("featureSet")
.setProject("project")
.setMaxAge(Duration.newBuilder().setSeconds(100))
.setSource(oldSource)
.addFeatures(
FeatureSpec.newBuilder().setName("feature1").setValueType(Enum.INT64))
.addFeatures(
FeatureSpec.newBuilder().setName("feature2").setValueType(Enum.STRING))
.addEntities(
EntitySpec.newBuilder().setName("entity").setValueType(Enum.STRING))
.build())
.build();
}
@Override
public void doMerge(JsonParser parser, int unused, Message.Builder messageBuilder)
throws IOException {
Duration.Builder builder = (Duration.Builder) messageBuilder;
try {
builder.mergeFrom(Durations.parse(ParseSupport.parseString(parser)));
} catch (ParseException e) {
throw new InvalidProtocolBufferException(
"Failed to readValue duration: " + parser.getText());
}
}
public Details getServerStatus() {
RuntimeMXBean rb = ManagementFactory.getRuntimeMXBean();
Duration uptime = Durations.fromMillis(rb.getUptime());
if (!leaderActivator.isLeader()) {
return Details.newBuilder()
.setStatus(NOT_SERVING)
.setLeader(false)
.setActive(false)
.setUptime(uptime)
.build();
}
boolean active = leaderActivator.isActivated();
List<ServiceActivation> serviceActivations = activationLifecycle.getServiceActionTimesMs().stream()
.sorted(Comparator.comparing(Pair::getRight))
.map(pair -> ServiceActivation.newBuilder()
.setName(pair.getLeft())
.setActivationTime(Durations.fromMillis(pair.getRight()))
.build())
.collect(Collectors.toList());
Details.Builder details = Details.newBuilder()
.setStatus(SERVING)
.setCell(cellInfoResolver.getCellName())
.setLeader(true)
.setActive(active)
.setUptime(uptime)
.setElectionTimestamp(Timestamps.fromMillis(leaderActivator.getElectionTimestamp()))
.setActivationTimestamp(Timestamps.fromMillis(leaderActivator.getActivationEndTimestamp()))
.addAllServiceActivationTimes(serviceActivations);
if (active) {
details.setActivationTime(Durations.fromMillis(leaderActivator.getActivationTime()));
}
return details.build();
}
private void onDispatched(Operation operation) {
final String operationName = operation.getName();
Duration timeout = config.getOperationPollTimeout();
Watchdog requeuer =
new Watchdog(
timeout,
() -> {
logger.log(Level.INFO, format("REQUEUEING %s", operation.getName()));
requeuers.remove(operationName);
requeueOperation(operation);
});
requeuers.put(operation.getName(), requeuer);
new Thread(requeuer).start();
}
private Duration getWaitTime() {
checkNotNull(periodDeadline);
Deadline waitDeadline = expirationDeadline.minimum(periodDeadline);
long waitMicros = waitDeadline.timeRemaining(MICROSECONDS);
if (waitMicros <= 0) {
return Duration.getDefaultInstance();
}
return Durations.fromMicros(waitMicros);
}
private void waitForNextDeadline() {
try {
Duration waitTime = getWaitTime();
if (waitTime.getSeconds() != 0 || waitTime.getNanos() != 0) {
wait(
waitTime.getSeconds() * 1000 + waitTime.getNanos() / 1000000,
waitTime.getNanos() % 1000000);
}
} catch (InterruptedException e) {
running = false;
}
}
public Watchdog(Duration petTimeout, InterruptingRunnable runnable) {
this.runnable = runnable;
this.petTimeout = petTimeout;
stopped = false;
done = false;
pet();
}
private static Retrier createStubRetrier() {
return new Retrier(
Backoff.exponential(
java.time.Duration.ofMillis(/*options.experimentalRemoteRetryStartDelayMillis=*/ 100),
java.time.Duration.ofMillis(/*options.experimentalRemoteRetryMaxDelayMillis=*/ 5000),
/*options.experimentalRemoteRetryMultiplier=*/ 2,
/*options.experimentalRemoteRetryJitter=*/ 0.1,
/*options.experimentalRemoteRetryMaxAttempts=*/ 5),
Retrier.DEFAULT_IS_RETRIABLE,
retryScheduler);
}
private static Retrier createBackplaneRetrier() {
return new Retrier(
Backoff.exponential(
java.time.Duration.ofMillis(/*options.experimentalRemoteRetryStartDelayMillis=*/ 100),
java.time.Duration.ofMillis(/*options.experimentalRemoteRetryMaxDelayMillis=*/ 5000),
/*options.experimentalRemoteRetryMultiplier=*/ 2,
/*options.experimentalRemoteRetryJitter=*/ 0.1,
/*options.experimentalRemoteRetryMaxAttempts=*/ 5),
Retrier.REDIS_IS_RETRIABLE);
}
private synchronized void decMounts() {
if (--mounts == 0 && mountPath != null) {
logger.log(Level.INFO, "Scheduling FuseCAS unmount in 10s");
unmounter =
new Watchdog(
Duration.newBuilder().setSeconds(10).setNanos(0).build(),
() -> {
logger.log(Level.INFO, "Unmounting FuseCAS");
umount();
mounted = false;
});
new Thread(unmounter).start();
}
}
private static float millisecondBetween(Timestamp from, Timestamp to) {
// The time unit we want is millisecond.
// 1 second = 1000 milliseconds
// 1 millisecond = 1,000,000 nanoseconds
Duration d = Timestamps.between(from, to);
return d.getSeconds() * 1000.0f + d.getNanos() / (1000.0f * 1000.0f);
}
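If whole milliseconds are enough, the same difference can be computed with the Timestamps/Durations helpers already used above; the float arithmetic in millisecondBetween is only needed for sub-millisecond precision. A minimal hedged equivalent:
// Whole milliseconds between two Timestamps (sub-millisecond precision is truncated).
long wholeMillis = Durations.toMillis(Timestamps.between(from, to));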
@Test
public void actionWithExcessiveTimeoutFailsValidation()
throws InterruptedException, InvalidProtocolBufferException {
Duration timeout = Durations.fromSeconds(9000);
Digest actionDigestWithExcessiveTimeout = createAction(Action.newBuilder().setTimeout(timeout));
Watcher watcher = mock(Watcher.class);
IllegalStateException timeoutInvalid = null;
instance.execute(
actionDigestWithExcessiveTimeout,
/* skipCacheLookup=*/ true,
ExecutionPolicy.getDefaultInstance(),
ResultsCachePolicy.getDefaultInstance(),
RequestMetadata.getDefaultInstance(),
watcher);
ArgumentCaptor<Operation> watchCaptor = ArgumentCaptor.forClass(Operation.class);
verify(watcher, times(1)).observe(watchCaptor.capture());
Operation watchOperation = watchCaptor.getValue();
assertThat(watchOperation.getResponse().is(ExecuteResponse.class)).isTrue();
Status status = watchOperation.getResponse().unpack(ExecuteResponse.class).getStatus();
assertThat(status.getCode()).isEqualTo(Code.FAILED_PRECONDITION.getNumber());
assertThat(status.getDetailsCount()).isEqualTo(1);
assertThat(status.getDetails(0).is(PreconditionFailure.class)).isTrue();
PreconditionFailure preconditionFailure =
status.getDetails(0).unpack(PreconditionFailure.class);
assertThat(preconditionFailure.getViolationsList())
.contains(
Violation.newBuilder()
.setType(VIOLATION_TYPE_INVALID)
.setSubject(
Durations.toString(timeout)
+ " > "
+ Durations.toString(MAXIMUM_ACTION_TIMEOUT))
.setDescription(TIMEOUT_OUT_OF_BOUNDS)
.build());
}
@Test
public void itSetsDurationWhenPresentInJson() throws IOException {
String json = "{\"duration\":\"30s\"}";
HasOneof message = camelCase().readValue(json, HasOneof.class);
switch (message.getOneofCase()) {
case DURATION:
assertThat(message.getDuration()).isEqualTo(Duration.newBuilder().setSeconds(30).build());
break;
default:
fail("Unexpected oneof set: " + message.getOneofCase());
}
}
/** Event specific subscriber method. */
@Subscribe
public void onActionEventTerminal(RemoteExecutionActionEvent.Terminal event) {
hasFirstRemoteActionStarted.set(true);
getStateCount(State.WAITING).decrement();
getStateCount(event.getState()).increment();
if (event.getExecutedActionMetadata().isPresent()) {
ExecutedActionInfo executedActionInfo =
event.getRemoteExecutionMetadata().get().getExecutedActionInfo();
remoteCpuTimeMs.add(TimeUnit.MICROSECONDS.toMillis(executedActionInfo.getCpuStatUsageUsec()));
Duration queueDuration =
Timestamps.between(
event.getExecutedActionMetadata().get().getQueuedTimestamp(),
event.getExecutedActionMetadata().get().getWorkerStartTimestamp());
remoteQueueTimeMs.add(
TimeUnit.SECONDS.toMillis(queueDuration.getSeconds())
+ TimeUnit.NANOSECONDS.toMillis(queueDuration.getNanos()));
Duration totalDuration =
Timestamps.between(
event.getExecutedActionMetadata().get().getWorkerStartTimestamp(),
event.getExecutedActionMetadata().get().getWorkerCompletedTimestamp());
totalRemoteTimeMs.add(
TimeUnit.SECONDS.toMillis(totalDuration.getSeconds())
+ TimeUnit.NANOSECONDS.toMillis(totalDuration.getNanos()));
}
}
@Test
public void testDurationIsNotProto2() {
// Duration is a core Protocol Buffers type that uses proto3 syntax.
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage(Duration.class.getCanonicalName());
thrown.expectMessage("in file " + Duration.getDescriptor().getFile().getName());
checkProto2Syntax(Duration.class, ExtensionRegistry.getEmptyRegistry());
}
/** Demonstrates listing time series and aggregating them. */
void listTimeSeriesAggregrate() throws IOException {
// [START monitoring_read_timeseries_align]
MetricServiceClient metricServiceClient = MetricServiceClient.create();
String projectId = System.getProperty("projectId");
ProjectName name = ProjectName.of(projectId);
// Restrict time to last 20 minutes
long startMillis = System.currentTimeMillis() - ((60 * 20) * 1000);
TimeInterval interval =
TimeInterval.newBuilder()
.setStartTime(Timestamps.fromMillis(startMillis))
.setEndTime(Timestamps.fromMillis(System.currentTimeMillis()))
.build();
Aggregation aggregation =
Aggregation.newBuilder()
.setAlignmentPeriod(Duration.newBuilder().setSeconds(600).build())
.setPerSeriesAligner(Aggregation.Aligner.ALIGN_MEAN)
.build();
ListTimeSeriesRequest.Builder requestBuilder =
ListTimeSeriesRequest.newBuilder()
.setName(name.toString())
.setFilter("metric.type=\"compute.googleapis.com/instance/cpu/utilization\"")
.setInterval(interval)
.setAggregation(aggregation);
ListTimeSeriesRequest request = requestBuilder.build();
ListTimeSeriesPagedResponse response = metricServiceClient.listTimeSeries(request);
System.out.println("Got timeseries: ");
for (TimeSeries ts : response.iterateAll()) {
System.out.println(ts);
}
// [END monitoring_read_timeseries_align]
}