com.google.common.base.Preconditions Source Code Examples

The following lists example code that uses com.google.common.base.Preconditions, collected from open-source projects on GitHub; each example notes the project and file it was taken from.
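For readers new to the API, here is a minimal, self-contained sketch of the three checks that recur throughout the examples below (the class and values are illustrative, not taken from any of the listed projects):

import com.google.common.base.Preconditions;

public class PreconditionsDemo {
    public static void main(String[] args) {
        String name = "demo";
        // checkNotNull throws NullPointerException on null and returns its argument,
        // so it can be used inline in assignments.
        String checked = Preconditions.checkNotNull(name, "name must not be null");

        // checkArgument throws IllegalArgumentException; the message template only supports %s placeholders.
        Preconditions.checkArgument(checked.length() <= 32, "name %s is too long", checked);

        // checkState throws IllegalStateException; it is meant for object state rather than parameters.
        boolean initialized = true;
        Preconditions.checkState(initialized, "not initialized");
    }
}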

Example 1  Project: bistoury   File: DefaultMonitor.java
@Override
public synchronized boolean startup(InstrumentInfo instrumentInfo) {
    if (status == Status.started) {
        return true;
    }
    if (status != Status.notStart) {
        return false;
    }
    Preconditions.checkNotNull(instrumentInfo, "instrumentation not allowed null");
    this.instrumentInfo = instrumentInfo;
    this.inst = instrumentInfo.getInstrumentation();
    this.lock = instrumentInfo.getLock();
    this.classFileBuffer = instrumentInfo.getClassFileBuffer();

    this.classPathLookup = createClassPathLookup();
    if (classPathLookup == null) {
        status = Status.error;
        return false;
    }
    status = Status.started;
    logger.info("qmonitor started");
    return true;
}
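Since checkNotNull returns the reference it validates, the null check and the assignment above could also be fused; a hypothetical condensed form (not part of the bistoury source) is shown below, and examples 11, 17 and 29 later in this list use exactly this idiom:

    // Hypothetical variant: checkNotNull returns its argument, so check and assignment can be combined.
    this.instrumentInfo = Preconditions.checkNotNull(instrumentInfo, "instrumentation not allowed null");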
 
Example 2  Project: NullAway   File: DefinitelyDerefedParamsDriver.java
/**
 * Write model jar file with nullability model at DEFAULT_ASTUBX_LOCATION
 *
 * @param outPath Path of output model jar file.
 */
private void writeModelJAR(String outPath) throws IOException {
  Preconditions.checkArgument(
      outPath.endsWith(ASTUBX_JAR_SUFFIX), "invalid model file path! " + outPath);
  ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(outPath));
  if (!nonnullParams.isEmpty()) {
    ZipEntry entry = new ZipEntry(DEFAULT_ASTUBX_LOCATION);
    // Set the modification/creation time to 0 to ensure that these jars always have the same
    // checksum
    entry.setTime(0);
    entry.setCreationTime(FileTime.fromMillis(0));
    zos.putNextEntry(entry);
    writeModel(new DataOutputStream(zos));
    zos.closeEntry();
  }
  zos.close();
  LOG(VERBOSE, "Info", "wrote model to: " + outPath);
}
 
Example 3  Project: hadoop   File: RegistrySecurity.java
/**
 * Parse a string down to an ID, adding a realm if needed
 * @param idPair id:data tuple
 * @param realm realm to add
 * @return the ID.
 * @throws IllegalArgumentException if the idPair is invalid
 */
public Id parse(String idPair, String realm) {
  int firstColon = idPair.indexOf(':');
  int lastColon = idPair.lastIndexOf(':');
  if (firstColon == -1 || lastColon == -1 || firstColon != lastColon) {
    throw new IllegalArgumentException(
        "ACL '" + idPair + "' not of expected form scheme:id");
  }
  String scheme = idPair.substring(0, firstColon);
  String id = idPair.substring(firstColon + 1);
  if (id.endsWith("@")) {
    Preconditions.checkArgument(
        StringUtils.isNotEmpty(realm),
        "@ suffixed account but no realm %s", id);
    id = id + realm;
  }
  return new Id(scheme, id);
}
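The message template here uses Guava's lightweight formatting: only %s placeholders are substituted, and the message string is built only if the check fails. A small illustration with made-up values:

    String id = "user@";
    String realm = "";
    // Illustrative: realm is empty, so the check fails; Guava replaces %s with id
    // (only %s is supported, not full printf syntax) and throws
    // IllegalArgumentException("@ suffixed account but no realm user@").
    Preconditions.checkArgument(!realm.isEmpty(), "@ suffixed account but no realm %s", id);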
 
/**
 * Find all {@link ResourceConsumption} instances at a specific consumption level.
 */
public static Map<String, ResourceConsumption> groupBy(CompositeResourceConsumption parent,
                                                       ResourceConsumption.ConsumptionLevel level) {
    Preconditions.checkArgument(parent.getConsumptionLevel().ordinal() <= level.ordinal());

    if (parent.getConsumptionLevel() == level) {
        return Collections.singletonMap(parent.getConsumerName(), parent);
    }

    // If this is the Application level, the next one needs to be the InstanceType level, which is the last one
    if (parent.getConsumptionLevel() == ResourceConsumption.ConsumptionLevel.Application) {
        Preconditions.checkArgument(level == ResourceConsumption.ConsumptionLevel.InstanceType);
        return parent.getContributors();
    }

    Map<String, ResourceConsumption> result = new HashMap<>();
    for (ResourceConsumption nested : parent.getContributors().values()) {
        result.putAll(groupBy((CompositeResourceConsumption) nested, level));
    }
    return result;
}
 
Example 5  Project: bazel-buildfarm   File: MemoryInstance.java
private SetMultimap<String, String> getOperationProvisions(Operation operation)
    throws InterruptedException {

  ExecuteOperationMetadata metadata = expectExecuteOperationMetadata(operation);
  Preconditions.checkState(metadata != null, "metadata not found");

  Action action =
      getUnchecked(
          expect(
              metadata.getActionDigest(),
              Action.parser(),
              newDirectExecutorService(),
              RequestMetadata.getDefaultInstance()));
  Preconditions.checkState(action != null, "action not found");

  Command command =
      getUnchecked(
          expect(
              action.getCommandDigest(),
              Command.parser(),
              newDirectExecutorService(),
              RequestMetadata.getDefaultInstance()));
  Preconditions.checkState(command != null, "command not found");

  return createProvisions(command.getPlatform());
}
 
Example 6  Project: syncer   File: FilterConfig.java
public SyncFilter toFilter(SpelExpressionParser parser) {
  switch (getType()) {
    case SWITCH:
      return new Switch(parser, getSwitcher());
    case STATEMENT:
      return new Statement(parser, getStatement());
    case FOREACH:
      return new ForeachFilter(parser, getForeach());
    case IF:
      return new If(parser, getIf());
    case DROP:
      return new Drop();
    case CREATE:
      try {
        return getCreate().toAction(parser);
      } catch (NoSuchFieldException e) {
        throw new InvalidConfigException("Unknown field of `SyncData` to copy", e);
      }
    case METHOD:
      Preconditions.checkState(filterMeta != null, "Not set filterMeta for method");
      return JavaMethod.build(consumerId, filterMeta, getMethod());
    default:
      throw new InvalidConfigException("Unknown filter type");
  }
}
 
Example 7  Project: arcusplatform   File: DefaultDriverExecutor.java
private DeferredEvent poll() {
   synchronized(lock) {
      Preconditions.checkState(running == Thread.currentThread(), "Attempt to poll() from non-dispatch thread");
      if(stopped) {
         DeferredEvent event = events.poll();
         if(event == null) {
            return null;
         }

         if(canExecute(event)) {
            return event;
         }
         else if(event.event instanceof PlatformMessage) {
            return new CancelledMessageEvent((PlatformMessage) event.event);
         }
         else {
            // dropping it
            return null;
         }
      }
      else if(canExecute(events.peek())) {
         return events.poll();
      }
      return null;
   }
}
 
Example 8  Project: SpinalTap   File: MysqlPipeFactory.java
private Pipe create(
    final MysqlConfiguration sourceConfig,
    final String partitionName,
    final StateRepositoryFactory repositoryFactory,
    final long leaderEpoch)
    throws Exception {
  final Source source = createSource(sourceConfig, repositoryFactory, partitionName, leaderEpoch);
  final DestinationConfiguration destinationConfig = sourceConfig.getDestinationConfiguration();

  Preconditions.checkState(
      !(sourceConfig.getHostRole().equals(MysqlConfiguration.HostRole.MIGRATION)
          && destinationConfig.getPoolSize() > 0),
      String.format(
          "Destination pool size is not 0 for MIGRATION source %s", sourceConfig.getName()));

  final Destination destination = createDestination(sourceConfig, destinationConfig);
  return new Pipe(source, destination, new PipeMetrics(source.getName(), metricRegistry));
}
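One detail worth noting: String.format here runs eagerly, even when the check passes. checkState also accepts a %s template directly, which defers message construction to the failure path; a sketch of that variant, reusing the variables from the snippet above (not from the SpinalTap source):

    Preconditions.checkState(
        !(sourceConfig.getHostRole().equals(MysqlConfiguration.HostRole.MIGRATION)
            && destinationConfig.getPoolSize() > 0),
        "Destination pool size is not 0 for MIGRATION source %s", sourceConfig.getName());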
 
Example 9  Project: iteratorx   File: JdbcWriter.java
public int[] executeBatch(final String sql, final Object[]... records) {
	Preconditions.checkArgument(records.length <= this.batchSize,
			"records size is larger than max batch size, invoke executeBatch(final String sql, final Iterable<Object[]> records) instead of this one");

	try {
		conn = dataSource.getConnection();
		ps = conn.prepareStatement(sql);
		for (final Object[] record : records) {
			for (int i = 0; i < record.length; i++) {
				ps.setObject(i + 1, record[i]);
			}
			ps.addBatch();
		}
		ps.setQueryTimeout(queryTimeout);

		return ps.executeBatch();

	} catch (final SQLException e) {
		throw new RuntimeException(e);

	} finally {
		IOUtils.close(rs, ps, conn);
	}
}
 
Example 10  Project: javaide   File: XmlElement.java
/**
 * Merges two children when this child's type allows multiple element declarations with the
 * same key value. In that case, we only merge the lower priority child if there is not already
 * an element with the same key value that is equal to the lower priority child. Two children
 * are equal if they have the same attributes and children declared, irrespective of the
 * declaration order.
 *
 * @param lowerPriorityChild the lower priority element's child.
 * @param mergingReport the merging report to log errors and actions.
 */
private void mergeChildrenWithMultipleDeclarations(
        XmlElement lowerPriorityChild,
        MergingReport.Builder mergingReport) {

    Preconditions.checkArgument(lowerPriorityChild.getType().areMultipleDeclarationAllowed());
    if (lowerPriorityChild.getType().areMultipleDeclarationAllowed()) {
        for (XmlElement sameTypeChild : getAllNodesByType(lowerPriorityChild.getType())) {
            if (sameTypeChild.getId().equals(lowerPriorityChild.getId()) &&
                    sameTypeChild.isEquals(lowerPriorityChild)) {
                return;
            }
        }
    }
    // if we end up here, we never found a child of this element with the same key and strictly
    // equals to the lowerPriorityChild so we should merge it in.
    addElement(lowerPriorityChild, mergingReport);
}
 
Example 11  Project: dremio-oss   File: BaseTestMiniDFS.java
/**
 * Start a MiniDFS-cluster-backed SabotNode cluster
 * @param testClass name of the test class, used as the base name of the MiniDFS storage directory
 * @param configuration the Hadoop configuration used to bring up the MiniDFS cluster
 * @throws Exception
 */
protected static void startMiniDfsCluster(final String testClass, Configuration configuration) throws Exception {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(testClass), "Expected a non-null and non-empty test class name");
  dfsConf = Preconditions.checkNotNull(configuration);

  // Set the MiniDfs base dir to be the temp directory of the test, so that all files created within the MiniDfs
  // are properly cleanup when test exits.
  miniDfsStoragePath = Files.createTempDirectory(testClass).toString();
  dfsConf.set("hdfs.minidfs.basedir", miniDfsStoragePath);
  // HDFS-8880 and HDFS-8953 introduce metrics logging that requires log4j, but log4j is explicitly
  // excluded in build. So disable logging to avoid NoClassDefFoundError for Log4JLogger.
  dfsConf.set("dfs.namenode.metrics.logger.period.seconds", "0");
  dfsConf.set("dfs.datanode.metrics.logger.period.seconds", "0");

  // Start the MiniDfs cluster
  dfsCluster = new MiniDFSCluster.Builder(dfsConf)
      .numDataNodes(3)
      .format(true)
      .build();

  fs = dfsCluster.getFileSystem();
}
 
Example 12  Project: uavstack   File: ReliableTaildirEventReader.java
/**
 * Create a ReliableTaildirEventReader to watch the given directory. map<serverid.appid.logid, logpath>
 */
private ReliableTaildirEventReader(Map<String, CollectTask> tasks, Table<String, String, String> headerTable,
        boolean skipToEnd, boolean addByteOffset) throws IOException {
    Map<String, LogPatternInfo> filePaths = getFilePaths(tasks);

    // Sanity checks
    Preconditions.checkNotNull(filePaths);
    // get operation system info
    if (log.isDebugEnable()) {
        log.debug(this, "Initializing {" + ReliableTaildirEventReader.class.getSimpleName() + "} with directory={"
                + filePaths + "}");
    }

    // tailFile
    this.tailFileTable = CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.DAYS)
            .<String, LogPatternInfo> build();
    this.headerTable = headerTable;
    this.addByteOffset = addByteOffset;
    this.os = JVMToolHelper.isWindows() ? OS_WINDOWS : null;

    updatelog(filePaths);
    updateTailFiles(skipToEnd);

    log.info(this, "tailFileTable: " + tailFileTable.toString());
    log.info(this, "headerTable: " + headerTable.toString());
}
 
/**
 * Queries a list of Node Statuses.
 */
@Override
public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
    nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
    throws IOException {
  // TODO : We support only cluster wide query right now. So ignoring checking
  // queryScope and poolName
  Preconditions.checkNotNull(nodeStatuses);
  NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
      .setState(nodeStatuses)
      .setTraceID(TracingUtil.exportCurrentSpan())
      .setScope(queryScope).setPoolName(poolName).build();
  NodeQueryResponseProto response = submitRequest(Type.QueryNode,
      builder -> builder.setNodeQueryRequest(request)).getNodeQueryResponse();
  return response.getDatanodesList();

}
 
Example 14  Project: hadoop-ozone   File: IncrementalChunkBuffer.java
/** @return the buffer containing the position. It may allocate buffers. */
private ByteBuffer getAndAllocateAtPosition(int position) {
  Preconditions.checkArgument(position >= 0);
  Preconditions.checkArgument(position < limit);
  final int i = position / increment;
  final ByteBuffer ith = getAndAllocateAtIndex(i);
  assertInt(position%increment, ith.position(), "position", i);
  return ith;
}
 
Example 15  Project: Bats   File: PubSubWebSocketClientBuilder.java
private <T extends PubSubWebSocketClient> T build(Class<T> clazz)
{
  Preconditions.checkState(context != null, "Context not specified");
  String gatewayAddress = context.getValue(LogicalPlan.GATEWAY_CONNECT_ADDRESS);
  if (gatewayAddress != null) {
    int timeout = context.getValue(LogicalPlan.PUBSUB_CONNECT_TIMEOUT_MILLIS);
    boolean gatewayUseSsl = context.getValue(LogicalPlan.GATEWAY_USE_SSL);

    // The builder can be used to build different types of PubSub clients in future but for now only one is supported
    SharedPubSubWebSocketClient wsClient = null;

    try {
      wsClient = new SharedPubSubWebSocketClient((gatewayUseSsl ? "wss://" : "ws://") + gatewayAddress + "/pubsub", timeout);

      String gatewayUserName = context.getValue(LogicalPlan.GATEWAY_USER_NAME);
      String gatewayPassword = context.getValue(LogicalPlan.GATEWAY_PASSWORD);
      if (gatewayUserName != null && gatewayPassword != null) {
        wsClient.setLoginUrl((gatewayUseSsl ? "https://" : "http://") + gatewayAddress + GATEWAY_LOGIN_URL_PATH);
        wsClient.setUserName(gatewayUserName);
        wsClient.setPassword(gatewayPassword);
      }

      return (T)wsClient;
    } catch (URISyntaxException e) {
      logger.warn("Unable to initialize websocket for gateway address {}", gatewayAddress, e);
    }

    return null;
  }

  return null;
}
 
Example 16  Project: flink   File: Configuration.java
/**
 * Gets storage size from a config file.
 *
 * @param name - Key to read.
 * @param defaultValue - The default value to return in case the key is
 * not present.
 * @param targetUnit - The Storage unit that should be used
 * for the return value.
 * @return - double value in the Storage Unit specified.
 */
public double getStorageSize(String name, double defaultValue,
                             StorageUnit targetUnit) {
	Preconditions.checkNotNull(targetUnit, "Conversion unit cannot be null.");
	Preconditions.checkState(isNotBlank(name), "Name cannot be blank.");
	String vString = get(name);
	if (isBlank(vString)) {
		return targetUnit.getDefault(defaultValue);
	}

	StorageSize measure = StorageSize.parse(vString);
	return convertStorageUnit(measure.getValue(), measure.getUnit(),
			targetUnit);

}
 
Example 17  Project: xian   File: CuratorZookeeperClient.java
/**
 * @param zookeeperFactory factory for creating {@link ZooKeeper} instances
 * @param ensembleProvider the ensemble provider
 * @param sessionTimeoutMs session timeout
 * @param connectionTimeoutMs connection timeout
 * @param watcher default watcher or null
 * @param retryPolicy the retry policy to use
 * @param canBeReadOnly if true, allow ZooKeeper client to enter
 *                      read only mode in case of a network partition. See
 *                      {@link ZooKeeper#ZooKeeper(String, int, Watcher, long, byte[], boolean)}
 *                      for details
 */
public CuratorZookeeperClient(ZookeeperFactory zookeeperFactory, EnsembleProvider ensembleProvider, int sessionTimeoutMs, int connectionTimeoutMs, Watcher watcher, RetryPolicy retryPolicy, boolean canBeReadOnly)
{
    if ( sessionTimeoutMs < connectionTimeoutMs )
    {
        log.warn(String.format("session timeout [%d] is less than connection timeout [%d]", sessionTimeoutMs, connectionTimeoutMs));
    }

    retryPolicy = Preconditions.checkNotNull(retryPolicy, "retryPolicy cannot be null");
    ensembleProvider = Preconditions.checkNotNull(ensembleProvider, "ensembleProvider cannot be null");

    this.connectionTimeoutMs = connectionTimeoutMs;
    state = new ConnectionState(zookeeperFactory, ensembleProvider, sessionTimeoutMs, connectionTimeoutMs, watcher, tracer, canBeReadOnly);
    setRetryPolicy(retryPolicy);
}
 
Example 18  Project: ic   File: TypeServiceImpl.java
private void initializeBaseType() {
    URL resource = getClass().getClassLoader().getResource(BASE_TYPE_FILE_NAME);
    Preconditions.checkNotNull(resource, BASE_TYPE_FILE_NAME + "文件不存在");
    try {
        baseType = objectMapper.readValue(resource, Type.class);
    } catch (IOException e) {
        throw new ICException("从" + BASE_TYPE_FILE_NAME + "中创建BaseType异常", e);
    }
}
 
Example 19  Project: dremio-oss   File: MoreRelOptUtil.java
private ContainsRexVisitor(boolean checkOrigin, RelMetadataQuery mq, RelNode node, int index) {
  super(true);
  this.checkOrigin = checkOrigin;
  this.mq = mq;
  this.node = node;
  this.index = index;
  Preconditions.checkArgument(!checkOrigin || (mq != null && node != null));
}
 
Example 20  Project: dremio-oss   File: IcebergCatalog.java
void truncateTable() {
  Preconditions.checkState(transaction == null, "Unexpected state");
  IcebergTableOperations tableOperations = new IcebergTableOperations(fsPath, configuration);
  table = new BaseTable(tableOperations, fsPath.getName());
  transaction = table.newTransaction();
  transaction.newDelete().deleteFromRowFilter(Expressions.alwaysTrue()).commit();
  transaction.commitTransaction();
  transaction = null;
}
 
Example 21  Project: aion   File: Slice.java
public int setBytes(int index, FileChannel in, int position, int length) throws IOException {
    Preconditions.checkPositionIndexes(index, index + length, this.length);
    index += offset;
    ByteBuffer buf = ByteBuffer.wrap(data, index, length);
    int readBytes = 0;

    do {
        int localReadBytes;
        try {
            localReadBytes = in.read(buf, position + readBytes);
        } catch (ClosedChannelException e) {
            localReadBytes = -1;
        }
        if (localReadBytes < 0) {
            if (readBytes == 0) {
                return -1;
            } else {
                break;
            }
        } else if (localReadBytes == 0) {
            break;
        }
        readBytes += localReadBytes;
    } while (readBytes < length);

    return readBytes;
}
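checkPositionIndexes(start, end, size) verifies that 0 <= start <= end <= size and throws IndexOutOfBoundsException otherwise; in this example it guards the [index, index + length) range against the slice length before wrapping the backing array. A brief illustration with made-up values:

    int start = 2, end = 6, size = 8;
    Preconditions.checkPositionIndexes(start, end, size);  // passes: 0 <= 2 <= 6 <= 8
    Preconditions.checkPositionIndexes(6, 10, size);       // throws IndexOutOfBoundsException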
 
Example 22  Project: aion-germany   File: PlayerAllianceService.java
/**
 * Move members between alliance groups
 */
public static void changeMemberGroup(Player player, int firstPlayer, int secondPlayer, int allianceGroupId) {
	PlayerAlliance alliance = player.getPlayerAlliance2();
	Preconditions.checkNotNull(alliance, "Alliance should not be null for group change");
	if (alliance.isLeader(player) || alliance.isViceCaptain(player)) {
		alliance.onEvent(new ChangeMemberGroupEvent(alliance, firstPlayer, secondPlayer, allianceGroupId));
	}
	else {
		PacketSendUtility.sendMessage(player, "You do not have the authority for that.");
	}
}
 
Example 23  Project: xrpc   File: XUrl.java
public static String rawQueryParameters(String url) {
  Preconditions.checkNotNull(url);
  int paramStartIndex = url.indexOf("?");
  if (paramStartIndex == -1) {
    return url;
  } else {
    return url.substring(paramStartIndex, url.length());
  }
}
 
Example 24  Project: onedev   File: DefaultProjectManager.java
@Inject
public DefaultProjectManager(Dao dao, CommitInfoManager commitInfoManager,  
		BuildManager buildManager, AvatarManager avatarManager, GroupManager groupManager,
		SettingManager settingManager, TransactionManager transactionManager, 
		SessionManager sessionManager, ListenerRegistry listenerRegistry, 
		TaskScheduler taskScheduler, UserAuthorizationManager userAuthorizationManager, 
		RoleManager roleManager) {
	super(dao);
	
    this.commitInfoManager = commitInfoManager;
    this.buildManager = buildManager;
    this.groupManager = groupManager;
    this.avatarManager = avatarManager;
    this.settingManager = settingManager;
    this.transactionManager = transactionManager;
    this.sessionManager = sessionManager;
    this.listenerRegistry = listenerRegistry;
    this.taskScheduler = taskScheduler;
    this.userAuthorizationManager = userAuthorizationManager;
    this.roleManager = roleManager;
    
    try (InputStream is = getClass().getClassLoader().getResourceAsStream("git-receive-hook")) {
    	Preconditions.checkNotNull(is);
        gitReceiveHook = StringUtils.join(IOUtils.readLines(is, Charset.defaultCharset()), "\n");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

}
 
Example 25  Project: dremio-oss   File: IcebergCatalog.java
public IcebergCatalog(String path, Configuration configuration) {
  Preconditions.checkArgument(path != null && !path.isEmpty(), "Table path must be provided");
  Preconditions.checkArgument(configuration != null, "Configuration must be provided");
  fsPath = new Path(path);
  this.configuration = configuration;
  transaction = null;
  table = null;
}
 
Example 26  Project: dremio-oss   File: JobTelemetryServiceImpl.java
@Override
public void putQueryTailProfile(
  PutTailProfileRequest request, StreamObserver<Empty> responseObserver) {
  try {
    QueryId queryId = request.getQueryId();
    Preconditions.checkNotNull(queryId);

    // update tail profile.
    profileStore.putTailProfile(queryId, request.getProfile());

    // TODO: ignore errors ??
    if (saveFullProfileOnQueryTermination) {
      saveFullProfileAndDeletePartial(queryId);
    }
    responseObserver.onNext(Empty.getDefaultInstance());
    responseObserver.onCompleted();
  } catch (IllegalArgumentException e) {
    responseObserver.onError(
      Status.INVALID_ARGUMENT
        .withDescription("put tail profile failed " + e.getMessage())
        .asRuntimeException());
  } catch (Exception ex) {
    logger.error("put tail profile failed", ex);
    responseObserver.onError(
      Status.INTERNAL.withDescription(ex.getMessage()).asRuntimeException());
  }
}
 
Example 27  Project: datawave   File: EvaluationRendering.java
public static boolean canDisableEvaluation(JexlNode script, ShardQueryConfiguration config, MetadataHelper helper, boolean allowRange) {
    Preconditions.checkNotNull(script);
    
    AtomicBoolean res = new AtomicBoolean(true);
    if (log.isTraceEnabled()) {
        log.trace(JexlStringBuildingVisitor.buildQuery(script));
    }
    EvaluationRendering visitor = new EvaluationRendering(config, helper);
    
    visitor.allowRange = allowRange;
    
    script.jjtAccept(visitor, res);
    
    return res.get();
}
 
Example 28  Project: kylin-on-parquet-v2   File: SortedIteratorMerger.java
@Override
public E next() {
    PeekingImpl<E> poll = heap.poll();
    E current = poll.next();
    if (poll.hasNext()) {

        //TODO: remove this check when validated
        Preconditions.checkState(comparator.compare(current, poll.peek()) < 0,
                "Not sorted! current: " + current + " Next: " + poll.peek());

        heap.offer(poll);
    }
    return current;
}
 
Example 29  Project: dremio-oss   File: InjectingSerializer.java
public InjectingSerializer(final Kryo kryo, final Class type, final InjectionMapping mapping) {
  super(kryo, type);
  this.mapping = Preconditions.checkNotNull(mapping, "injection mapping is required");
  config.setIgnoreSyntheticFields(true);
  config.setFieldsAsAccessible(true);
  transformFields();
}
 
@Inject
public PortabilityAuthServiceProviderRegistry(
    Map<String, AuthServiceExtension> serviceProviderMap) {

  ImmutableMap.Builder<String, AuthServiceExtension> serviceProviderBuilder =
      ImmutableMap.builder();
  ImmutableSet.Builder<String> supportedImportTypesBuilder = ImmutableSet.builder();
  ImmutableSet.Builder<String> supportedExportTypesBuilder = ImmutableSet.builder();

  serviceProviderMap.forEach(
      (service, provider) -> {
        List<String> importTypes = provider.getImportTypes();
        List<String> exportTypes = provider.getExportTypes();
        for (String type : importTypes) {
          Preconditions.checkArgument(
              exportTypes.contains(type),
              "TransferDataType [%s] is available for import but not export in [%s] AuthServiceExtension",
              type,
              service);
          supportedImportTypesBuilder.add(type);
        }
        supportedExportTypesBuilder.addAll(exportTypes);
        serviceProviderBuilder.put(service, provider);
      });

  authServiceProviderMap = serviceProviderBuilder.build();
  supportedImportTypes = supportedImportTypesBuilder.build();
  supportedExportTypes = supportedExportTypesBuilder.build();
}