The following examples show how to use the com.google.common.util.concurrent.ListeningExecutorService API, including instance code and common usage patterns; you can also follow the links to view the full source code of each example on GitHub.
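Before the project examples, here is a minimal, self-contained sketch of the core workflow: wrap a plain ExecutorService with MoreExecutors.listeningDecorator(), submit a task to get a ListenableFuture, and attach a completion callback. The class name and task body are illustrative placeholders, not taken from any of the projects below.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class ListeningExecutorServiceBasics {
  public static void main(String[] args) {
    // Wrap a standard ExecutorService so that submit() returns ListenableFuture.
    ListeningExecutorService service =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
    ListenableFuture<Integer> future = service.submit(() -> 6 * 7);
    // Attach a completion callback instead of blocking on get().
    Futures.addCallback(future, new FutureCallback<Integer>() {
      @Override
      public void onSuccess(Integer result) {
        System.out.println("computed " + result);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    }, MoreExecutors.directExecutor());
    service.shutdown();
  }
}

Passing MoreExecutors.directExecutor() to addCallback() runs the callback on whichever thread completes the future, which is acceptable for lightweight callbacks like this one.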
@Produces
static ListenableFuture<List<Place>> fetchPlaces(
S2LatLngRect viewport, DSLContext db, @ForDatabase ListeningExecutorService dbExecutor) {
var coverer = new S2RegionCoverer();
var coveredCells = coverer.getCovering(viewport);
Condition locationCondition =
DSL.or(
Streams.stream(coveredCells)
.map(
cell ->
PLACE
.S2_CELL
.ge(ULong.valueOf(cell.rangeMin().id()))
.and(PLACE.S2_CELL.le(ULong.valueOf(cell.rangeMax().id()))))
.collect(toImmutableList()));
return dbExecutor.submit(
() -> db.selectFrom(PLACE).where(locationCondition).fetchInto(Place.class));
}
/**
* Adds a new task to the queue. JobLimiter uses the provided service only for invoking the
* callable itself.
*/
public <V> ListenableFuture<V> schedule(
ListeningExecutorService service, ThrowingSupplier<ListenableFuture<V>, Exception> callable) {
// To help prevent stack overflows either this callable needs to be forced to be async, or the
// release() call when it's finished needs to be. It's easier to do it here.
ThrowingSupplier<ListenableFuture<V>, Exception> safeCallable =
() -> Futures.submitAsync(callable::get, service);
if (semaphore.tryAcquire()) {
return send(safeCallable);
}
ListenableFuture<V> enqueued = enqueue(safeCallable);
// It's possible that all running jobs finished after we failed to acquire the semaphore and
// before we enqueued the callable. To not get stuck in the queue forever, try again.
if (semaphore.tryAcquire()) {
release();
}
return enqueued;
}
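A hypothetical usage sketch for the schedule() method above; jobLimiter stands in for an instance of the enclosing class (with its semaphore and queue already set up), and the ThrowingSupplier lambda simply returns an immediate future. None of these names come from the original project.

// Illustrative only: jobLimiter is an instance of the class that declares schedule() above.
ListeningExecutorService pool =
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
ListenableFuture<String> result =
    jobLimiter.schedule(pool, () -> Futures.immediateFuture("done"));
Futures.addCallback(result, new FutureCallback<String>() {
  @Override
  public void onSuccess(String value) {
    System.out.println(value);
  }

  @Override
  public void onFailure(Throwable t) {
    t.printStackTrace();
  }
}, MoreExecutors.directExecutor());

Because schedule() wraps the callable with Futures.submitAsync(), the supplied pool is only used to invoke the callable itself, as the Javadoc above states.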
private List<Future<?>> checkAggregateAndGaugeAndHeartbeatAlertsAsync(AgentRollup agentRollup,
ListeningExecutorService workerExecutor) {
List<Future<?>> futures = new ArrayList<>();
for (AgentRollup childAgentRollup : agentRollup.children()) {
futures.addAll(checkAggregateAndGaugeAndHeartbeatAlertsAsync(childAgentRollup,
workerExecutor));
}
futures.add(workerExecutor.submit(new Runnable() {
@Override
public void run() {
try {
centralAlertingService.checkAggregateAndGaugeAndHeartbeatAlertsAsync(
agentRollup.id(), agentRollup.display(), clock.currentTimeMillis());
} catch (InterruptedException e) {
// probably shutdown requested (see close method above)
} catch (Throwable t) {
logger.error("{} - {}", agentRollup.id(), t.getMessage(), t);
}
}
}));
return futures;
}
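For context, a hedged sketch of how the recursive helper above could be consumed: collect the futures for the whole agent-rollup tree and block until every check has completed. The rootAgentRollup variable is illustrative and not part of the original snippet.

// Illustrative only: rootAgentRollup is an assumed top-level AgentRollup instance.
List<Future<?>> futures =
    checkAggregateAndGaugeAndHeartbeatAlertsAsync(rootAgentRollup, workerExecutor);
for (Future<?> future : futures) {
  future.get(); // block until this alert check has finished
}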
private static void initializeDirCaches(
ArtifactCacheEntries artifactCacheEntries,
BuckEventBus buckEventBus,
Function<String, UnconfiguredBuildTarget> unconfiguredBuildTargetFactory,
TargetConfigurationSerializer targetConfigurationSerializer,
ProjectFilesystem projectFilesystem,
ImmutableList.Builder<ArtifactCache> builder,
ListeningExecutorService storeExecutorService) {
for (DirCacheEntry cacheEntry : artifactCacheEntries.getDirCacheEntries()) {
builder.add(
createDirArtifactCache(
Optional.ofNullable(buckEventBus),
cacheEntry,
unconfiguredBuildTargetFactory,
targetConfigurationSerializer,
projectFilesystem,
storeExecutorService));
}
}
@Inject
ViewInteraction(
UiController uiController,
ViewFinder viewFinder,
@MainThread Executor mainThreadExecutor,
FailureHandler failureHandler,
Matcher<View> viewMatcher,
AtomicReference<Matcher<Root>> rootMatcherRef,
AtomicReference<Boolean> needsActivity,
RemoteInteraction remoteInteraction,
ListeningExecutorService remoteExecutor,
ControlledLooper controlledLooper) {
this.viewFinder = checkNotNull(viewFinder);
this.uiController = (InterruptableUiController) checkNotNull(uiController);
this.failureHandler = checkNotNull(failureHandler);
this.mainThreadExecutor = checkNotNull(mainThreadExecutor);
this.viewMatcher = checkNotNull(viewMatcher);
this.rootMatcherRef = checkNotNull(rootMatcherRef);
this.needsActivity = checkNotNull(needsActivity);
this.remoteInteraction = checkNotNull(remoteInteraction);
this.remoteExecutor = checkNotNull(remoteExecutor);
this.controlledLooper = checkNotNull(controlledLooper);
}
@Override
public <T, E extends Exception> ListenableFuture<Void> evalAsync(
TargetPatternResolver<T> resolver,
ImmutableSet<PathFragment> ignoredSubdirectories,
ImmutableSet<PathFragment> excludedSubdirectories,
BatchCallback<T, E> callback,
Class<E> exceptionClass,
ListeningExecutorService executor) {
return resolver.findTargetsBeneathDirectoryAsync(
directory.getRepository(),
getOriginalPattern(),
directory.getPackageFragment().getPathString(),
rulesOnly,
ignoredSubdirectories,
excludedSubdirectories,
callback,
exceptionClass,
executor);
}
@Override
protected AbstractDataBrokerTestCustomizer createDataBrokerTestCustomizer() {
return new ConcurrentDataBrokerTestCustomizer(true) {
@Override
public DOMStore createOperationalDatastore() {
realOperStore = new InMemoryDOMDataStore("OPER", getDataTreeChangeListenerExecutor());
spiedOperStore = spy(realOperStore);
getSchemaService().registerSchemaContextListener(spiedOperStore);
return spiedOperStore;
}
@Override
public ListeningExecutorService getCommitCoordinatorExecutor() {
return MoreExecutors.newDirectExecutorService();
}
};
}
@Test
public void readUnblocksWrite() throws ExecutionException, IOException, InterruptedException {
ListeningExecutorService service = listeningDecorator(newSingleThreadExecutor());
AtomicInteger counter = new AtomicInteger();
RingBufferInputStream buffer = new RingBufferInputStream(1);
byte[] content = new byte[1];
content[0] = 42;
buffer.write(content); // buffer is now full
ListenableFuture<Void> writeFuture =
service.submit(
() -> {
counter.getAndIncrement();
buffer.write(content);
return null;
});
while (counter.get() != 1) {
MICROSECONDS.sleep(10);
}
assertThat(writeFuture.isDone()).isFalse();
buffer.read();
assertThat(writeFuture.get()).isEqualTo(null);
service.shutdown();
service.awaitTermination(10, MICROSECONDS);
}
protected void runExecBigConcurrentCommand(int numCommands, long staggeredDelayBeforeStart) throws Exception {
ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
List<ListenableFuture<?>> futures = new ArrayList<ListenableFuture<?>>();
try {
for (int i = 0; i < numCommands; i++) {
long delay = (long) (Math.random() * staggeredDelayBeforeStart);
if (i > 0) Time.sleep(delay);
futures.add(executor.submit(new Runnable() {
@Override
public void run() {
String bigstring = Strings.repeat("abcdefghij", 1000); // 10KB
String out = execCommands("echo "+bigstring);
assertEquals(out, bigstring+"\n", "actualSize="+out.length()+"; expectedSize="+bigstring.length());
}}));
}
Futures.allAsList(futures).get();
} finally {
executor.shutdownNow();
}
}
@Test
public void writesUnblockReads() throws ExecutionException, InterruptedException {
ListeningExecutorService service = listeningDecorator(newSingleThreadExecutor());
AtomicInteger counter = new AtomicInteger();
RingBufferInputStream buffer = new RingBufferInputStream(1);
ListenableFuture<Integer> readFuture =
service.submit(
() -> {
counter.getAndIncrement();
return buffer.read();
});
byte[] content = new byte[1];
content[0] = 42;
while (counter.get() != 1) {
MICROSECONDS.sleep(10);
}
assertThat(readFuture.isDone()).isFalse();
buffer.write(content);
assertThat(readFuture.get()).isEqualTo(content[0]);
service.shutdown();
service.awaitTermination(10, MICROSECONDS);
}
@VisibleForTesting
Map<ArtifactLocation, String> parsePackageStrings(List<ArtifactLocation> sources)
throws Exception {
ListeningExecutorService executorService =
MoreExecutors.listeningDecorator(
Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()));
Map<ArtifactLocation, ListenableFuture<String>> futures = Maps.newHashMap();
for (final ArtifactLocation source : sources) {
futures.put(source, executorService.submit(() -> getDeclaredPackageOfJavaFile(source)));
}
Map<ArtifactLocation, String> map = Maps.newHashMap();
for (Entry<ArtifactLocation, ListenableFuture<String>> entry : futures.entrySet()) {
String value = entry.getValue().get();
if (value != null) {
map.put(entry.getKey(), value);
}
}
return map;
}
public DirArtifactCache(
String name,
ProjectFilesystem filesystem,
Path cacheDir,
CacheReadMode cacheReadMode,
Optional<Long> maxCacheSizeBytes,
ListeningExecutorService storeExecutorService)
throws IOException {
this.name = name;
this.filesystem = filesystem;
this.cacheDir = cacheDir;
this.maxCacheSizeBytes = maxCacheSizeBytes;
this.cacheReadMode = cacheReadMode;
this.storeExecutorService = storeExecutorService;
this.bytesSinceLastDeleteOldFiles = 0L;
// Check first, as mkdirs will fail if the path is a symlink.
if (!filesystem.isDirectory(cacheDir)) {
filesystem.mkdirs(cacheDir);
}
}
private ListenableFuture<Optional<BuildResult>> transformBuildResultIfNotPresent(
ListenableFuture<Optional<BuildResult>> future,
Callable<Optional<BuildResult>> function,
ListeningExecutorService executor) {
return transformBuildResultAsyncIfNotPresent(
future,
() ->
executor.submit(
() -> {
if (!shouldKeepGoing()) {
return Optional.of(canceled(firstFailure));
}
try (Scope ignored = buildRuleScope()) {
return function.call();
}
}));
}
@Test
public void combineFuturesFailWhenOneFails() throws InterruptedException {
SettableFuture<String> firstFuture = SettableFuture.create();
SettableFuture<Integer> secondFuture = SettableFuture.create();
ListeningExecutorService executor = MoreExecutors.newDirectExecutorService();
ListenableFuture<Pair<String, Integer>> combinedFuture =
MoreFutures.combinedFutures(firstFuture, secondFuture, executor);
assertFalse(combinedFuture.isDone());
executor.submit(() -> firstFuture.setException(new Exception()));
assertTrue(combinedFuture.isDone());
assertFalse(MoreFutures.isSuccess(combinedFuture));
}
public static HybridThriftOverHttpServiceImplArgs of(
HttpService service,
ListeningExecutorService executor,
ThriftProtocol thriftProtocol,
String hybridThriftPath) {
return ImmutableHybridThriftOverHttpServiceImplArgs.of(
service, executor, thriftProtocol, hybridThriftPath);
}
TezTaskRunner(Configuration tezConf, UserGroupInformation ugi, String[] localDirs,
TaskSpec taskSpec, TezTaskUmbilicalProtocol umbilical, int appAttemptNumber,
Map<String, ByteBuffer> serviceConsumerMetadata, Multimap<String, String> startedInputsMap,
TaskReporter taskReporter, ListeningExecutorService executor) throws IOException {
this.tezConf = tezConf;
this.ugi = ugi;
this.taskReporter = taskReporter;
this.executor = executor;
task = new LogicalIOProcessorRuntimeTask(taskSpec, appAttemptNumber, tezConf, localDirs, this,
serviceConsumerMetadata, startedInputsMap);
taskReporter.registerTask(task, this);
taskRunning = new AtomicBoolean(true);
}
@Test(timeout = 5000)
public void testSingleSuccessfulTask() throws IOException, InterruptedException, TezException,
ExecutionException {
ListeningExecutorService executor = null;
try {
ExecutorService rawExecutor = Executors.newFixedThreadPool(1);
executor = MoreExecutors.listeningDecorator(rawExecutor);
ApplicationId appId = ApplicationId.newInstance(10000, 1);
TaskExecutionTestHelpers.TezTaskUmbilicalForTest
umbilical = new TaskExecutionTestHelpers.TezTaskUmbilicalForTest();
TaskReporter taskReporter = createTaskReporter(appId, umbilical);
TezTaskRunner2 taskRunner = createTaskRunner(appId, umbilical, taskReporter, executor,
TestProcessor.CONF_EMPTY);
// Setup the executor
Future<TaskRunner2Result> taskRunnerFuture = taskExecutor.submit(
new TaskRunnerCallable2ForTest(taskRunner));
// Signal the processor to go through
TestProcessor.signal();
TaskRunner2Result result = taskRunnerFuture.get();
verifyTaskRunnerResult(result, EndReason.SUCCESS, null, false, null);
assertNull(taskReporter.currentCallable);
umbilical.verifyTaskSuccessEvent();
assertFalse(TestProcessor.wasAborted());
} finally {
executor.shutdownNow();
}
}
DefaultRequestDispatcher(final HttpConnector httpConnector,
final ListeningExecutorService executorService,
final boolean shutDownExecutorOnClose) {
this.executorService = executorService;
this.httpConnector = httpConnector;
this.shutDownExecutorOnClose = shutDownExecutorOnClose;
}
private Pair<TargetGraph, Iterable<TargetNode<?>>> computeTargetsAndGraphToShowTargetHash(
CommandRunnerParams params,
ListeningExecutorService executor,
Pair<TargetGraph, Iterable<TargetNode<?>>> targetGraphAndTargetNodes)
throws InterruptedException, BuildFileParseException, IOException {
if (isDetectTestChanges) {
ImmutableSet<BuildTarget> explicitTestTargets =
TargetNodes.getTestTargetsForNodes(
targetGraphAndTargetNodes
.getFirst()
.getSubgraph(targetGraphAndTargetNodes.getSecond())
.getNodes()
.iterator());
LOG.debug("Got explicit test targets: %s", explicitTestTargets);
ImmutableSet<BuildTarget> matchingBuildTargetsWithTests =
mergeBuildTargets(targetGraphAndTargetNodes.getSecond(), explicitTestTargets);
// Parse the BUCK files for the tests of the targets passed in from the command line.
TargetGraph targetGraphWithTests =
params
.getParser()
.buildTargetGraph(
createParsingContext(params.getCells().getRootCell(), executor)
.withSpeculativeParsing(SpeculativeParsing.ENABLED)
.withExcludeUnsupportedTargets(false),
matchingBuildTargetsWithTests)
.getTargetGraph();
return new Pair<>(
targetGraphWithTests, targetGraphWithTests.getAll(matchingBuildTargetsWithTests));
} else {
return targetGraphAndTargetNodes;
}
}
public FileSingleStreamSpiller(
PagesSerde serde,
ListeningExecutorService executor,
Path spillPath,
SpillerStats spillerStats,
SpillContext spillContext,
LocalMemoryContext memoryContext,
Optional<SpillCipher> spillCipher)
{
this.serde = requireNonNull(serde, "serde is null");
this.executor = requireNonNull(executor, "executor is null");
this.spillerStats = requireNonNull(spillerStats, "spillerStats is null");
this.localSpillContext = spillContext.newLocalSpillContext();
this.memoryContext = requireNonNull(memoryContext, "memoryContext is null");
if (requireNonNull(spillCipher, "spillCipher is null").isPresent()) {
closer.register(spillCipher.get()::close);
}
// HACK!
// The writePages() method is called in a separate thread pool and it's possible that
// these spiller threads can run concurrently with the close() method.
// Due to this race when the spiller thread is running, the driver thread:
// 1. Can zero out the memory reservation even though the spiller thread physically holds onto that memory.
// 2. Can close/delete the temp file(s) used for spilling, which doesn't have any visible side effects, but still not desirable.
// To hack around the first issue we reserve the memory in the constructor and we release it in the close() method.
// This means we start accounting for the memory before the spiller thread allocates it, and we release the memory reservation
// before/after the spiller thread allocates that memory -- whether before or after depends on whether writePages() is in the
// middle of execution when close() is called (note that this applies to both readPages() and writePages() methods).
this.memoryContext.setBytes(BUFFER_SIZE);
try {
this.targetFile = closer.register(new FileHolder(Files.createTempFile(spillPath, SPILL_FILE_PREFIX, SPILL_FILE_SUFFIX)));
}
catch (IOException e) {
throw new PrestoException(GENERIC_INTERNAL_ERROR, "Failed to create spill file", e);
}
}
private static ListenableFuture<List<Path>> submitRenderingTasks(
ListeningExecutorService executor, Injector injector, Class<? extends Annotation> qualifier)
throws InterruptedException {
List<RenderTask> tasks = injector.getInstance(new Key<List<RenderTask>>(qualifier) {});
@SuppressWarnings("unchecked") // Safe by the contract of invokeAll().
List<ListenableFuture<Path>> stage1 = (List) executor.invokeAll(tasks);
return allAsList(stage1);
}
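As a point of comparison, a hedged sketch of the same fan-out without invokeAll() and the unchecked cast: each RenderTask (which must be a Callable<Path> for invokeAll() to accept it) is submitted individually and the resulting futures are combined with Futures.allAsList(). The method name is hypothetical.

// Hypothetical variant of submitRenderingTasks(): submitting tasks one by one avoids the cast.
private static ListenableFuture<List<Path>> submitRenderingTasksOneByOne(
    ListeningExecutorService executor, List<RenderTask> tasks) {
  List<ListenableFuture<Path>> futures = new ArrayList<>();
  for (RenderTask task : tasks) {
    futures.add(executor.submit(task)); // RenderTask is assumed to implement Callable<Path>
  }
  return Futures.allAsList(futures);
}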
private TezTaskRunner2 createTaskRunner(ApplicationId appId,
TaskExecutionTestHelpers.TezTaskUmbilicalForTest umbilical,
TaskReporter taskReporter,
ListeningExecutorService executor, byte[] processorConf,
boolean updateSysCounters)
throws IOException {
return createTaskRunner(appId, umbilical, taskReporter, executor, TestProcessor.class.getName(),
processorConf, false, updateSysCounters);
}
@Test
public void testGetAbiAsync() {
final CountDownLatch testLock = new CountDownLatch(1);
final String[] retrievedEosioAbiJsonStrings = {new String()};
ListeningExecutorService service = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(10));
ListenableFuture<String> getEosioAbiFuture = service.submit(new Callable<String>() {
@Override
public String call() throws Exception {
// It's not thread safe to use the global mocks like this but we're not trying to
// run concurrent calls.
IABIProvider abiProvider = new ABIProviderImpl(mockRpcProvider, mockSerializationProvider);
return abiProvider.getAbi(chainId, new EOSIOName("eosio"));
}
});
Futures.addCallback(getEosioAbiFuture, new FutureCallback<String>() {
@Override
public void onSuccess(@NullableDecl String result) {
retrievedEosioAbiJsonStrings[0] = result;
testLock.countDown();
}
@Override
public void onFailure(Throwable t) {
testLock.countDown();
}
}, MoreExecutors.directExecutor());
try {
testLock.await(2000, TimeUnit.MILLISECONDS);
assertNotNull(retrievedEosioAbiJsonStrings[0]);
assertFalse(retrievedEosioAbiJsonStrings[0].isEmpty());
assertEquals(eosioAbiJsonString, retrievedEosioAbiJsonStrings[0]);
} catch (InterruptedException interruptedException) {
fail("Interrupted waiting for getAbi() to complete: " +
interruptedException.getLocalizedMessage());
}
}
@Test
public void sValue() throws Exception {
// Check that we never generate an S value that is larger than half the curve order. This avoids a malleability
// issue that can allow someone to change a transaction [hash] without invalidating the signature.
final int ITERATIONS = 10;
ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(ITERATIONS));
List<ListenableFuture<ECKey.ECDSASignature>> sigFutures = Lists.newArrayList();
final ECKey key = new ECKey();
for (byte i = 0; i < ITERATIONS; i++) {
final Sha256Hash hash = Sha256Hash.of(new byte[]{i});
sigFutures.add(executor.submit(new Callable<ECKey.ECDSASignature>() {
@Override
public ECKey.ECDSASignature call() throws Exception {
return key.sign(hash);
}
}));
}
List<ECKey.ECDSASignature> sigs = Futures.allAsList(sigFutures).get();
for (ECKey.ECDSASignature signature : sigs) {
assertTrue(signature.isCanonical());
}
final ECDSASignature first = sigs.get(0);
final ECKey.ECDSASignature duplicate = new ECKey.ECDSASignature(first.r, first.s);
assertEquals(first, duplicate);
assertEquals(first.hashCode(), duplicate.hashCode());
final ECKey.ECDSASignature highS = new ECKey.ECDSASignature(first.r, ECKey.CURVE.getN().subtract(first.s));
assertFalse(highS.isCanonical());
}
@Test
public void whenExecutingRunnableInListeningExecutor_shouldLogThreadExecution() throws Exception {
ConcurrentHashMap<String, Boolean> threadExecutions = new ConcurrentHashMap<>();
Runnable logThreadRun = () -> threadExecutions.put(Thread.currentThread().getName(), true);
ListeningExecutorService executor = MoreExecutors.newDirectExecutorService();
executor.execute(logThreadRun);
Assert.assertTrue(threadExecutions.get("main"));
}
public PublicAnnouncementManager(
Clock clock,
BuckEventBus eventBus,
AbstractConsoleEventBusListener consoleEventBusListener,
RemoteLogBuckConfig logConfig,
ListeningExecutorService service) {
this.clock = clock;
this.consoleEventBusListener = consoleEventBusListener;
this.eventBus = eventBus;
this.logConfig = logConfig;
this.service = service;
}
private static ArtifactCache createRetryingArtifactCache(
HttpCacheEntry cacheDescription,
String hostToReportToRemote,
BuckEventBus buckEventBus,
Function<String, UnconfiguredBuildTarget> unconfiguredBuildTargetFactory,
TargetConfigurationSerializer targetConfigurationSerializer,
ProjectFilesystem projectFilesystem,
ListeningExecutorService httpWriteExecutorService,
ListeningExecutorService httpFetchExecutorService,
ArtifactCacheBuckConfig config,
NetworkCacheFactory factory,
ArtifactCacheMode cacheMode,
Optional<ClientCertificateHandler> clientCertificateHandler) {
ArtifactCache cache =
createHttpArtifactCache(
cacheDescription,
hostToReportToRemote,
buckEventBus,
unconfiguredBuildTargetFactory,
targetConfigurationSerializer,
projectFilesystem,
httpWriteExecutorService,
httpFetchExecutorService,
config,
factory,
cacheMode,
clientCertificateHandler);
return new RetryingCacheDecorator(cacheMode, cache, config.getMaxFetchRetries(), buckEventBus);
}
private static ArtifactCache createDirArtifactCache(
Optional<BuckEventBus> buckEventBus,
DirCacheEntry dirCacheConfig,
Function<String, UnconfiguredBuildTarget> unconfiguredBuildTargetFactory,
TargetConfigurationSerializer targetConfigurationSerializer,
ProjectFilesystem projectFilesystem,
ListeningExecutorService storeExecutorService) {
Path cacheDir = dirCacheConfig.getCacheDir();
try {
DirArtifactCache dirArtifactCache =
new DirArtifactCache(
"dir",
projectFilesystem,
cacheDir,
dirCacheConfig.getCacheReadMode(),
dirCacheConfig.getMaxSizeBytes(),
storeExecutorService);
if (!buckEventBus.isPresent()) {
return dirArtifactCache;
}
return new LoggingArtifactCacheDecorator(
buckEventBus.get(),
dirArtifactCache,
new DirArtifactCacheEvent.DirArtifactCacheEventFactory(
unconfiguredBuildTargetFactory, targetConfigurationSerializer));
} catch (IOException e) {
throw new HumanReadableException(
e, "Failure initializing artifact cache directory: %s", cacheDir);
}
}
/** Helper to construct a PerBuildState and use it to get nodes. */
private static void getRawTargetNodes(
Parser parser,
TypeCoercerFactory typeCoercerFactory,
BuckEventBus eventBus,
Cell cell,
KnownRuleTypesProvider knownRuleTypesProvider,
boolean enableProfiling,
ListeningExecutorService executor,
ExecutableFinder executableFinder,
Path buildFile)
throws BuildFileParseException {
try (PerBuildState state =
new PerBuildStateFactory(
typeCoercerFactory,
new DefaultConstructorArgMarshaller(),
knownRuleTypesProvider,
new ParserPythonInterpreterProvider(cell.getBuckConfig(), executableFinder),
WatchmanFactory.NULL_WATCHMAN,
eventBus,
new ParsingUnconfiguredBuildTargetViewFactory(),
UnconfiguredTargetConfiguration.INSTANCE)
.create(
ParsingContext.builder(cell, executor).setProfilingEnabled(enableProfiling).build(),
parser.getPermState())) {
AbstractParser.getTargetNodeRawAttributes(state, cell, AbsPath.of(buildFile)).getTargets();
}
}