The following are usage examples of java.util.Collections#synchronizedList(), drawn from open-source projects hosted on GitHub.
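For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic usage pattern: Collections.synchronizedList wraps a backing list so that individual method calls are synchronized, but, per the JDK javadoc, iteration over the returned list must still be guarded by synchronizing on the wrapper manually. The class name SynchronizedListDemo is purely illustrative.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListDemo {
    public static void main(String[] args) {
        // Individual calls (add, get, size, ...) are synchronized by the wrapper.
        List<String> names = Collections.synchronizedList(new ArrayList<>());
        names.add("a");
        names.add("b");
        // Iteration is not atomic, so the JDK javadoc requires locking the wrapper while iterating.
        synchronized (names) {
            for (String name : names) {
                System.out.println(name);
            }
        }
    }
}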
@Test
public void whenFailingToParallelProcess_thenMainThreadShouldTimeout() throws InterruptedException {
// Given
List<String> outputScraper = Collections.synchronizedList(new ArrayList<>());
CountDownLatch countDownLatch = new CountDownLatch(5);
List<Thread> workers = Stream.generate(() -> new Thread(new BrokenWorker(outputScraper, countDownLatch)))
.limit(5)
.collect(toList());
// When
workers.forEach(Thread::start);
final boolean result = countDownLatch.await(3L, TimeUnit.SECONDS);
// Then
assertThat(result).isFalse();
}
@Test(dataProvider = "IntStreamTestData", dataProviderClass = IntStreamTestDataProvider.class)
public void testIntForEach(String name, TestData.OfInt data) {
Function<IntStream, List<Integer>> terminalFunc = s -> {
List<Integer> l = Collections.synchronizedList(new ArrayList<Integer>());
s.forEach(l::add);
return l;
};
// Test head
withData(data).
terminal(terminalFunc).
resultAsserter(resultAsserter()).
exercise();
// Test multiple stages
withData(data).
terminal(s -> s.map(i -> i), terminalFunc).
resultAsserter(resultAsserter()).
exercise();
}
@Override
public void fillCompletionVariants(@NotNull CompletionParameters parameters, @NotNull CompletionResultSet result) {
Document document = parameters.getEditor().getDocument();
Editor editor = parameters.getEditor();
Project project = parameters.getOriginalFile().getProject();
int offset = parameters.getOffset();
initiateLanguageServers(project, document);
CompletionParams param;
try {
param = LSPIJUtils.toCompletionParams(LSPIJUtils.toUri(document), offset, document);
List<LookupElement> proposals = Collections.synchronizedList(new ArrayList<>());
this.completionLanguageServersFuture
.thenComposeAsync(languageServers -> CompletableFuture.allOf(languageServers.stream()
.map(languageServer -> languageServer.getTextDocumentService().completion(param)
.thenAcceptAsync(completion -> proposals
.addAll(toProposals(project, editor, document, offset, completion, languageServer))))
.toArray(CompletableFuture[]::new)))
.get();
result.addAllElements(proposals);
} catch (RuntimeException | InterruptedException | ExecutionException e) {
LOGGER.warn(e.getLocalizedMessage(), e);
result.addElement(createErrorProposal(offset, e));
}
super.fillCompletionVariants(parameters, result);
}
@Test(dataProvider = "StreamTestData<Integer>", dataProviderClass = StreamTestDataProvider.class)
public void testForEach(String name, TestData.OfRef<Integer> data) {
Function<Stream<Integer>, List<Integer>> terminalFunc = s -> {
List<Integer> l = Collections.synchronizedList(new ArrayList<>());
s.forEach(l::add);
return l;
};
// Test head
withData(data).
terminal(terminalFunc).
resultAsserter(resultAsserter()).
exercise();
// Test multiple stages
withData(data).
terminal(s -> s.map(LambdaTestHelpers.identity()), terminalFunc).
resultAsserter(resultAsserter()).
exercise();
}
/**
* Constructs a new PingInjector and gets the list of open NetworkManager instances
*/
public PingInjector() {
try {
CraftServer craftserver = (CraftServer) Bukkit.getServer();
Field console = craftserver.getClass().getDeclaredField("console");
console.setAccessible(true);
this.server = (MinecraftServer) console.get(craftserver);
ServerConnection conn = this.server.ai();
networkManagers = Collections.synchronizedList((List<?>) this.getNetworkManagerList(conn));
} catch(IllegalAccessException | NoSuchFieldException e) {
e.printStackTrace();
}
}
@Test
public void maxUrisTestWithIteratorTask() {
DataMovementManager dmManager = client.newDataMovementManager();
List<String> uris = new ArrayList<String>();
List<String> outputUris = Collections.synchronizedList(new ArrayList<String>());
class Output {
AtomicInteger counter = new AtomicInteger(0);
}
for(int i=0; i<40; i++)
uris.add(UUID.randomUUID().toString());
QueryBatcher queryBatcher = dmManager.newQueryBatcher(uris.iterator());
final Output output = new Output();
queryBatcher.setMaxBatches(2);
queryBatcher.withBatchSize(10).withThreadCount(2)
.onUrisReady(batch -> {
outputUris.addAll(Arrays.asList(batch.getItems()));
output.counter.incrementAndGet();
})
.onQueryFailure((QueryBatchException failure) -> {
System.out.println(failure.getMessage());
});
dmManager.startJob(queryBatcher);
queryBatcher.awaitCompletion();
dmManager.stopJob(queryBatcher);
assertTrue("Counter value not as expected", output.counter.get() == 2);
assertTrue("Output list does not contain expected number of outputs", outputUris.size() == 20);
}
@Override
public List<String> readMessage(int expectedNumMessages, String groupId, String topic) throws IOException {
final List<String> messages = Collections.synchronizedList(new ArrayList<>(
expectedNumMessages));
try (final AutoClosableProcess kafka = AutoClosableProcess
.create(kafkaDir.resolve(Paths.get("bin", "kafka-console-consumer.sh")).toString(),
"--bootstrap-server",
KAFKA_ADDRESS,
"--from-beginning",
"--max-messages",
String.valueOf(expectedNumMessages),
"--topic",
topic,
"--consumer-property",
"group.id=" + groupId)
.setStdoutProcessor(messages::add)
.runNonBlocking()) {
final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
while (deadline.hasTimeLeft() && messages.size() < expectedNumMessages) {
try {
LOG.info("Waiting for messages. Received {}/{}.", messages.size(),
expectedNumMessages);
Thread.sleep(500);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
if (messages.size() != expectedNumMessages) {
throw new IOException("Could not read expected number of messages.");
}
return messages;
}
}
synchronized public void listen(String eventType, EventObserver eventObserver) {
List<EventObserver> observers = events.get(eventType);
if (observers == null) {
observers = Collections.synchronizedList(new ArrayList<EventObserver>());
}
observers.add(eventObserver);
events.put(eventType, observers);
}
/**
 * Constructs an MCS handler for the given source and target atom containers.
 *
 * @param source source (query) atom container
 * @param target target atom container
 */
public CDKMCSHandler(IAtomContainer source, IAtomContainer target) {
this.source = source;
this.target = target;
this.am = AtomMatcher.forQuery();
this.bm = BondMatcher.forQuery();
this.allAtomMCS = Collections.synchronizedList(new ArrayList<>());
this.allMCS = Collections.synchronizedList(new ArrayList<>());
this.timeout = searchMCS();
}
WeEventClient(String brokerUrl, String groupId, String userName, String password, int timeout) throws BrokerException {
validateParam(brokerUrl);
this.brokerUrl = brokerUrl;
this.brokerRestfulUrl = brokerUrl.concat("/rest");
this.groupId = groupId;
this.userName = userName;
this.password = password;
this.httpClientHelper = new HttpClientHelper(timeout);
this.subscribeIdList = Collections.synchronizedList(new ArrayList<>());
buildStomp();
}
public SystemTopicClientBase(PulsarClient client, TopicName topicName) {
this.client = client;
this.topicName = topicName;
this.writers = Collections.synchronizedList(new ArrayList<>());
this.readers = Collections.synchronizedList(new ArrayList<>());
}
protected List<String> getObject() {
List<String> list = new ArrayList<String>();
list.add("string");
return Collections.synchronizedList(list);
}
/**
* Resets the stack.
*
*/
public void reset() {
synchronized (list) {
list = Collections.synchronizedList(new ArrayList());
}
}
/**
* Tries to cancel a long-running query when there is no free worker to process the cancel request
* because the server thread pool is full. No exceptions are expected.
* To guarantee correct concurrent processing of the query itself and its cancellation request,
* three latches and some auxiliary machinery are used.
* For more details see <code>TestSQLFunctions#awaitLatchCancelled()</code>,
* <code>TestSQLFunctions#awaitQuerySuspensionLatch()</code>
* and <code>JdbcThinStatementCancelSelfTest#cancel(java.sql.Statement)</code>.
*
* @throws Exception If failed.
*/
@Test
public void testCancelAgainstFullServerThreadPool() throws Exception {
List<Statement> statements = Collections.synchronizedList(new ArrayList<>());
List<Connection> connections = Collections.synchronizedList(new ArrayList<>());
// Prepares connections and statements in order to fill the server thread pool with pseudo-infinite queries.
for (int i = 0; i < SERVER_THREAD_POOL_SIZE; i++) {
Connection yaConn = DriverManager.getConnection(URL);
yaConn.setSchema('"' + DEFAULT_CACHE_NAME + '"');
connections.add(yaConn);
Statement yaStmt = yaConn.createStatement();
statements.add(yaStmt);
}
try {
IgniteInternalFuture cancelRes = cancel(statements.get(SERVER_THREAD_POOL_SIZE - 1));
// Completely fills server thread pool.
IgniteInternalFuture<Long> fillPoolRes = fillServerThreadPool(statements, SERVER_THREAD_POOL_SIZE - 1);
GridTestUtils.assertThrows(log, () -> {
statements.get(SERVER_THREAD_POOL_SIZE - 1).executeQuery(
"select * from Integer where _key in " +
"(select _key from Integer where awaitLatchCancelled() = 0) and" +
" shouldNotBeCalledInCaseOfCancellation()");
return null;
}, SQLException.class, "The query was cancelled while executing.");
// Releases queries in thread pool.
TestSQLFunctions.suspendQryLatch.countDown();
// Ensures that there were no exceptions within async cancellation process.
cancelRes.get(CHECK_RESULT_TIMEOUT);
// Ensures that there were no exceptions within async thread pool filling process.
fillPoolRes.get(CHECK_RESULT_TIMEOUT);
}
finally {
for (Statement statement : statements)
statement.close();
for (Connection connection : connections)
connection.close();
}
}
public Async() {
counter = new AtomicInteger(0);
executor = Executors.newCachedThreadPool(new DefaultThreadFactory(Async.class, true));
tasks = Collections.synchronizedList(new ArrayList<>());
}
public TransformStatus() {
sampleRows = Collections.synchronizedList( new LinkedList<Object[]>() );
}
public Model2D() {
t = new float[nx][ny];
u = new float[nx][ny];
v = new float[nx][ny];
q = new float[nx][ny];
tb = new float[nx][ny];
uWind = new float[nx][ny];
vWind = new float[nx][ny];
conductivity = new float[nx][ny];
specificHeat = new float[nx][ny];
density = new float[nx][ny];
fluidity = new boolean[nx][ny];
parts = Collections.synchronizedList(new ArrayList<>());
particles = Collections.synchronizedList(new ArrayList<>());
heatFluxSensors = Collections.synchronizedList(new ArrayList<>());
anemometers = Collections.synchronizedList(new ArrayList<>());
thermometers = Collections.synchronizedList(new ArrayList<>());
thermostats = Collections.synchronizedList(new ArrayList<>());
photons = Collections.synchronizedList(new ArrayList<>());
clouds = Collections.synchronizedList(new ArrayList<>());
trees = Collections.synchronizedList(new ArrayList<>());
fans = Collections.synchronizedList(new ArrayList<>());
heliostats = Collections.synchronizedList(new ArrayList<>());
particleFeeders = Collections.synchronizedList(new ArrayList<>());
init();
heatSolver = new HeatSolver2DImpl(nx, ny);
heatSolver.setSpecificHeat(specificHeat);
heatSolver.setConductivity(conductivity);
heatSolver.setDensity(density);
heatSolver.setPower(q);
heatSolver.setVelocity(u, v);
heatSolver.setTemperatureBoundary(tb);
heatSolver.setFluidity(fluidity);
fluidSolver = new FluidSolver2DImpl(nx, ny);
fluidSolver.setFluidity(fluidity);
fluidSolver.setTemperature(t);
fluidSolver.setWindSpeed(uWind, vWind);
photonSolver = new PhotonSolver2D(lx, ly);
photonSolver.setPower(q);
radiositySolver = new RadiositySolver2D(this);
particleSolver = new ParticleSolver2D(this);
setGridCellSize();
propertyChangeListeners = new ArrayList<>();
manipulationListeners = new ArrayList<>();
}
@SuppressWarnings("rawtypes")
private TestListWrapper() {
lists = Collections.synchronizedList(new ArrayList<List<? extends Comparable>>());
}
protected List<String> getAnotherObject() {
List<String> list = Collections.emptyList();
return Collections.synchronizedList(list);
}
public void run() {
logger.info("Started gap-filling " + peakList);
setStatus(TaskStatus.PROCESSING);
// Get total number of rows
totalRows = peakList.getNumberOfRows();
// Get feature list columns
RawDataFile columns[] = peakList.getRawDataFiles();
// Create new feature list
processedPeakList = new SimplePeakList(peakList + " " + suffix, columns);
/*************************************************************
* Creating a stream to process the data in parallel
*/
processedRowsAtomic = new AtomicInteger(0);
List<PeakListRow> outputList = Collections.synchronizedList(new ArrayList<>());
peakList.parallelStream().forEach(sourceRow -> {
// Canceled?
if (isCanceled())
return;
PeakListRow newRow = new SimplePeakListRow(sourceRow.getID());
// Copy comment
newRow.setComment(sourceRow.getComment());
// Copy identities
for (PeakIdentity ident : sourceRow.getPeakIdentities())
newRow.addPeakIdentity(ident, false);
if (sourceRow.getPreferredPeakIdentity() != null)
newRow.setPreferredPeakIdentity(sourceRow.getPreferredPeakIdentity());
// Copy each peak and fill gaps
for (RawDataFile column : columns) {
// Canceled?
if (isCanceled())
return;
// Get current peak
Feature currentPeak = sourceRow.getPeak(column);
// If there is a gap, try to fill it
if (currentPeak == null)
currentPeak = fillGap(sourceRow, column);
// If a peak was found or created, add it
if (currentPeak != null)
newRow.addPeak(column, currentPeak);
}
outputList.add(newRow);
processedRowsAtomic.getAndAdd(1);
});
outputList.stream().forEach(newRow -> {
processedPeakList.addRow((PeakListRow) newRow);
});
/* End Parallel Implementation */
/*******************************************************************************/
// Canceled?
if (isCanceled())
return;
// Append processed feature list to the project
project.addPeakList(processedPeakList);
// Add quality parameters to peaks
QualityParameters.calculateQualityParameters(processedPeakList);
// Add task description to peakList
processedPeakList.addDescriptionOfAppliedTask(
new SimplePeakListAppliedMethod("Gap filling using RT and m/z range", parameters));
// Remove the original peaklist if requested
if (removeOriginal)
project.removePeakList(peakList);
setStatus(TaskStatus.FINISHED);
logger.info("Finished gap-filling " + peakList);
}