Listed below are example snippets showing how java.util.concurrent.ConcurrentSkipListSet is used in projects on GitHub.
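Before the project snippets, here is a minimal self-contained sketch of the class's core behavior (sorted iteration plus navigation methods); the class name ConcurrentSkipListSetDemo is illustrative only:

import java.util.concurrent.ConcurrentSkipListSet;

public class ConcurrentSkipListSetDemo {
    public static void main(String[] args) {
        // Elements are kept in ascending order and the set is safe for concurrent access.
        ConcurrentSkipListSet<Integer> set = new ConcurrentSkipListSet<>();
        set.add(3);
        set.add(1);
        set.add(2);
        System.out.println(set);             // [1, 2, 3]
        System.out.println(set.first());     // 1
        System.out.println(set.headSet(3));  // [1, 2]
        System.out.println(set.pollFirst()); // 1 (removes and returns the smallest element)
    }
}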
@Override
public Set<String> getArtifactIds(Integer limit) {
Set<String> ids = new ConcurrentSkipListSet<>();
try (Stream<String> stream = storageStore.allKeys()) {
// exists can be costly ...
if (limit != null) {
stream.filter(this::exists)
.limit(limit)
.forEach(ids::add);
} else {
stream.filter(this::exists).forEach(ids::add);
}
}
ids.remove(GLOBAL_RULES_ID);
return ids;
}
public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) {
// Only the full table cache (cleanup policy NEVER) needs its entries kept in
// sorted order so that listing is easy; otherwise a hash map is sufficient.
if (cleanupPolicy == CacheCleanupPolicy.NEVER) {
cache = new ConcurrentSkipListMap<>();
} else {
cache = new ConcurrentHashMap<>();
}
epochEntries = new ConcurrentSkipListSet<>();
// Use a single-thread executor so that only one cleanup task runs at a time.
ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("PartialTableCache Cleanup Thread - %d").build();
executorService = Executors.newSingleThreadExecutor(build);
this.cleanupPolicy = cleanupPolicy;
}
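As the comment in the constructor notes, the skip-list map keeps its keys sorted while the hash map does not. A minimal sketch of that difference (the class name IterationOrderDemo is illustrative only):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class IterationOrderDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> sorted = new ConcurrentSkipListMap<>();
        ConcurrentMap<String, Integer> hashed = new ConcurrentHashMap<>();
        for (String key : new String[] {"b", "c", "a"}) {
            sorted.put(key, 1);
            hashed.put(key, 1);
        }
        System.out.println(sorted.keySet()); // always [a, b, c]
        System.out.println(hashed.keySet()); // no ordering guarantee
    }
}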
/**
* Subsets of subsets subdivide correctly
*/
public void testRecursiveSubSets() throws Exception {
int setSize = expensiveTests ? 1000 : 100;
Class cl = ConcurrentSkipListSet.class;
NavigableSet<Integer> set = newSet(cl);
BitSet bs = new BitSet(setSize);
populate(set, setSize, bs);
check(set, 0, setSize - 1, true, bs);
check(set.descendingSet(), 0, setSize - 1, false, bs);
mutateSet(set, 0, setSize - 1, bs);
check(set, 0, setSize - 1, true, bs);
check(set.descendingSet(), 0, setSize - 1, false, bs);
bashSubSet(set.subSet(0, true, setSize, false),
0, setSize - 1, true, bs);
}
void doRun() {
checkTimes++;
logger.debug("\n========================================================================= {}", checkTimes);
dumpSegmentBuildJobCheckList();
coordinator.getStreamMetadataStore().reportStat();
List<SegmentJobBuildInfo> successJobs = traceEarliestSegmentBuildJob();
for (SegmentJobBuildInfo successJob : successJobs) {
ConcurrentSkipListSet<SegmentJobBuildInfo> submittedBuildJobs = segmentBuildJobCheckList
.get(successJob.cubeName);
logger.trace("Remove job {} from check list.", successJob.jobID);
submittedBuildJobs.remove(successJob);
}
findSegmentReadyToBuild();
if (checkTimes % 100 == 1) {
logger.info("Force traverse all cubes periodically.");
for (StreamingCubeInfo cubeInfo : coordinator.getEnableStreamingCubes()) {
List<String> segmentList = checkSegmentBuildJobFromMetadata(cubeInfo.getCubeName());
for (String segmentName : segmentList) {
submitSegmentBuildJob(cubeInfo.getCubeName(), segmentName);
}
}
}
}
/**
* Attempts to create a new collection of the same known type. Will return null if collection type is unknown.
*
* @param col Collection.
* @return New empty collection.
*/
public static <V> Collection<V> newKnownCollection(Object col) {
Class<?> cls = col == null ? null : col.getClass();
if (cls == HashSet.class)
return U.newHashSet(((Collection)col).size());
else if (cls == LinkedHashSet.class)
return U.newLinkedHashSet(((Collection)col).size());
else if (!wrapTrees() && cls == TreeSet.class)
return new TreeSet<>(((TreeSet<Object>)col).comparator());
else if (cls == ConcurrentSkipListSet.class)
return new ConcurrentSkipListSet<>(((ConcurrentSkipListSet<Object>)col).comparator());
else if (cls == ArrayList.class)
return new ArrayList<>(((Collection)col).size());
else if (cls == LinkedList.class)
return new LinkedList<>();
else if (cls == SINGLETON_LIST_CLS)
return new MutableSingletonList<>();
return null;
}
public void testSubSetContents2() {
ConcurrentSkipListSet set = set5();
SortedSet sm = set.subSet(two, three);
assertEquals(1, sm.size());
assertEquals(two, sm.first());
assertEquals(two, sm.last());
assertFalse(sm.contains(one));
assertTrue(sm.contains(two));
assertFalse(sm.contains(three));
assertFalse(sm.contains(four));
assertFalse(sm.contains(five));
Iterator i = sm.iterator();
Object k;
k = (Integer)(i.next());
assertEquals(two, k);
assertFalse(i.hasNext());
Iterator j = sm.iterator();
j.next();
j.remove();
assertFalse(set.contains(two));
assertEquals(4, set.size());
assertEquals(0, sm.size());
assertTrue(sm.isEmpty());
assertFalse(sm.remove(three));
assertEquals(4, set.size());
}
/**
* headSet returns set with keys in requested range
*/
public void testHeadSetContents() {
ConcurrentSkipListSet set = set5();
SortedSet sm = set.headSet(four);
assertTrue(sm.contains(one));
assertTrue(sm.contains(two));
assertTrue(sm.contains(three));
assertFalse(sm.contains(four));
assertFalse(sm.contains(five));
Iterator i = sm.iterator();
Object k;
k = (Integer)(i.next());
assertEquals(one, k);
k = (Integer)(i.next());
assertEquals(two, k);
k = (Integer)(i.next());
assertEquals(three, k);
assertFalse(i.hasNext());
sm.clear();
assertTrue(sm.isEmpty());
assertEquals(2, set.size());
assertEquals(four, set.first());
}
@Override
protected ConcurrentSkipListSet<MeanShiftSeed> compute() {
if(high - low <= 1) { // generally should equal one...
return reduce(chunks.get(low));
} else {
int mid = this.low + (this.high - this.low) / 2;
ParallelSeedExecutor left = new ParallelSeedExecutor(this, low, mid);
ParallelSeedExecutor right = new ParallelSeedExecutor(this, mid, high);
left.fork();
right.compute();
left.join();
return computedSeeds;
}
}
@Override
public void delete(RegionInfo regionInfo) {
Set<RegionInfo> infoSet = GaeCollectionUtils.getAndCreateIfNeed(
regionInfo.getUnitId(),
unitRegionMap,
() -> new ConcurrentSkipListSet<>()
);
infoSet.remove(regionInfo);
}
@Before
public void initialize() throws Exception {
testingServer = new TestingServer();
testingServer.start();
incrSequenceZKHandler = new IncrSequenceZKHandler[MAX_CONNECTION];
results = new ConcurrentSkipListSet();
}
@Test
public void testLambda() {
Map<String, Set<Long>> map = new HashMap<>();
Set<Long> set = getOrCreate("key", map, ConcurrentSkipListSet::new);
set.add(666L);
log.debug("now the map is {}", map);
//result:
//09:59:34.002 [main] DEBUG top.ezttf.ad.withoutspring.Jdk8Test - now the map is {key=[666]}
set = getOrCreate("key", map, ConcurrentSkipListSet::new);
set.add(777L);
log.debug("now the map is {}", map);
}
@Override
public void register() throws Exception {
applications.compute(getApplication().getAppName(), (k, v) -> {
Set<Application> container = Optional.ofNullable(v)
.orElse(new ConcurrentSkipListSet<>());
container.add(getApplication());
return container;
});
}
@Override
public void publish(StagingTableId stagingTableId, TableId tableId) {
stagingTableIds.remove(formatTableIdKey(tableId));
final DatasetId datasetId = datasetIdOf(tableId);
publishedTables.computeIfAbsent(datasetId, k -> new ConcurrentSkipListSet<>())
.add(tableId.getTable());
}
public void recycle() {
if (queries.size() > MAX_RECORDS) {
SortedSet<SqlLast> queries2 = new ConcurrentSkipListSet<>();
List<SqlLast> keyList = new ArrayList<>(queries);
int i = 0;
for (SqlLast key : keyList) {
if (i == MAX_RECORDS) {
break;
}
queries2.add(key);
i++;
}
queries = queries2;
}
}
/**
* ceiling returns next element
*/
public void testCeiling() {
ConcurrentSkipListSet q = set5();
Object e1 = q.ceiling(three);
assertEquals(three, e1);
Object e2 = q.ceiling(zero);
assertEquals(one, e2);
Object e3 = q.ceiling(five);
assertEquals(five, e3);
Object e4 = q.ceiling(six);
assertNull(e4);
}
QueueElement _poll(String ppoolId) {
logger.trace("_poll({})", ppoolId);
ConcurrentSkipListSet<QueueElement> queue = _findQueue(ppoolId);
QueueElement qe = queue.pollFirst();
if (qe != null) {
logger.debug("dequeued for ppoolId={}: wfId={}", ppoolId, qe.wfId);
}
return qe;
}
public void addSegmentBuildJob(SegmentJobBuildInfo segmentBuildJob) {
ConcurrentSkipListSet<SegmentJobBuildInfo> buildInfos = segmentBuildJobMap.get(segmentBuildJob.cubeName);
if (buildInfos == null) {
buildInfos = new ConcurrentSkipListSet<>();
ConcurrentSkipListSet<SegmentJobBuildInfo> previousValue = segmentBuildJobMap
.putIfAbsent(segmentBuildJob.cubeName, buildInfos);
if (previousValue != null) {
buildInfos = previousValue;
}
}
buildInfos.add(segmentBuildJob);
}
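The null check followed by putIfAbsent above is the classic get-or-create pattern for concurrent maps; on Java 8+ the same behavior can be written with computeIfAbsent, as the publish() snippet earlier already does. A self-contained sketch under that assumption (class, field, and key types are simplified and illustrative only):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListSet;

public class GetOrCreateDemo {
    // Same shape as segmentBuildJobMap in the snippet above, with the value type simplified to String.
    private final ConcurrentMap<String, ConcurrentSkipListSet<String>> jobs = new ConcurrentHashMap<>();

    // Equivalent of the null-check + putIfAbsent dance, written with computeIfAbsent.
    void add(String cubeName, String jobId) {
        jobs.computeIfAbsent(cubeName, k -> new ConcurrentSkipListSet<>()).add(jobId);
    }

    public static void main(String[] args) {
        GetOrCreateDemo demo = new GetOrCreateDemo();
        demo.add("cube1", "job-a");
        demo.add("cube1", "job-b");
        System.out.println(demo.jobs); // {cube1=[job-a, job-b]}
    }
}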
/**
* Constructs new transition.
*
* @param frameRate frames which should be processed per second for this transition
*/
public AbstractTransition ( final FrameRate frameRate )
{
super ();
this.state = TransitionState.constructed;
this.listeners = new ConcurrentSkipListSet<TransitionListener<V>> ( new TransitionListenerComparator<V> () );
setFrameRate ( frameRate != null ? frameRate : DEFAULT_FRAME_RATE );
setEventHandler ( null );
setOptimizeEvents ( true );
}
void dumpSegmentBuildJobCheckList() {
if (!logger.isTraceEnabled())
return;
StringBuilder sb = new StringBuilder("Dump JobCheckList:\t");
for (Map.Entry<String, ConcurrentSkipListSet<SegmentJobBuildInfo>> cube : segmentBuildJobCheckList.entrySet()) {
sb.append(cube.getKey()).append(":").append(cube.getValue());
}
logger.trace(sb.toString());
}
public ReactorRabbitMQChannelPool(Mono<Connection> connectionMono, int poolSize) {
this.connectionMono = connectionMono;
ChannelFactory channelFactory = new ChannelFactory(connectionMono);
GenericObjectPoolConfig<Channel> config = new GenericObjectPoolConfig<>();
config.setMaxTotal(poolSize);
this.pool = new GenericObjectPool<>(channelFactory, config);
this.borrowedChannels = new ConcurrentSkipListSet<>(Comparator.comparingInt(System::identityHashCode));
}
public void add(String value, DecoratedKey key)
{
ConcurrentSkipListSet<DecoratedKey> keys = get(value);
if (keys == null)
{
ConcurrentSkipListSet<DecoratedKey> newKeys = new ConcurrentSkipListSet<>(DecoratedKey.comparator);
keys = putIfAbsent(value, newKeys);
if (keys == null)
keys = newKeys;
}
keys.add(key);
}
/**
* Returns the first (eldest) value of this LRU cache.
*
* @return first (eldest) value of this LRU cache
*/
public synchronized T getFirstValue() {
// Get all keys sorted
SortedSet<Integer> keys =
new ConcurrentSkipListSet<Integer>(this.keySet());
// Return the value that corresponds to the first key
return this.get(keys.first());
}
/**
* pollLast succeeds unless empty
*/
public void testPollLast() {
ConcurrentSkipListSet q = populatedSet(SIZE);
for (int i = SIZE - 1; i >= 0; --i) {
assertEquals(i, q.pollLast());
}
assertNull(q.pollFirst());
}
/**
* Constructs new {@link TimedAnimationPipeline}.
*/
public TimedAnimationPipeline ()
{
// Concurrent transitions set
transitions = new ConcurrentSkipListSet<Transition> ();
// Active transitions animator
animator = new Thread ( TimedAnimationPipeline.this );
animator.setName ( ReflectUtils.getClassName ( TimedAnimationPipeline.this.getClass () ) );
animator.setDaemon ( true );
animator.start ();
}
@Test(timeout = 10000)
public void testThreadIDCollision() throws InterruptedException {
CountDownLatch threadsStart = new CountDownLatch(1);
CountDownLatch threadsFinished = new CountDownLatch(ThreadIndexCalculator.MAX_THREADS);
ThreadIndexCalculator indexCalculator = ThreadIndexCalculator.newInstance();
ConcurrentSkipListSet<Integer> uniqueIndices = new ConcurrentSkipListSet<>();
List<Thread> threads = new ArrayList<>(ThreadIndexCalculator.MAX_THREADS);
while (threads.size() < ThreadIndexCalculator.MAX_THREADS) {
Thread thread = new Thread(() -> {
try {
threadsStart.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
int index = indexCalculator.getIndex();
uniqueIndices.add(index);
threadsFinished.countDown();
});
if (thread.getId() % ThreadIndexCalculator.MAX_THREADS == 0) {
threads.add(thread);
thread.start();
}
}
threadsStart.countDown();
threadsFinished.await();
Assert.assertEquals(ThreadIndexCalculator.MAX_THREADS, uniqueIndices.size());
}
/**
* addAll(null) throws NPE
*/
public void testAddAll1() {
ConcurrentSkipListSet q = new ConcurrentSkipListSet();
try {
q.addAll(null);
shouldThrow();
} catch (NullPointerException success) {}
}
/**
* removeAll(c) removes only those elements of c and reports true if changed
*/
public void testRemoveAll() {
for (int i = 1; i < SIZE; ++i) {
ConcurrentSkipListSet q = populatedSet(SIZE);
ConcurrentSkipListSet p = populatedSet(i);
assertTrue(q.removeAll(p));
assertEquals(SIZE - i, q.size());
for (int j = 0; j < i; ++j) {
Integer x = (Integer)(p.pollFirst());
assertFalse(q.contains(x));
}
}
}
private synchronized void recordVote(final long messagePosition, final SignedMessage bftMessage,
final int voterId) {
// Get the map for this position
this.votes.putIfAbsent(messagePosition, new ConcurrentHashMap<SignedMessage, Set<Integer>>());
final ConcurrentMap<SignedMessage, Set<Integer>> positionVotes = this.votes.get(messagePosition);
// Get the set of votes for this message
positionVotes.putIfAbsent(bftMessage, new ConcurrentSkipListSet<>());
final Set<Integer> messageVotes = positionVotes.get(bftMessage);
messageVotes.add(voterId);
// Check if Opt-BFT quorum has been met
if (messageVotes.size() == this.optQuorum) {
// System.err.println("QUORUM MET, added " + (optChain.size() + 1) + "th message
// to Opt-BFT Chain: " /*+ bftMessage*/);
synchronized (this.optChain) {
System.out.println("Certified message #" + (messagePosition + 1) + " is available.");
if (this.optChain.putIfAbsent(messagePosition + 1, bftMessage) == null) {
// Increment contiguousOptMessages if we are contiguous
while (this.optChain.containsKey(new Long(contiguousOptMessages.get() + 1))) {
contiguousOptMessages.incrementAndGet();
}
final String msgFileName = String.format("%08d", messagePosition + 1) + ".msg";
final File messageFile = new File(this.certifiedMessageFolder, msgFileName);
try {
AtomicFileOperations.atomicWriteSignedMessage(messageFile, bftMessage);
} catch (IOException e) {
e.printStackTrace();
System.exit(-1);
}
this.notifyAll();
}
}
}
}
/**
* Set contains all elements of collection used to initialize
*/
public void testConstructor6() {
Integer[] ints = new Integer[SIZE];
for (int i = 0; i < SIZE; ++i)
ints[i] = new Integer(i);
ConcurrentSkipListSet q = new ConcurrentSkipListSet(Arrays.asList(ints));
for (int i = 0; i < SIZE; ++i)
assertEquals(ints[i], q.pollFirst());
}
/**
* Similarity measure: the Jaccard similarity coefficient.
* @param words1 word list 1
* @param words2 word list 2
* @return similarity score
*/
@Override
protected double getSimilarityImpl(List<Word> words1, List<Word> words2) {
if (words1.isEmpty() && words2.isEmpty()) {
return 1.0;
}
// HashSet.contains performs better than ArrayList.contains
Set<Word> words2Set = new HashSet<>();
words2Set.addAll(words2);
// compute the intersection
Set<String> intersectionSet = new ConcurrentSkipListSet<>();
words1.parallelStream().forEach(word -> {
if (words2Set.contains(word)) {
intersectionSet.add(word.getName());
}
});
// size of the intersection
int intersectionSize = intersectionSet.size();
// compute the union
Set<String> unionSet = new HashSet<>();
words1.forEach(word -> unionSet.add(word.getName()));
words2.forEach(word -> unionSet.add(word.getName()));
// size of the union
int unionSize = unionSet.size();
// similarity score
double score = intersectionSize / (double) unionSize;
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("交集的大小:" + intersectionSize);
LOGGER.debug("并集的大小:" + unionSize);
LOGGER.debug("相似度分值=" + intersectionSize + "/(double)" + unionSize + "=" + score);
}
return score;
}
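For reference, the Jaccard coefficient computed above is |intersection| / |union|. A small self-contained sketch with plain strings (the class name JaccardDemo and the sample words are illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class JaccardDemo {
    public static void main(String[] args) {
        Set<String> a = new HashSet<>(Arrays.asList("the", "quick", "brown", "fox"));
        Set<String> b = new HashSet<>(Arrays.asList("the", "lazy", "brown", "dog"));
        Set<String> intersection = new HashSet<>(a);
        intersection.retainAll(b);         // {the, brown}
        Set<String> union = new HashSet<>(a);
        union.addAll(b);                   // {the, quick, brown, fox, lazy, dog}
        double score = intersection.size() / (double) union.size();
        System.out.println(score);         // 2 / 6 = 0.333...
    }
}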