The following are example usages of java.util.concurrent.atomic.AtomicLong#decrementAndGet(), collected from open-source projects.
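As a quick reference before the examples: decrementAndGet() atomically subtracts one and returns the updated value, whereas getAndDecrement() returns the previous value. A minimal self-contained sketch:

import java.util.concurrent.atomic.AtomicLong;

public class DecrementAndGetDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(3);
        // Atomically subtracts 1 and returns the *updated* value.
        long v = counter.decrementAndGet();
        System.out.println(v + " / " + counter.get()); // prints "2 / 2"
    }
}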
private void emitItem(GroupState<K, T> groupState, Object item) {
Queue<Object> q = groupState.buffer;
AtomicLong keyRequested = groupState.requested;
REQUESTED.decrementAndGet(this);
// short circuit buffering
if (keyRequested != null && keyRequested.get() > 0 && (q == null || q.isEmpty())) {
@SuppressWarnings("unchecked")
Observer<Object> obs = (Observer<Object>) groupState.getObserver();
NotificationLite.accept(obs, item);
keyRequested.decrementAndGet();
} else {
q.add(item);
BUFFERED_COUNT.incrementAndGet(this);
if (groupState.count.getAndIncrement() == 0) {
pollQueue(groupState);
}
}
requestMoreIfNecessary();
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
AtomicLong counter = opCounter.apply(method);
Preconditions.checkState(counter != null);
long current = counter.getAndIncrement();
try {
if (current > threshold) {
NNProxy.proxyMetrics.throttledOps.incr();
throw new StandbyException("Too many requests (" + current + "/" + threshold + "), try later");
}
Object ret = method.invoke(underlying, args);
NNProxy.proxyMetrics.successOps.incr();
return ret;
} catch (InvocationTargetException e) {
NNProxy.proxyMetrics.failedOps.incr();
throw e.getCause();
} finally {
counter.decrementAndGet();
}
}
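The proxy above pairs getAndIncrement() on entry with decrementAndGet() in a finally block, so the counter tracks in-flight calls and a slot is released even when the call fails. A stripped-down sketch of the same pattern; ConcurrencyGate and its members are hypothetical names, not part of the project above:

import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicLong;

class ConcurrencyGate {
    private final AtomicLong inFlight = new AtomicLong();
    private final long threshold;

    ConcurrencyGate(long threshold) {
        this.threshold = threshold;
    }

    <T> T run(Callable<T> task) throws Exception {
        long current = inFlight.getAndIncrement();
        try {
            if (current > threshold) {
                throw new IllegalStateException(
                        "Too many requests (" + current + "/" + threshold + ")");
            }
            return task.call();
        } finally {
            inFlight.decrementAndGet(); // always release the slot, even on failure
        }
    }
}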
private void checkExecution(RPromise<Long> result, AtomicReference<Throwable> failed, AtomicLong count,
AtomicLong executed) {
if (executed.decrementAndGet() == 0) {
if (failed.get() != null) {
if (count.get() > 0) {
RedisException ex = new RedisException(
"" + count.get() + " keys has been deleted. But one or more nodes has an error",
failed.get());
result.tryFailure(ex);
} else {
result.tryFailure(failed.get());
}
} else {
result.trySuccess(count.get());
}
}
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception
{
counter.decrementAndGet();
InetAddress address = ((InetSocketAddress) ctx.channel().remoteAddress()).getAddress();
AtomicLong count = connectionsPerClient.get(address);
if (count != null)
{
if (count.decrementAndGet() <= 0)
{
connectionsPerClient.remove(address);
}
}
ctx.fireChannelInactive();
}
private void drainIfPossible(final Subscriber<? super T> child,
AtomicLong requested,
AtomicInteger bufferedCount,
AtomicBoolean onCompleteReceived,
AtomicInteger completionEmitted
) {
while (requested.get() > 0) {
Object t = queue.poll();
if (t != null) {
NotificationLite.accept((Observer) child, t);
requested.decrementAndGet();
requestedGauge.decrement();
bufferedCount.decrementAndGet();
bufferedGauge.decrement();
// System.out.println("buffered count: " + bufferedGauge.value() + " next " + next.value()) ;
} else {
if (onCompleteReceived.get()) {
if (completionEmitted.compareAndSet(0, 1)) {
child.onCompleted();
queue.clear();
bufferedGauge.set(0);
}
}
// queue is empty; stop draining
break;
}
}
}
public void finishReadFragmentData(DataSegmentFragment fragment) {
AtomicLong refCounter = refCounters.get(fragment);
if (refCounter != null) {
refCounter.decrementAndGet();
} else {
logger.warn("Ref counter not exist for fragment:{}", fragment);
}
}
/**
* {@inheritDoc}
*/
@Override
public void decrementReferenceCounter(final long id) {
final AtomicLong counter = referenceCounter.get(id);
if (counter == null || counter.get() <= 0) {
log.warn("Tried to decrement a payload reference counter ({}) that was already zero.", id);
if (InternalConfigurations.LOG_REFERENCE_COUNTING_STACKTRACE_AS_WARNING) {
if (log.isWarnEnabled()) {
for (int i = 0; i < Thread.currentThread().getStackTrace().length; i++) {
log.warn(Thread.currentThread().getStackTrace()[i].toString());
}
}
} else {
if (log.isDebugEnabled()) {
for (int i = 0; i < Thread.currentThread().getStackTrace().length; i++) {
log.debug(Thread.currentThread().getStackTrace()[i].toString());
}
}
}
return;
}
final long referenceCount = counter.decrementAndGet();
if (referenceCount == 0) {
removablePayloads.add(new RemovablePayload(id, System.currentTimeMillis()));
//Note: We'll remove the AtomicLong from the reference counter in the cleanup
}
}
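In decrementReferenceCounter() above, the zero check and the decrement are separate steps; the warn branch reports decrements past zero rather than preventing them. Where underflow must be ruled out entirely, a compareAndSet loop can decrement only while the counter is positive. A sketch with a hypothetical RefCount class:

import java.util.concurrent.atomic.AtomicLong;

final class RefCount {
    private final AtomicLong count = new AtomicLong(1);

    /** @return the updated count, or -1 if the counter was already zero. */
    long release() {
        for (;;) {
            long current = count.get();
            if (current == 0) {
                return -1; // already released; never underflow
            }
            if (count.compareAndSet(current, current - 1)) {
                return current - 1;
            }
        }
    }
}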
/**
* Decrements <code>limit</code>.
*
* Throws a <code>ResultLimitExceededException</code>
* if <code>limit</code> becomes negative.
*
* @param limit the limit to decrement
*/
private void checkLimit(AtomicLong limit) {
if (limit == null) {
return;
}
long result = limit.decrementAndGet();
if (result < 0) {
throw new ResultLimitExceededException();
}
}
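A hypothetical caller of checkLimit(), showing a shared AtomicLong acting as a result budget that is spent one unit per row; IllegalStateException stands in for ResultLimitExceededException so the sketch is self-contained:

import java.util.concurrent.atomic.AtomicLong;

public class LimitDemo {
    // Same contract as checkLimit above: decrement, fail once the budget is spent.
    static void checkLimit(AtomicLong limit) {
        if (limit != null && limit.decrementAndGet() < 0) {
            throw new IllegalStateException("result limit exceeded");
        }
    }

    public static void main(String[] args) {
        AtomicLong limit = new AtomicLong(3);
        for (int i = 0; i < 5; i++) {
            checkLimit(limit); // throws on the fourth iteration
            System.out.println("row " + i);
        }
    }
}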
/**
 * Decrement a stateful counter outside the stream.
 *
 * <pre>{@code
 * observable
 *     .doOnNext(RxUtil.decrement(mycounter))
 * }</pre>
 *
 * @param metric the counter to decrement
 * @return an action that decrements {@code metric} each time it is invoked
 */
public static <T> Action01<T> decrement(final AtomicLong metric) {
return new Action01<T>() {
@Override
public void call(T t1) {
metric.decrementAndGet();
}
@Override
public void call() {
metric.decrementAndGet();
}
};
}
@Override
protected void onNext(Context context, Subscriber<? super T> subscriber, T data) {
AtomicLong count = subs.get(subscriber);
if (count == null) return; // Means we already completed it
long remaining = count.decrementAndGet();
super.onNext(context, subscriber, data);
if (remaining == 0) {
close();
}
}
private void consumeMessages(AtomicLong count) throws Exception {
JmsConnection connection = (JmsConnection) factory.createConnection();
connection.start();
Session session = connection.createSession(false, ActiveMQSession.AUTO_ACKNOWLEDGE);
Queue queue = session.createQueue(getDestinationName());
MessageConsumer consumer = session.createConsumer(queue);
long v;
while ((v = count.decrementAndGet()) > 0) {
if ((count.get() % 10000) == 0) {
LOG.info("Received message: {}", NUM_SENDS - count.get());
}
assertNotNull("got message " + v, consumer.receive(15000));
}
consumer.close();
}
/**
* Run checks against all HDDS volumes.
* <p>
* This check may be performed at service startup and subsequently at
* regular intervals to detect and handle failed volumes.
*
* @param volumes - Set of volumes to be checked. This set must be immutable
*                for the duration of the check, otherwise the results will
*                be unexpected.
* @return set of failed volumes.
*/
public Set<HddsVolume> checkAllVolumes(Collection<HddsVolume> volumes)
throws InterruptedException {
final long gap = timer.monotonicNow() - lastAllVolumesCheck;
if (gap < minDiskCheckGapMs) {
numSkippedChecks.incrementAndGet();
if (LOG.isTraceEnabled()) {
LOG.trace(
"Skipped checking all volumes, time since last check {} is less " +
"than the minimum gap between checks ({} ms).",
gap, minDiskCheckGapMs);
}
return Collections.emptySet();
}
lastAllVolumesCheck = timer.monotonicNow();
final Set<HddsVolume> healthyVolumes = new HashSet<>();
final Set<HddsVolume> failedVolumes = new HashSet<>();
final Set<HddsVolume> allVolumes = new HashSet<>();
final AtomicLong numVolumes = new AtomicLong(volumes.size());
final CountDownLatch latch = new CountDownLatch(1);
for (HddsVolume v : volumes) {
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(v, null);
LOG.info("Scheduled health check for volume {}", v);
if (olf.isPresent()) {
allVolumes.add(v);
Futures.addCallback(olf.get(),
new ResultHandler(v, healthyVolumes, failedVolumes,
numVolumes, (ignored1, ignored2) -> latch.countDown()),
MoreExecutors.directExecutor());
} else {
if (numVolumes.decrementAndGet() == 0) {
latch.countDown();
}
}
}
// Wait until our timeout elapses, after which we give up on
// the remaining volumes.
if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
LOG.warn("checkAllVolumes timed out after {} ms",
maxAllowedTimeForCheckMs);
}
numAllVolumeChecks.incrementAndGet();
synchronized (this) {
// All volumes that have not been detected as healthy should be
// considered failed. This is a superset of 'failedVolumes'.
//
// Make a copy under the mutex as Sets.difference() returns a view
// of a potentially changing set.
return new HashSet<>(Sets.difference(allVolumes, healthyVolumes));
}
}
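checkAllVolumes() uses a common completion idiom: seed an AtomicLong with the number of outstanding tasks and release a latch when decrementAndGet() reaches zero, so exactly one caller observes the transition. The idiom in isolation:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;

public class CompletionDemo {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 4;
        final AtomicLong remaining = new AtomicLong(tasks);
        final CountDownLatch done = new CountDownLatch(1);
        for (int i = 0; i < tasks; i++) {
            new Thread(() -> {
                // ... do some work ...
                // Exactly one thread sees the counter hit zero,
                // so the latch is released exactly once.
                if (remaining.decrementAndGet() == 0) {
                    done.countDown();
                }
            }).start();
        }
        done.await();
        System.out.println("all tasks finished");
    }
}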
@Override
public Subscriber<? super T> call(final Subscriber<? super T> o) {
final AtomicLong requested = new AtomicLong();
o.add(Subscriptions.create(() -> {
}));
o.setProducer(new Producer() {
@Override
public void request(long n) {
if (requested.get() == Long.MAX_VALUE) {
logger.warn("current requested is int max do not increment");
} else {
requested.getAndAdd(n);
}
}
});
return new Subscriber<T>(o) {
@Override
public void onCompleted() {
complete.increment();
o.onCompleted();
}
@Override
public void onError(Throwable e) {
error.increment();
logger.error("onError() occured in DropOperator for groupId: {}", metricGroupId.id(), e);
o.onError(e);
}
@Override
public void onNext(T t) {
if (requested.get() > 0) {
o.onNext(t);
next.increment();
requested.decrementAndGet();
} else {
dropped.increment();
}
}
@Override
public void setProducer(Producer p) {
p.request(Long.MAX_VALUE);
}
};
}
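The operator above keeps a demand counter: request(n) adds to it, every delivered item decrements it, and items that arrive with no outstanding demand are dropped. The same bookkeeping in a single-threaded sketch:

import java.util.concurrent.atomic.AtomicLong;

public class DemandDemo {
    public static void main(String[] args) {
        AtomicLong demand = new AtomicLong();
        demand.addAndGet(2); // downstream requests two items
        for (int i = 0; i < 5; i++) {
            if (demand.get() > 0) {
                demand.decrementAndGet(); // one unit of demand consumed
                System.out.println("delivered " + i);
            } else {
                System.out.println("dropped " + i);
            }
        }
    }
}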
@Override
public Subscriber<? super T> call(final Subscriber<? super T> child) {
subscribe.increment();
final AtomicLong requested = new AtomicLong();
final AtomicInteger completionEmitted = new AtomicInteger();
final AtomicInteger terminated = new AtomicInteger();
final AtomicInteger bufferedCount = new AtomicInteger();
final AtomicBoolean onCompleteReceived = new AtomicBoolean();
final AtomicInteger wip = new AtomicInteger();
child.add(Subscriptions.create(new Action0() {
@Override
public void call() {
subscribe.decrement();
}
}));
child.setProducer(new Producer() {
@Override
public void request(long n) {
requested.getAndAdd(n);
requestedGauge.increment(n);
// System.out.println("request: " + requested.get());
pollQueue(child,
requested,
bufferedCount,
onCompleteReceived,
completionEmitted,
wip);
}
});
Subscriber<T> parent = new Subscriber<T>() {
@Override
public void onStart() {
request(Long.MAX_VALUE);
}
@Override
public void onCompleted() {
if (terminated.compareAndSet(0, 1)) {
complete.increment();
onCompleteReceived.set(true);
pollQueue(child,
requested,
bufferedCount,
onCompleteReceived,
completionEmitted,
wip);
}
}
@Override
public void onError(Throwable e) {
if (terminated.compareAndSet(0, 1)) {
child.onError(e);
error.increment();
queue.clear();
}
}
@Override
public void onNext(T t) {
emitItem(NotificationLite.next(t));
}
private void emitItem(Object item) {
// short circuit buffering
if (requested.get() > 0 && queue.isEmpty()) {
NotificationLite.accept((Observer) child, item);
requested.decrementAndGet();
requestedGauge.decrement();
next.increment();
// System.out.println("next count: " + next.value());
} else {
boolean success = queue.offer(item);
if (success) {
bufferedCount.incrementAndGet();
bufferedGauge.increment();
// System.out.println("buffered count: " + bufferedGauge.value());
drainIfPossible(child, requested, bufferedCount, onCompleteReceived, completionEmitted);
} else {
dropped.increment();
// System.out.println("dropped count: " + dropped.value());
// dropped
}
}
}
};
// if child unsubscribes it should unsubscribe the parent, but not the other way around
child.add(parent);
return parent;
}
public void decrementCount(T identity) {
NullArgumentChecker.DEFAULT.check(identity, "identity");
AtomicLong counter = getCounter(identity);
counter.decrementAndGet();
}
@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {
long endNanos = System.nanoTime();
Object o1 = stack.pop();
if (!(o1 instanceof String)) {
throw new WarpScriptException(getName() + " expects a STRING alias.");
}
String alias = (String) o1;
String keyStart = CHRONOSTATS.getStartKey(alias, stack);
String keyActiveCount = CHRONOSTATS.getActiveCountKey(alias, stack);
String keyTotalCount = CHRONOSTATS.getTotalCountKey(alias, stack);
AtomicLong activeCount = (AtomicLong) stack.getAttribute(keyActiveCount);
if (null == activeCount) {
throw new WarpScriptException(getName() + " called before " + WarpScriptLib.CHRONOSTART + " for " + alias + ".");
}
// Decrease the number of "active" starts
activeCount.decrementAndGet();
// If there are active chronos, do nothing. Takes care of recursivity.
if (activeCount.intValue() > 0) {
return stack;
}
if (activeCount.intValue() < 0) {
throw new WarpScriptException(getName() + " called more times than " + WarpScriptLib.CHRONOSTART + " for " + alias + ".");
}
// No null check needed; it was already done for the active count
AtomicLong startNanos = (AtomicLong) stack.getAttribute(keyStart);
AtomicLong totalCount = (AtomicLong) stack.getAttribute(keyTotalCount);
Map<String, AtomicLong[]> stats = (Map<String, AtomicLong[]>) stack.getAttribute(CHRONOSTATS.key);
if (null == stats) {
stats = new ConcurrentHashMap<String, AtomicLong[]>();
stack.setAttribute(CHRONOSTATS.key, stats);
}
AtomicLong[] alias_stats = stats.get(alias);
if (null == alias_stats) {
alias_stats = new AtomicLong[]{new AtomicLong(), new AtomicLong()};
stats.put(alias, alias_stats);
}
alias_stats[0].addAndGet(endNanos - startNanos.longValue()); // Total elapsed time
alias_stats[1].addAndGet(totalCount.longValue()); // Total call count
// Reset the total count. No need to reset start, as it will be overwritten.
totalCount.set(0L);
return stack;
}
/** Returns a Udf; each call to the UDF returns a Number whose value is one less than the previous call's. */
public static Udf incNumber(long initValue) {
AtomicLong atomicLong = new AtomicLong(initValue);
return (params, readOnly) -> atomicLong.decrementAndGet();
}
/**
 * Because there is only one client per server within a single JVM, this
 * scenario cannot be tested with truly independent clients.
 */
public void test_multiple_client() {
System.out.println("!!!!!!!!Start test_multiple_client !!!!!!!!!!!");
final String req_msg = setupLargMsg();
final int clientNum = 3;
final AtomicLong received = new AtomicLong(clientNum);
for (int i = 0; i < clientNum; i++) {
new Thread(new Runnable() {
@Override
public void run() {
IConnection client = context.connect(null, "localhost", port);
List<TaskMessage> list = new ArrayList<TaskMessage>();
TaskMessage message = new TaskMessage(task, req_msg.getBytes());
list.add(message);
client.send(message);
System.out.println("!!Client has sent data");
while (received.get() != 0) {
JStormUtils.sleepMs(1000);
}
client.close();
}
}).start();
}
IConnection server = null;
JStormUtils.sleepMs(1000);
System.out.println("!!server begin start!!!!!");
server = initNettyServer();
for (int i = 0; i < clientNum; i++) {
byte[] recv = (byte[]) server.recv(task, 0);
Assert.assertEquals(req_msg, new String(recv));
received.decrementAndGet();
}
server.close();
System.out.println("!!!!!!!!!!!!End test_multiple_client!!!!!!!!!!!!!");
}