java.util.concurrent.atomic.AtomicLongArray#length() source code examples

The examples below show how java.util.concurrent.atomic.AtomicLongArray#length() is used in a number of open-source projects; each snippet is reproduced from the project's source on GitHub.
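
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the typical pattern: length() returns the fixed capacity chosen at construction time, so it is the usual loop bound when reading or snapshotting the array.

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLongArray;

public class AtomicLongArrayLengthDemo {
    public static void main(String[] args) {
        AtomicLongArray counters = new AtomicLongArray(8); // capacity fixed at 8
        counters.incrementAndGet(3);
        counters.addAndGet(5, 42L);

        long[] snapshot = new long[counters.length()];     // length() is always 8
        for (int i = 0; i < counters.length(); i++) {
            snapshot[i] = counters.get(i);                  // each get() is a volatile read
        }
        System.out.println(Arrays.toString(snapshot));
    }
}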

Source 1   Project: consulo   File: ConcurrentBitSet.java
/**
 * Compares this object against the specified object.
 * The result is {@code true} if and only if the argument is
 * not {@code null} and is a {@code ConcurrentBitSet} object that has
 * exactly the same set of bits set to {@code true} as this bit
 * set. That is, for every nonnegative {@code int} index {@code k},
 * <pre>((ConcurrentBitSet)obj).get(k) == this.get(k)</pre>
 * must be true. The current sizes of the two bit sets are not compared.
 *
 * @param obj the object to compare with
 * @return {@code true} if the objects are the same;
 * {@code false} otherwise
 * @see #size()
 */
@Override
public boolean equals(Object obj) {
  if (!(obj instanceof ConcurrentBitSet)) {
    return false;
  }
  if (this == obj) {
    return true;
  }

  ConcurrentBitSet set = (ConcurrentBitSet)obj;

  for (int i = 0; i < arrays.length(); i++) {
    AtomicLongArray array1 = arrays.get(i);
    AtomicLongArray array2 = set.arrays.get(i);
    if (array1 == null && array2 == null) continue;
    int size = array1 == null ? array2.length() : array1.length();
    for (int k=0; k<size;k++) {
      long word1 = array1 == null ? 0 : array1.get(k);
      long word2 = array2 == null ? 0 : array2.get(k);
      if (word1 != word2) return false;
    }
  }

  return true;
}
 
Source 2   Project: hollow   File: ThreadSafeBitSet.java
public long maxSetBit() {
    ThreadSafeBitSetSegments segments = this.segments.get();

    int segmentIdx = segments.numSegments() - 1;

    for(;segmentIdx >= 0; segmentIdx--) {
        AtomicLongArray segment = segments.getSegment(segmentIdx);
        for(int longIdx=segment.length() - 1; longIdx >= 0; longIdx--) {
            long l = segment.get(longIdx);
            if(l != 0)
                return (segmentIdx << log2SegmentSize) + (longIdx * 64) + (63 - Long.numberOfLeadingZeros(l));
        }
    }

    return -1;
}
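
A worked instance of the return expression above may help: the global bit index is the segment base, plus the word offset within the segment, plus the bit position within the word. The helper below is for illustration only and is not part of the hollow source; the values are hypothetical and assume each segment spans 1 << log2SegmentSize bits (2^14 = 16384 here).

/** Illustration only: the same index arithmetic as maxSetBit's return statement. */
static long globalBitIndex(int segmentIdx, int longIdx, long word, int log2SegmentSize) {
    return ((long) segmentIdx << log2SegmentSize)
            + (longIdx * 64L)
            + (63 - Long.numberOfLeadingZeros(word));
    // e.g. segmentIdx = 2, longIdx = 3, word = 0b1000, log2SegmentSize = 14
    //      -> 32768 + 192 + 3 = 32963
}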
 
Source 3   Project: zeno   File: ByteArrayOrdinalMap.java
/**
 * Grow the key array.  All of the values in the current array must be re-hashed and added to the new array.
 */
private void growKeyArray() {
    AtomicLongArray newKeys = emptyKeyArray(pointersAndOrdinals.length() * 2);

    long valuesToAdd[] = new long[size];

    int counter = 0;

    /// do not iterate over these values in the same order in which they appear in the hashed array.
    /// if we do so, we cause large clusters of collisions to appear (because we resolve collisions with linear probing).
    for(int i=0;i<pointersAndOrdinals.length();i++) {
        long key = pointersAndOrdinals.get(i);
        if(key != EMPTY_BUCKET_VALUE) {
            valuesToAdd[counter++] = key;
        }
    }

    Arrays.sort(valuesToAdd);

    populateNewHashArray(newKeys, valuesToAdd);

    /// 70% load factor
    sizeBeforeGrow = (newKeys.length() * 7) / 10;
    pointersAndOrdinals = newKeys;
}
 
Source 4   Project: hollow   File: ByteArrayOrdinalMap.java
private int get(ByteDataBuffer serializedRepresentation, int hash) {
    AtomicLongArray pao = pointersAndOrdinals;

    int modBitmask = pao.length() - 1;
    int bucket = hash & modBitmask;
    long key = pao.get(bucket);

    // Linear probing to resolve collisions
    // Given the load factor it is guaranteed that the loop will terminate
    // as there will be at least one empty bucket
    // To ensure this is the case it is important that pointersAndOrdinals
    // is read into a local variable and thereafter used, otherwise a concurrent
    // size increase may break this invariant
    while (key != EMPTY_BUCKET_VALUE) {
        if (compare(serializedRepresentation, key)) {
            return (int) (key >>> BITS_PER_POINTER);
        }

        bucket = (bucket + 1) & modBitmask;
        key = pao.get(bucket);
    }

    return -1;
}
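
A note on modBitmask above: hash & (pao.length() - 1) is equivalent to hash % pao.length() only because the map keeps its backing AtomicLongArray sized at a power of two (growKeyArray in Source 3, the zeno version of this class, shows the doubling). A tiny illustration, not part of the hollow source:

/** Illustration only: power-of-two masking as a cheap modulus. */
static int bucketOf(int hash, AtomicLongArray pao) {
    int modBitmask = pao.length() - 1; // valid only when length() is a power of two
    return hash & modBitmask;          // e.g. hash = 1447, length() = 16 -> bucket 7
}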
 
Source 5   Project: zeno   File: ThreadSafeBitSet.java
/**
 * Serialize this ThreadSafeBitSet to an OutputStream
 */
public void serializeTo(DataOutputStream os) throws IOException {
    os.write(log2SegmentSize);

    ThreadSafeBitSetSegments segments = this.segments.get();

    os.writeInt(segments.numSegments());

    for(int i=0;i<segments.numSegments();i++) {
        AtomicLongArray arr = segments.getSegment(i);

        for(int j=0;j<arr.length();j++) {
            os.writeLong(arr.get(j));
        }
    }
}
 
Source 6   Project: zeno   File: FastBlobTypeDeserializationState.java
/**
 * Fill this state from the serialized data which exists in this ByteArrayOrdinalMap
 *
 * @param ordinalMap
 */
public void populateFromByteOrdinalMap(final ByteArrayOrdinalMap ordinalMap) {
    ByteDataBuffer byteData = ordinalMap.getByteData();
    AtomicLongArray pointersAndOrdinals = ordinalMap.getPointersAndOrdinals();
    FastBlobDeserializationRecord rec = new FastBlobDeserializationRecord(getSchema(), byteData.getUnderlyingArray());
    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
        long pointerAndOrdinal = pointersAndOrdinals.get(i);
        if(!ByteArrayOrdinalMap.isPointerAndOrdinalEmpty(pointerAndOrdinal)) {
            long pointer = ByteArrayOrdinalMap.getPointer(pointerAndOrdinal);
            int ordinal = ByteArrayOrdinalMap.getOrdinal(pointerAndOrdinal);

            int sizeOfData = VarInt.readVInt(byteData.getUnderlyingArray(), pointer);
            pointer += VarInt.sizeOfVInt(sizeOfData);

            rec.position(pointer);

            add(ordinal, rec);
        }
    }
}
 
Source 7
private List<CountAtBucket> nonZeroBuckets() {
    List<CountAtBucket> buckets = new ArrayList<>();

    long zeroSnap = zeros.get();
    if (zeroSnap > 0) {
        buckets.add(new CountAtBucket(UPPER_BOUNDS[getRangeIndex(ZERO.bucketIdx, ZERO.offset)], zeroSnap));
    }

    long lowerSnap = lower.get();
    if (lowerSnap > 0) {
        buckets.add(new CountAtBucket(UPPER_BOUNDS[getRangeIndex(LOWER.bucketIdx, LOWER.offset)], lowerSnap));
    }

    long upperSnap = upper.get();
    if (upperSnap > 0) {
        buckets.add(new CountAtBucket(UPPER_BOUNDS[getRangeIndex(UPPER.bucketIdx, UPPER.offset)], upperSnap));
    }

    for (int i = 0; i < values.length(); i++) {
        AtomicLongArray bucket = values.get(i);
        if (bucket != null) {
            for (int j = 0; j < bucket.length(); j++) {
                long cnt = bucket.get(j);
                if (cnt > 0) {
                    buckets.add(new CountAtBucket(UPPER_BOUNDS[getRangeIndex(i, j)], cnt));
                }
            }
        }
    }

    return buckets;
}
 
Source 8   Project: hollow   File: ThreadSafeBitSet.java
/**
 * Clear all bits to 0.
 */
public void clearAll() {
    ThreadSafeBitSetSegments segments = this.segments.get();

    for(int i=0;i<segments.numSegments();i++) {
        AtomicLongArray segment = segments.getSegment(i);

        for(int j=0;j<segment.length();j++) {
            segment.set(j, 0L);
        }
    }
}
 
Source 9   Project: hollow   File: ByteArrayOrdinalMap.java
/**
 * Assign a predefined ordinal to a serialized representation.<p>
 * <p>
 * WARNING: THIS OPERATION IS NOT THREAD-SAFE.<p>
 * WARNING: THIS OPERATION WILL NOT UPDATE THE FreeOrdinalTracker.
 *
 * @param serializedRepresentation the serialized representation
 * @param ordinal the ordinal
 */
public void put(ByteDataBuffer serializedRepresentation, int ordinal) {
    if (ordinal < 0 || ordinal > ORDINAL_MASK) {
        throw new IllegalArgumentException(String.format(
                "The given ordinal %s is out of bounds and not within the closed interval [0, %s]",
                ordinal, ORDINAL_MASK));
    }
    if (size > sizeBeforeGrow) {
        growKeyArray();
    }

    int hash = HashCodes.hashCode(serializedRepresentation);

    AtomicLongArray pao = pointersAndOrdinals;

    int modBitmask = pao.length() - 1;
    int bucket = hash & modBitmask;
    long key = pao.get(bucket);

    while (key != EMPTY_BUCKET_VALUE) {
        bucket = (bucket + 1) & modBitmask;
        key = pao.get(bucket);
    }

    long pointer = byteData.length();

    VarInt.writeVInt(byteData, (int) serializedRepresentation.length());
    serializedRepresentation.copyTo(byteData);
    if (byteData.length() > MAX_BYTE_DATA_LENGTH) {
        throw new IllegalStateException(String.format(
                "The number of bytes for the serialized representations, %s, is too large and is greater than the maximum of %s bytes",
                byteData.length(), MAX_BYTE_DATA_LENGTH));
    }

    key = ((long) ordinal << BITS_PER_POINTER) | pointer;

    size++;

    pao.set(bucket, key);
}
 
Source 10   Project: consulo   File: ConcurrentBitSet.java
/**
 * Returns the index of the first bit that is set to {@code false}
 * that occurs on or after the specified starting index.
 *
 * @param fromIndex the index to start checking from (inclusive)
 * @return the index of the next clear bit
 * @throws IndexOutOfBoundsException if the specified index is negative
 */
public int nextClearBit(int fromIndex) {
  if (fromIndex < 0) {
    throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex);
  }

  int arrayIndex = arrayIndex(fromIndex);
  AtomicLongArray array = arrays.get(arrayIndex);
  int wordIndexInArray = wordIndexInArray(fromIndex);
  if (array == null) {
    return ((1<<arrayIndex)-1+wordIndexInArray) * BITS_PER_WORD+(fromIndex%BITS_PER_WORD);
  }

  long word = ~array.get(wordIndexInArray) & (WORD_MASK << fromIndex);

  while (true) {
    if (word != 0) {
      return ((1<<arrayIndex)-1 + wordIndexInArray) * BITS_PER_WORD + Long.numberOfTrailingZeros(word);
    }
    if (++wordIndexInArray == array.length()) {
      wordIndexInArray = 0;
      if (++arrayIndex == arrays.length()) return -1;
      array = arrays.get(arrayIndex);
      if (array == null) {
        return ((1<<arrayIndex)-1+wordIndexInArray) * BITS_PER_WORD;
      }
    }

    word = ~array.get(wordIndexInArray);
  }
}
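
The (1 << arrayIndex) - 1 term above is the geometric sum 1 + 2 + ... + 2^(arrayIndex-1), i.e. the number of words held by all lower-indexed arrays when array i holds 2^i words (that sizing is implied by the formula rather than shown in this excerpt). A small sketch of the same conversion, for illustration only and assuming 64-bit words:

/** Illustration only: (arrayIndex, wordIndexInArray, bitInWord) -> global bit index. */
static int globalBit(int arrayIndex, int wordIndexInArray, int bitInWord) {
    int wordsBefore = (1 << arrayIndex) - 1;                  // 1 + 2 + ... + 2^(arrayIndex-1)
    return (wordsBefore + wordIndexInArray) * 64 + bitInWord; // BITS_PER_WORD assumed to be 64
    // e.g. arrayIndex = 3, wordIndexInArray = 2, bitInWord = 5 -> (7 + 2) * 64 + 5 = 581
}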
 
Source 11   Project: zeno   File: ThreadSafeBitSet.java
/**
 * Clear all bits to 0.
 */
public void clearAll() {
    ThreadSafeBitSetSegments segments = this.segments.get();

    for(int i=0;i<segments.numSegments();i++) {
        AtomicLongArray segment = segments.getSegment(i);

        for(int j=0;j<segment.length();j++) {
            segment.set(j, 0L);
        }
    }
}
 
Source 12   Project: plog   File: SimpleStatisticsReporter.java
private static JsonArray arrayForLogStats(AtomicLongArray data) {
    final JsonArray result = new JsonArray();
    for (int i = 0; i < data.length(); i++) {
        result.add(data.get(i));
    }
    return result;
}
 
Source 13   Project: stratio-cassandra   File: HistogramBuilderTest.java
private static long[] toArray(AtomicLongArray a)
{
    final long[] r = new long[a.length()];
    for (int i = 0 ; i < r.length ; i++)
        r[i] = a.get(i);
    return r;
}
 
Source 14   Project: incubator-retired-blur   File: BlurUtil.java
public static List<Long> getList(AtomicLongArray atomicLongArray) {
  if (atomicLongArray == null) {
    return null;
  }
  List<Long> counts = new ArrayList<Long>(atomicLongArray.length());
  for (int i = 0; i < atomicLongArray.length(); i++) {
    counts.add(atomicLongArray.get(i));
  }
  return counts;
}
 
Source 15   Project: incubator-retired-blur   File: BlurUtil.java
public static List<Long> toList(AtomicLongArray atomicLongArray) {
  if (atomicLongArray == null) {
    return null;
  }
  int length = atomicLongArray.length();
  List<Long> result = new ArrayList<Long>(length);
  for (int i = 0; i < length; i++) {
    result.add(atomicLongArray.get(i));
  }
  return result;
}
 
Source 16   Project: hollow   File: ByteArrayOrdinalMap.java
private synchronized int assignOrdinal(ByteDataBuffer serializedRepresentation, int hash, int preferredOrdinal) {
    if (preferredOrdinal < -1 || preferredOrdinal > ORDINAL_MASK) {
        throw new IllegalArgumentException(String.format(
                "The given preferred ordinal %s is out of bounds and not within the closed interval [-1, %s]",
                preferredOrdinal, ORDINAL_MASK));
    }
    if (size > sizeBeforeGrow) {
        growKeyArray();
    }

    /// check to make sure that after acquiring the lock, the element still does not exist.
    /// this operation is akin to double-checked locking which is 'fixed' with the JSR 133 memory model in JVM >= 1.5.
    /// Note that this also requires pointersAndOrdinals be volatile so resizes are also visible
    AtomicLongArray pao = pointersAndOrdinals;

    int modBitmask = pao.length() - 1;
    int bucket = hash & modBitmask;
    long key = pao.get(bucket);

    while (key != EMPTY_BUCKET_VALUE) {
        if (compare(serializedRepresentation, key)) {
            return (int) (key >>> BITS_PER_POINTER);
        }

        bucket = (bucket + 1) & modBitmask;
        key = pao.get(bucket);
    }

    /// the ordinal for this object still does not exist in the list, even after the lock has been acquired.
    /// it is up to this thread to add it at the current bucket position.
    int ordinal = findFreeOrdinal(preferredOrdinal);
    if (ordinal > ORDINAL_MASK) {
        throw new IllegalStateException(String.format(
                "Ordinal cannot be assigned. The to be assigned ordinal, %s, is greater than the maximum supported ordinal value of %s",
                ordinal, ORDINAL_MASK));
    }

    long pointer = byteData.length();

    VarInt.writeVInt(byteData, (int) serializedRepresentation.length());
    /// Copying might cause a resize to the segmented array held by byteData
    /// A reading thread may observe a null value for a segment during the creation
    /// of a new segments array (see SegmentedByteArray.ensureCapacity).
    serializedRepresentation.copyTo(byteData);
    if (byteData.length() > MAX_BYTE_DATA_LENGTH) {
        throw new IllegalStateException(String.format(
                "The number of bytes for the serialized representations, %s, is too large and is greater than the maximum of %s bytes",
                byteData.length(), MAX_BYTE_DATA_LENGTH));
    }

    key = ((long) ordinal << BITS_PER_POINTER) | pointer;

    size++;

    /// this set on the AtomicLongArray has volatile semantics (i.e. behaves like a monitor release).
    /// Any other thread reading this element in the AtomicLongArray will have visibility to all memory writes this thread has made up to this point.
    /// This means the entire byte sequence is guaranteed to be visible to any thread which reads the pointer to that data.
    pao.set(bucket, key);

    return ordinal;
}
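
The visibility argument in the final comments is the general AtomicLongArray memory contract rather than anything hollow-specific: set() behaves like a volatile write and get() like a volatile read, so plain writes made before the set() are visible to any thread whose get() observes the new value. A minimal, hypothetical sketch of that publication pattern:

static final long[] payload = new long[1];
static final AtomicLongArray ready = new AtomicLongArray(1);

static void writer() {
    payload[0] = 42L;   // plain write
    ready.set(0, 1L);   // volatile write (release)
}

static void reader() {
    if (ready.get(0) == 1L) {           // volatile read (acquire)
        System.out.println(payload[0]); // guaranteed to print 42
    }
}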
 
Source 17   Project: gemfirexd-oss   File: LongStatsDeltaAggregator.java
private void initializeArray(AtomicLongArray arr){
  for(int i = 0; i<arr.length() ; i++){
    arr.set(i, Long.valueOf(0));
  }
}
 
Source 18   Project: ignite   File: WalRecoveryTxLogicalRecordsTest.java
/**
 * @param ignite Node.
 * @param cacheName Cache name.
 * @return Cache free lists data (partition number to map of buckets to tails and buckets size).
 */
private Map<Integer, T2<Map<Integer, long[]>, int[]>> getFreeListData(Ignite ignite, String cacheName) throws IgniteCheckedException {
    GridCacheProcessor cacheProc = ((IgniteEx)ignite).context().cache();

    GridCacheContext ctx = cacheProc.cache(cacheName).context();

    List<GridDhtLocalPartition> parts = ctx.topology().localPartitions();

    assertTrue(!parts.isEmpty());
    assertEquals(ctx.affinity().partitions(), parts.size());

    Map<Integer, T2<Map<Integer, long[]>, int[]>> res = new HashMap<>();

    boolean foundNonEmpty = false;
    boolean foundTails = false;

    cacheProc.context().database().checkpointReadLock();

    try {
        for (GridDhtLocalPartition part : parts) {
            AbstractFreeList freeList = (AbstractFreeList)part.dataStore().rowStore().freeList();

            if (freeList == null)
                // Lazy store.
                continue;

            // Flush free-list onheap cache to page memory.
            freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE);

            AtomicReferenceArray<PagesList.Stripe[]> buckets = GridTestUtils.getFieldValue(freeList,
                AbstractFreeList.class, "buckets");

            AtomicLongArray bucketsSize = GridTestUtils.getFieldValue(freeList, PagesList.class, "bucketsSize");

            assertNotNull(buckets);
            assertNotNull(bucketsSize);
            assertTrue(buckets.length() > 0);
            assertEquals(bucketsSize.length(), buckets.length());

            Map<Integer, long[]> tailsPerBucket = new HashMap<>();

            for (int i = 0; i < buckets.length(); i++) {
                PagesList.Stripe[] tails = buckets.get(i);

                long ids[] = null;

                if (tails != null) {
                    ids = new long[tails.length];

                    for (int j = 0; j < tails.length; j++)
                        ids[j] = tails[j].tailId;
                }

                tailsPerBucket.put(i, ids);

                if (tails != null) {
                    assertTrue(tails.length > 0);

                    foundTails = true;
                }
            }

            int[] cntsPerBucket = new int[bucketsSize.length()];

            for (int i = 0; i < bucketsSize.length(); i++) {
                cntsPerBucket[i] = (int)bucketsSize.get(i);

                if (cntsPerBucket[i] > 0)
                    foundNonEmpty = true;
            }

            res.put(part.id(), new T2<>(tailsPerBucket, cntsPerBucket));
        }
    }
    finally {
        cacheProc.context().database().checkpointReadUnlock();
    }

    assertTrue(foundNonEmpty);
    assertTrue(foundTails);

    return res;
}
 
Source 19   Project: glowroot   File: GaugeValueDao.java
void reinitAfterDeletingDatabase() throws Exception {
    AtomicLongArray lastRollupTimes = initData(rollupConfigs, dataSource);
    for (int i = 0; i < lastRollupTimes.length(); i++) {
        this.lastRollupTimes.set(i, lastRollupTimes.get(i));
    }
}
 
Source 20   Project: consulo   File: ConcurrentBitSet.java
/**
 * Returns the hash code value for this bit set. The hash code depends
 * only on which bits are set.
 * <p/>
 * <p>The hash code is defined to be the result of the following
 * calculation:
 * <pre> {@code
 * public int hashCode() {
 *     long h = 1234;
 *     for (int i = words.length; --i >= 0; )
 *         h ^= words[i] * (i + 1);
 *     return (int)((h >> 32) ^ h);
 * }}</pre>
 * Note that the hash code changes if the set of bits is altered.
 *
 * @return the hash code value for this bit set
 */
@Override
public int hashCode() {
  long h = 1234;
  for (int a = 0; a<arrays.length();a++) {
    AtomicLongArray array = arrays.get(a);
    if (array == null) continue;
    for (int i=0;i<array.length();i++) {
      long word = array.get(i);
      h ^= word * ((1<<a)+ i);
    }
  }

  return (int)(h >> 32 ^ h);
}