org.apache.commons.lang3.mutable.MutableLong#longValue() Code Examples

Listed below are example usages of org.apache.commons.lang3.mutable.MutableLong#longValue(), drawn from open-source projects on GitHub; each example names the project and file so you can look up the full source.
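
Before the project examples, here is a minimal self-contained sketch (class and variable names are ours, for illustration) of what longValue() does: it reads the current value of the mutable wrapper back out as a primitive long.

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongDemo {
    public static void main(String[] args) {
        MutableLong counter = new MutableLong(0);
        counter.increment();                  // counter is now 1
        counter.add(41);                      // counter is now 42
        long value = counter.longValue();     // read back as a primitive long
        System.out.println(value);            // prints 42
    }
}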

Example 1  Project: hbase   File: SerialReplicationChecker.java
public boolean canPush(Entry entry, Cell firstCellInEdit) throws IOException {
  String encodedNameAsString = Bytes.toString(entry.getKey().getEncodedRegionName());
  long seqId = entry.getKey().getSequenceId();
  Long canReplicateUnderSeqId = canPushUnder.getIfPresent(encodedNameAsString);
  if (canReplicateUnderSeqId != null) {
    if (seqId < canReplicateUnderSeqId.longValue()) {
      LOG.trace("{} is before the end barrier {}, pass", entry, canReplicateUnderSeqId);
      return true;
    }
    LOG.debug("{} is beyond the previous end barrier {}, remove from cache", entry,
      canReplicateUnderSeqId);
    // we are already beyond the last safe point, remove
    canPushUnder.invalidate(encodedNameAsString);
  }
  // This is for the case where the region is currently opened on us, if the sequence id is
  // continuous then we are safe to replicate. If there is a breakpoint, then maybe the region
  // has been moved to another RS and then back, so we need to check the barrier.
  MutableLong previousPushedSeqId = pushed.getUnchecked(encodedNameAsString);
  if (seqId == previousPushedSeqId.longValue() + 1) {
    LOG.trace("The sequence id for {} is continuous, pass", entry);
    previousPushedSeqId.increment();
    return true;
  }
  return canPush(entry, CellUtil.cloneRow(firstCellInEdit));
}
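
A note on the pattern: pushed (a Guava cache, judging by getUnchecked) holds one MutableLong per region, so the last pushed sequence id can be read with longValue() and advanced in place with increment(), without boxing a new Long and writing it back into the cache.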
 
Example 2  Project: hbase   File: AbstractFSWAL.java
protected final long stampSequenceIdAndPublishToRingBuffer(RegionInfo hri, WALKeyImpl key,
  WALEdit edits, boolean inMemstore, RingBuffer<RingBufferTruck> ringBuffer)
  throws IOException {
  if (this.closed) {
    throw new IOException(
      "Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
  }
  MutableLong txidHolder = new MutableLong();
  MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
    txidHolder.setValue(ringBuffer.next());
  });
  long txid = txidHolder.longValue();
  ServerCall<?> rpcCall = RpcServer.getCurrentCall().filter(c -> c instanceof ServerCall)
    .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall) c).orElse(null);
  try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
    FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore, rpcCall);
    entry.stampRegionSequenceId(we);
    ringBuffer.get(txid).load(entry);
  } finally {
    ringBuffer.publish(txid);
  }
  return txid;
}
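
The txidHolder above works around a Java language rule: a lambda may only capture local variables that are effectively final, so a primitive local cannot be assigned inside the begin(...) callback. A mutable holder sidesteps this, since the reference stays final while its value changes. A minimal sketch of the same pattern (names are ours):

import org.apache.commons.lang3.mutable.MutableLong;

public class LambdaCaptureDemo {
    public static void main(String[] args) {
        MutableLong holder = new MutableLong();
        // A plain long local could not be reassigned inside the lambda;
        // the holder reference stays effectively final while its value changes.
        Runnable task = () -> holder.setValue(System.nanoTime());
        task.run();
        System.out.println("captured: " + holder.longValue());
    }
}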
 
Example 3  Project: count-db   File: FileDataInterface.java
@Override
public long freeMemory() {
    MutableLong totalBytesReleased = new MutableLong(0);
    ifNotClosed(() -> {
        for (FileBucket bucket : fileBuckets) {
            bucket.lockRead();
            for (FileInfo fileInfo : bucket.getFiles()) {
                long bytesReleased = fileInfo.discardFileContents();
                updateSizeOfCachedFileContents(-bytesReleased);
                totalBytesReleased.add(bytesReleased);
            }
            bucket.unlockRead();
        }
    });
    return totalBytesReleased.longValue();
}
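
Same workaround as in Example 2: totalBytesReleased is accumulated inside the ifNotClosed(...) lambda via add(...), then read out once with longValue() after the lambda completes.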
 
Example 4  Project: hbase   File: WriteHeavyIncrementObserver.java
private long getUniqueTimestamp(byte[] row) {
  int slot = Bytes.hashCode(row) & mask;
  MutableLong lastTimestamp = lastTimestamps[slot];
  long now = System.currentTimeMillis();
  synchronized (lastTimestamp) {
    long pt = lastTimestamp.longValue() >> 10;
    if (now > pt) {
      lastTimestamp.setValue(now << 10);
    } else {
      lastTimestamp.increment();
    }
    return lastTimestamp.longValue();
  }
}
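
The packing trick here: the MutableLong stores the wall-clock milliseconds shifted left by 10 bits, leaving the low 10 bits as a per-millisecond counter, so up to 1024 unique timestamps can be handed out within one millisecond; longValue() >> 10 recovers the millisecond part. A standalone sketch of the same idea (class and method names are ours):

import org.apache.commons.lang3.mutable.MutableLong;

public class UniqueTimestampDemo {
    private final MutableLong last = new MutableLong();

    public synchronized long next() {
        long nowShifted = System.currentTimeMillis() << 10;
        if (nowShifted > last.longValue()) {
            last.setValue(nowShifted); // new millisecond: low counter bits reset to 0
        } else {
            last.increment();          // same millisecond: bump the low 10 bits
        }
        return last.longValue();
    }
}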
 
Example 5  Project: count-db   File: BigramTestsMain.java
private void testSeparateWritingReading(DataType dataType, DataInterfaceFactory factory, DatabaseCachingType cachingType, int numberOfThreads, long numberOfItems) throws FileNotFoundException, InterruptedException {
    final BaseDataInterface dataInterface = createDataInterface(dataType, cachingType, factory);
    dataInterface.dropAllData();
    final DataInputStream inputStream = new DataInputStream(new BufferedInputStream(new FileInputStream(bigramFile)));

    //write data
    MutableLong numberOfItemsWritten = new MutableLong(0);
    CountDownLatch writeLatch = new CountDownLatch(numberOfThreads);
    long startOfWrite = System.nanoTime();
    for (int i = 0; i < numberOfThreads; i++) {
        new BigramTestsThread(dataType, numberOfItemsWritten, numberOfItems, inputStream, dataInterface, writeLatch, false).start();
    }
    writeLatch.await();
    dataInterface.flush();
    long endOfWrite = System.nanoTime();
    double writesPerSecond = numberOfItemsWritten.longValue() * 1e9 / (endOfWrite - startOfWrite);

    dataInterface.optimizeForReading();
    MutableLong numberOfItemsRead = new MutableLong(0);
    CountDownLatch readLatch = new CountDownLatch(numberOfThreads);
    long startOfRead = System.nanoTime();
    for (int i = 0; i < numberOfThreads; i++) {
        new BigramTestsThread(dataType, numberOfItemsRead, numberOfItems, inputStream, dataInterface, readLatch, true).start();
    }
    readLatch.await();
    dataInterface.flush();
    long endOfRead = System.nanoTime();
    double readsPerSecond = numberOfItemsRead.longValue() * 1e9 / (endOfRead - startOfRead);

    dataInterface.close();
    Log.i(factory.getClass().getSimpleName() + " threads " + numberOfThreads + " items " + numberOfItems + " write " + NumUtils.fmt(writesPerSecond) + " read " + NumUtils.fmt(readsPerSecond));
}
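
Worth noting when reading this example and Example 6 below: MutableLong is documented as not thread-safe, yet numberOfItemsWritten and numberOfItemsRead are shared across numberOfThreads worker threads, so the BigramTestsThread / UniformDataTestsThread workers presumably synchronize their updates externally; java.util.concurrent.atomic.AtomicLong would be the JDK alternative for lock-free concurrent counting.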
 
Example 6  Project: count-db   File: UniformDataTestsMain.java
private void testBatchWritingAndReading(DataInterfaceFactory factory, DatabaseCachingType cachingType, int numberOfThreads, final long numberOfItems) throws FileNotFoundException, InterruptedException {
    final BaseDataInterface dataInterface = createDataInterface(cachingType, factory);
    dataInterface.dropAllData();

    MutableLong numberOfItemsWritten = new MutableLong(0);
    long startOfWrite = System.nanoTime();
    CountDownLatch countDownLatch = new CountDownLatch(numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
        new UniformDataTestsThread(numberOfItemsWritten, numberOfItems, dataInterface, countDownLatch, true).start();
    }
    countDownLatch.await();
    dataInterface.flush();
    long endOfWrite = System.nanoTime();
    double writesPerSecond = numberOfItemsWritten.longValue() * 1e9 / (endOfWrite - startOfWrite);

    countDownLatch = new CountDownLatch(numberOfThreads);
    long startOfRead = System.nanoTime();
    dataInterface.optimizeForReading();
    MutableLong numberOfItemsRead = new MutableLong(0);
    for (int i = 0; i < numberOfThreads; i++) {
        new UniformDataTestsThread(numberOfItemsRead, numberOfItems, dataInterface, countDownLatch, false).start();
    }
    countDownLatch.await();
    long endOfRead = System.nanoTime();
    double readsPerSecond = numberOfItemsRead.longValue() * 1e9 / (endOfRead - startOfRead);

    Log.i(factory.getClass().getSimpleName() + " threads " + numberOfThreads + " items " + numberOfItems + " write " + NumUtils.fmt(writesPerSecond) + " read " + NumUtils.fmt(readsPerSecond));
    dataInterface.close();
}
 
Example 7  Project: Bats   File: ParquetGroupScanStatistics.java
public void collect(List<T> metadataList) {
  resetHolders();
  boolean first = true;
  for (T metadata : metadataList) {
    long localRowCount = (long) TableStatisticsKind.ROW_COUNT.getValue(metadata);
    for (Map.Entry<SchemaPath, ColumnStatistics> columnsStatistics : metadata.getColumnsStatistics().entrySet()) {
      SchemaPath schemaPath = columnsStatistics.getKey();
      ColumnStatistics statistics = columnsStatistics.getValue();
      MutableLong emptyCount = new MutableLong();
      MutableLong previousCount = columnValueCounts.putIfAbsent(schemaPath, emptyCount);
      if (previousCount == null) {
        previousCount = emptyCount;
      }
      Long nullsNum = (Long) statistics.getStatistic(ColumnStatisticsKind.NULLS_COUNT);
      if (previousCount.longValue() != GroupScan.NO_COLUMN_STATS && nullsNum != null && nullsNum != GroupScan.NO_COLUMN_STATS) {
        previousCount.add(localRowCount - nullsNum);
      } else {
        previousCount.setValue(GroupScan.NO_COLUMN_STATS);
      }
      ColumnMetadata columnMetadata = SchemaPathUtils.getColumnMetadata(schemaPath, metadata.getSchema());
      TypeProtos.MajorType majorType = columnMetadata != null ? columnMetadata.majorType() : null;
      boolean partitionColumn = checkForPartitionColumn(statistics, first, localRowCount, majorType, schemaPath);
      if (partitionColumn) {
        Object value = partitionValueMap.get(metadata.getLocation(), schemaPath);
        Object currentValue = statistics.getStatistic(ColumnStatisticsKind.MAX_VALUE);
        if (value != null && value != BaseParquetMetadataProvider.NULL_VALUE) {
          if (value != currentValue) {
            partitionColTypeMap.remove(schemaPath);
          }
        } else {
          // the max-value statistic of a primitive-typed column cannot itself be null,
          // so check whether the column actually contains only nulls and record that in the map
          if (localRowCount == (long) statistics.getStatistic(ColumnStatisticsKind.NULLS_COUNT)) {
            partitionValueMap.put(metadata.getLocation(), schemaPath, BaseParquetMetadataProvider.NULL_VALUE);
          } else {
            partitionValueMap.put(metadata.getLocation(), schemaPath, currentValue);
          }
        }
      } else {
        partitionColTypeMap.remove(schemaPath);
      }
    }
    this.rowCount += localRowCount;
    first = false;
  }
}
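
The putIfAbsent dance above is the standard per-key counter idiom: insert a fresh MutableLong if the key is new, otherwise keep the existing one, then mutate it in place with add(...) or setValue(...). A minimal single-threaded sketch of the idiom (names are ours):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.mutable.MutableLong;

public class MapCounterDemo {
    public static void main(String[] args) {
        Map<String, MutableLong> counts = new HashMap<>();
        for (String word : new String[] {"a", "b", "a"}) {
            MutableLong fresh = new MutableLong();
            MutableLong counter = counts.putIfAbsent(word, fresh);
            if (counter == null) {
                counter = fresh; // key was new; our fresh counter is now in the map
            }
            counter.increment(); // mutate in place; no need to re-put
        }
        System.out.println(counts.get("a").longValue()); // prints 2
    }
}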
 
Example 8  Project: attic-apex-malhar   File: SumLong.java
@Override
public Long getOutput(MutableLong accumulatedValue)
{
  return accumulatedValue.longValue();
}
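
Since getOutput is declared to return Long, the primitive from longValue() is auto-boxed on return; accumulatedValue.getValue() would return the same value already boxed.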