Source code examples for the class com.google.common.io.CountingOutputStream

The following examples show how the com.google.common.io.CountingOutputStream API is used in practice. Each snippet is taken from an open-source project on GitHub, where the full source file can be viewed.
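
Before the project examples, here is a minimal, self-contained sketch of the basic pattern (it is not taken from any project below; the class name CountingOutputStreamDemo is purely illustrative): wrap any OutputStream in a CountingOutputStream, write through the wrapper, and read the number of bytes written via getCount().

import com.google.common.io.CountingOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class CountingOutputStreamDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream target = new ByteArrayOutputStream();
        // The wrapper forwards every write to the underlying stream and
        // keeps a running total of the bytes that have passed through it.
        try (CountingOutputStream counting = new CountingOutputStream(target)) {
            counting.write("hello".getBytes(StandardCharsets.UTF_8)); // 5 bytes
            counting.write('!');                                      // 1 more byte
            System.out.println("bytes written: " + counting.getCount()); // prints 6
        }
    }
}

Several of the examples below use the same idea with ByteStreams.nullOutputStream() as the target, so the size of a serialized payload can be measured without keeping the bytes.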

Example 1   Project: kylin-on-parquet-v2   File: FragmentFilesMerger.java
private Map<TblColRef, Dictionary<String>> mergeAndPersistDictionaries(FragmentMetaInfo fragmentMetaInfo,
        Map<TblColRef, List<Dictionary<String>>> dimDictListMap, CountingOutputStream fragmentOut)
        throws IOException {
    logger.info("merge dimension dictionaries");
    Map<TblColRef, Dictionary<String>> mergedDictMap = Maps.newHashMap();
    List<DimDictionaryMetaInfo> dimDictionaryMetaInfos = Lists.newArrayList();
    for (TblColRef dimension : parsedCubeInfo.dimensionsUseDictEncoding) {
        List<Dictionary<String>> dicts = dimDictListMap.get(dimension);
        MultipleDictionaryValueEnumerator multipleDictionaryValueEnumerator = new MultipleDictionaryValueEnumerator(
                dimension.getType(), dicts);
        Dictionary<String> mergedDict = DictionaryGenerator.buildDictionary(dimension.getType(),
                multipleDictionaryValueEnumerator);
        mergedDictMap.put(dimension, mergedDict);

        DimDictionaryMetaInfo dimDictionaryMetaInfo = new DimDictionaryMetaInfo();
        dimDictionaryMetaInfo.setDimName(dimension.getName());
        dimDictionaryMetaInfo.setDictType(mergedDict.getClass().getName());
        dimDictionaryMetaInfo.setStartOffset((int) fragmentOut.getCount());

        DictionarySerializer.serialize(mergedDict, fragmentOut);
        dimDictionaryMetaInfo.setDictLength((int) fragmentOut.getCount() - dimDictionaryMetaInfo.getStartOffset());
        dimDictionaryMetaInfos.add(dimDictionaryMetaInfo);
    }
    fragmentMetaInfo.setDimDictionaryMetaInfos(dimDictionaryMetaInfos);
    return mergedDictMap;
}
 
private Map<TblColRef, Dictionary<String>> buildAndPersistDictionaries(FragmentMetaInfo fragmentMetaInfo,
        List<List<Object>> allColumnarValues, CountingOutputStream fragmentOut) throws IOException {
    Map<TblColRef, Dictionary<String>> dictMaps = Maps.newHashMap();
    List<DimDictionaryMetaInfo> dimDictionaryMetaInfos = Lists.newArrayList();
    for (int i = 0; i < dimensions.length; i++) {
        TblColRef dimension = dimensions[i];
        List<Object> dimValueList = allColumnarValues.get(i);
        Dictionary<String> dict;
        DimDictionaryMetaInfo dimDictionaryMetaInfo = new DimDictionaryMetaInfo();
        if (dimensionsUseDictEncoding.contains(dimension)) {
            dict = buildDictionary(dimension, dimValueList);
            dictMaps.put(dimension, dict);

            dimDictionaryMetaInfo.setDimName(dimension.getName());
            dimDictionaryMetaInfo.setDictType(dict.getClass().getName());
            dimDictionaryMetaInfo.setStartOffset((int) fragmentOut.getCount());

            DictionarySerializer.serialize(dict, fragmentOut);
            dimDictionaryMetaInfo.setDictLength((int) fragmentOut.getCount()
                    - dimDictionaryMetaInfo.getStartOffset());
            dimDictionaryMetaInfos.add(dimDictionaryMetaInfo);
        }
    }
    fragmentMetaInfo.setDimDictionaryMetaInfos(dimDictionaryMetaInfos);
    return dictMaps;
}
 
Example 3   Project: dbeam   File: JdbcAvroIO.java
@SuppressWarnings("deprecation") // uses internal test functionality.
@Override
protected void prepareWrite(WritableByteChannel channel) throws Exception {
  logger.info("jdbcavroio : Preparing write...");
  connection = jdbcAvroArgs.jdbcConnectionConfiguration().createConnection();
  Void destination = getDestination();
  Schema schema = dynamicDestinations.getSchema(destination);
  dataFileWriter =
      new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema))
          .setCodec(jdbcAvroArgs.getCodecFactory())
          .setSyncInterval(syncInterval);
  dataFileWriter.setMeta("created_by", this.getClass().getCanonicalName());
  this.countingOutputStream = new CountingOutputStream(Channels.newOutputStream(channel));
  dataFileWriter.create(schema, this.countingOutputStream);
  logger.info("jdbcavroio : Write prepared");
}
 
protected void write(int status, Map<String, String> headers, Object content) throws IOException {
  // write response status code
  servletResponse.setStatus(status);

  // write response headers
  if (headers != null) {
    for (Map.Entry<String, String> entry : headers.entrySet()) {
      servletResponse.addHeader(entry.getKey(), entry.getValue());
    }
  }

  // write response body
  if (content != null) {
    servletResponse.setContentType(SystemService.MIME_JSON);
    if (addContentLength) {
      CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
      objectWriter.writeValue(counter, content);
      servletResponse.setContentLength((int) counter.getCount());
    }
    objectWriter.writeValue(servletResponse.getOutputStream(), content);
  }
}
 
Example 5   Project: indexr   File: ArithmeticCoderTest.java
@Test
public void encodeDecodeTest() throws IOException {
    ArthmeticCoder.SimpleFrequency freq = new ArthmeticCoder.SimpleFrequency(counts);

    ByteArrayOutputStream encodedPool = new ByteArrayOutputStream();
    CountingOutputStream outputCounting = new CountingOutputStream(encodedPool);
    ArthmeticCoder.Encoder encoder = new ArthmeticCoder.Encoder(freq, new BitWrappedOutputStream(outputCounting));
    for (int s : symbols) {
        encoder.write(s);
    }
    encoder.seal();

    ByteArrayInputStream decodedPool = new ByteArrayInputStream(encodedPool.toByteArray());
    CountingInputStream inputCounting = new CountingInputStream(decodedPool);
    ArthmeticCoder.Decoder decoder = new ArthmeticCoder.Decoder(freq, new BitWrappedInputStream(inputCounting));
    int[] symbols2 = new int[symbols.length];
    for (int i = 0; i < symbols.length; i++) {
        symbols2[i] = decoder.read();
    }

    Assert.assertEquals(outputCounting.getCount(), inputCounting.getCount());
    Assert.assertArrayEquals(symbols, symbols2);
}
 
Example 6   Project: ache   File: FilesTargetRepository.java
private synchronized void openNewFile() throws IOException {
    if(currentFile != null) {
        // flush; the try-with-resources block then closes the file automatically
        try(OutputStream out = this.currentFile) {
            out.flush();
        }
    }
    long timestamp = System.currentTimeMillis();
    long count = 0;
    Path filePath;
    do {
        String file = String.format("crawl_data-%d-%d.deflate", timestamp, count++);
        filePath = directory.resolve(file);
    } while (Files.exists(filePath));
    OutputStream fileStream = new PrintStream(filePath.toFile());
    this.bytesCounter = new CountingOutputStream(fileStream);
    this.currentFile = new DeflaterOutputStream(this.bytesCounter, true);
}
 
@Test(expected = RuntimeException.class)
public void givenData_whenCountReachesLimit_thenThrowException() throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    CountingOutputStream cos = new CountingOutputStream(out);

    byte[] data = new byte[1024];
    ByteArrayInputStream in = new ByteArrayInputStream(data);

    int b;
    while ((b = in.read()) != -1) {
        cos.write(b);
        if (cos.getCount() >= MAX) {
            throw new RuntimeException("Write limit reached");
        }
    }
}
 
Example 8   Project: buck   File: EntryAccounting.java
private long writeDataDescriptor(OutputStream rawOut) throws IOException {
  if (!requiresDataDescriptor()) {
    return 0;
  }

  CountingOutputStream out = new CountingOutputStream(rawOut);
  ByteIo.writeInt(out, ZipEntry.EXTSIG);
  ByteIo.writeInt(out, getCrc());
  if (getCompressedSize() >= ZipConstants.ZIP64_MAGICVAL
      || getSize() >= ZipConstants.ZIP64_MAGICVAL) {
    ByteIo.writeLong(out, getCompressedSize());
    ByteIo.writeLong(out, getSize());
  } else {
    ByteIo.writeInt(out, getCompressedSize());
    ByteIo.writeInt(out, getSize());
  }
  return out.getCount();
}
 
Example 9   Project: presto   File: OrcMetadataWriter.java
private static int writeProtobufObject(OutputStream output, MessageLite object)
        throws IOException
{
    CountingOutputStream countingOutput = new CountingOutputStream(output);
    object.writeTo(countingOutput);
    return toIntExact(countingOutput.getCount());
}
 
Example 10   Project: presto   File: RcFileFileWriter.java
public RcFileFileWriter(
        OutputStream outputStream,
        Callable<Void> rollbackAction,
        RcFileEncoding rcFileEncoding,
        List<Type> fileColumnTypes,
        Optional<String> codecName,
        int[] fileInputColumnIndexes,
        Map<String, String> metadata,
        Optional<Supplier<RcFileDataSource>> validationInputFactory)
        throws IOException
{
    this.outputStream = new CountingOutputStream(outputStream);
    rcFileWriter = new RcFileWriter(
            new OutputStreamSliceOutput(this.outputStream),
            fileColumnTypes,
            rcFileEncoding,
            codecName,
            new AircompressorCodecFactory(new HadoopCodecFactory(getClass().getClassLoader())),
            metadata,
            validationInputFactory.isPresent());
    this.rollbackAction = requireNonNull(rollbackAction, "rollbackAction is null");

    this.fileInputColumnIndexes = requireNonNull(fileInputColumnIndexes, "outputColumnInputIndexes is null");

    ImmutableList.Builder<Block> nullBlocks = ImmutableList.builder();
    for (Type fileColumnType : fileColumnTypes) {
        BlockBuilder blockBuilder = fileColumnType.createBlockBuilder(null, 1, 0);
        blockBuilder.appendNull();
        nullBlocks.add(blockBuilder.build());
    }
    this.nullBlocks = nullBlocks.build();
    this.validationInputFactory = validationInputFactory;
}
 
Example 11   Project: kylin-on-parquet-v2   File: FragmentFilesMerger.java
public CuboidColumnDataWriter(long cuboidId, String colName) throws IOException {
    this.cuboidId = cuboidId;
    this.colName = colName;

    this.tmpColDataFile = new File(mergeWorkingDirectory, cuboidId + "-" + colName + ".data");
    this.output = new CountingOutputStream(
            new BufferedOutputStream(FileUtils.openOutputStream(tmpColDataFile)));
}
 
Example 12   Project: kylin-on-parquet-v2   File: FragmentFilesMerger.java
public CuboidMetricDataWriter(long cuboidId, String metricName, int maxValLen) throws IOException {
    this.cuboidId = cuboidId;
    this.metricName = metricName;
    this.maxValLen = maxValLen;
    this.tmpMetricDataFile = new File(mergeWorkingDirectory, cuboidId + "-" + metricName + ".data");
    this.countingOutput = new CountingOutputStream(
            new BufferedOutputStream(FileUtils.openOutputStream(tmpMetricDataFile)));
    this.output = new DataOutputStream(countingOutput);
}
 
private CuboidMetaInfo persistCuboidData(long cuboidID, TblColRef[] dimensions,
        Map<TblColRef, Dictionary<String>> dictMaps, List<List<Object>> columnarCuboidValues,
        CountingOutputStream fragmentOutput) throws Exception {
    CuboidMetaInfo cuboidMeta = new CuboidMetaInfo();
    int dimCnt = dimensions.length;
    List<DimensionMetaInfo> dimensionMetaList = Lists.newArrayListWithExpectedSize(dimCnt);
    cuboidMeta.setDimensionsInfo(dimensionMetaList);
    cuboidMeta.setNumberOfDim(dimCnt);
    List<MetricMetaInfo> metricMetaInfoList = Lists.newArrayListWithCapacity(measures.length);
    cuboidMeta.setMetricsInfo(metricMetaInfoList);
    cuboidMeta.setNumberOfMetrics(measures.length);

    long rowNum = -1;
    for (int i = 0; i < dimCnt; i++) {
        if (rowNum == -1) {
            rowNum = columnarCuboidValues.get(i).size();
        }
        persistDimension(cuboidID, columnarCuboidValues.get(i), dimensionMetaList, fragmentOutput,
                dimensions[i], dictMaps);
    }

    for (int i = 0; i < measures.length; i++) {
        persistMetric(cuboidID, columnarCuboidValues.get(dimCnt + i), metricMetaInfoList, i, fragmentOutput);
    }
    cuboidMeta.setNumberOfRows(rowNum);
    return cuboidMeta;
}
 
/**
 * This method is used to persist the metrics data to disk file.
 *
 * @param metricValueList
 * @param metricMetaInfoList
 * @param indexOut
 * @throws IOException
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void persistMetric(long cuboidId, List<Object> metricValueList, List<MetricMetaInfo> metricMetaInfoList,
        int metricIdx, CountingOutputStream indexOut) throws IOException {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();

    MetricMetaInfo metricMeta = new MetricMetaInfo();
    metricMetaInfoList.add(metricMeta);
    String measureName = measures[metricIdx].getName();
    metricMeta.setName(measureName);
    metricMeta.setCol(metricIdx);
    metricMeta.setStartOffset((int) indexOut.getCount());

    DataType type = measures[metricIdx].getFunction().getReturnDataType();

    ColumnarMetricsEncoding metricsEncoding = ColumnarMetricsEncodingFactory.create(type);
    DataTypeSerializer serializer = metricsEncoding.asDataTypeSerializer();
    DataOutputStream metricsOut = new DataOutputStream(indexOut);

    int maxLength = serializer.maxLength();
    metricMeta.setMaxSerializeLength(maxLength);
    ByteBuffer metricsBuf = ByteBuffer.allocate(maxLength);
    ColumnarStoreMetricsDesc cStoreMetricsDesc = getColumnarStoreMetricsDesc(metricsEncoding);
    ColumnDataWriter metricsWriter = cStoreMetricsDesc.getMetricsWriter(metricsOut, metricValueList.size());
    // metricMeta.setStoreInFixedLength(false);
    for (Object metricValue : metricValueList) {
        metricsBuf.clear();
        serializer.serialize(metricValue, metricsBuf);
        byte[] metricBytes = Arrays.copyOf(metricsBuf.array(), metricsBuf.position());
        metricsWriter.write(metricBytes);
    }
    metricsWriter.flush();
    metricMeta.setMetricLength(metricsOut.size());
    metricMeta.setCompression(cStoreMetricsDesc.getCompression().name());
    stopwatch.stop();
    if (logger.isDebugEnabled()) {
        logger.debug("cuboid-{} saved measure:{}, took: {}ms", cuboidId, measureName, stopwatch.elapsedMillis());
    }
}
 
public int writeCompressedData(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    LZ4CompressedColumnWriter writer = new LZ4CompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
public int writeCompressData1(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    RunLengthCompressedColumnWriter writer = new RunLengthCompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
Example 17   Project: datawave   File: BaseMethodStatsInterceptor.java
protected ResponseMethodStats doWrite(WriterInterceptorContext context) throws IOException, WebApplicationException {
    ResponseMethodStats stats;
    long start = System.nanoTime();
    OutputStream originalOutputStream = context.getOutputStream();
    CountingOutputStream countingStream = new CountingOutputStream(originalOutputStream);
    context.setOutputStream(countingStream);
    try {
        context.proceed();
    } finally {
        long stop = System.nanoTime();
        long time = TimeUnit.NANOSECONDS.toMillis(stop - start);
        
        context.setOutputStream(originalOutputStream);
        
        stats = (ResponseMethodStats) context.getProperty(RESPONSE_STATS_NAME);
        if (stats == null) {
            log.warn("No response stats found for " + getClass() + ". Using default.");
            stats = new ResponseMethodStats();
        }
        
        RequestMethodStats requestStats = (RequestMethodStats) context.getProperty(REQUEST_STATS_NAME);
        if (requestStats == null) {
            log.warn("No request method stats found for " + getClass() + ". Using default.");
            requestStats = new RequestMethodStats();
            requestStats.callStartTime = stop + TimeUnit.MILLISECONDS.toNanos(1);
        }
        
        stats.serializationTime = time;
        stats.loginTime = requestStats.getLoginTime();
        stats.callTime = TimeUnit.NANOSECONDS.toMillis(stop - requestStats.getCallStartTime());
        stats.bytesWritten = countingStream.getCount();
        // Merge in the headers we saved in the postProcess call, if any.
        putNew(stats.responseHeaders, context.getHeaders());
    }
    
    return stats;
}
 
Example 18   Project: bundletool   File: GZipUtils.java
/** Calculates the GZip compressed size in bytes of the target {@code stream}. */
public static long calculateGzipCompressedSize(@WillNotClose InputStream stream)
    throws IOException {
  CountingOutputStream countingOutputStream =
      new CountingOutputStream(ByteStreams.nullOutputStream());
  try (GZIPOutputStream compressedStream = new GZIPOutputStream(countingOutputStream)) {
    ByteStreams.copy(stream, compressedStream);
  }
  return countingOutputStream.getCount();
}
 
Example 19   Project: mph-table   File: TableWriter.java
private static <K, V> void writeToIndexedOffsets(
        final File inputData,
        final File outputData,
        final File outputOffsets,
        final TableMeta<K, V> meta,
        final Iterable<Pair<K, V>> entries,
        final long dataSize) throws IOException {
    final long numEntries = meta.numEntries();
    final int offsetSize = meta.getConfig().bytesPerOffset(numEntries, dataSize);
    final long totalOffsetSize = numEntries * offsetSize;
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(outputData);
    final CountingOutputStream countOut = new CountingOutputStream(fileOut);
    final long startMillis = System.currentTimeMillis();
    try (final MMapBuffer offsets = new MMapBuffer(outputOffsets, 0L, totalOffsetSize, FileChannel.MapMode.READ_WRITE, ByteOrder.nativeOrder());
         final LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(countOut)) {
        for (final Pair<K, V> e : entries) {
            final long hash = meta.getHash(e.getFirst());
            if (hash < 0) {
                throw new IOException("inconsistent mph, known key hashed to -1: " + e.getFirst());
            }
            final long offset = countOut.getCount();
            if (offsetSize == 2) {
                offsets.memory().putShort(hash * 2L, (short) offset);
            } else if (offsetSize == 4) {
                offsets.memory().putInt(hash * 4L, (int) offset);
            } else {
                offsets.memory().putLong(hash * 8L, offset);
            }
            meta.getConfig().write(e.getFirst(), e.getSecond(), out);
        }
        offsets.sync(0L, totalOffsetSize);
        out.flush();
    }
    outputData.setReadOnly();
    outputOffsets.setReadOnly();
    LOGGER.info("wrote " + numEntries + " offsets for " + dataSize + " bytes of data in " +
                (System.currentTimeMillis() - startMillis) + " ms");
}
 
Example 20   Project: BUbiNG   File: RandomReadWritesTest.java
@SuppressWarnings("resource")
public static int[] writeRecords(final String path, final int numRecords, final WarcRecord[] randomRecords, final int parallel) throws IOException, InterruptedException {
	final ProgressLogger pl = new ProgressLogger(LOGGER, "records");
	if (parallel <= 1) pl.expectedUpdates = numRecords;
	final ProgressLogger plb = new ProgressLogger(LOGGER, "KB");
	final CountingOutputStream cos = new CountingOutputStream(new FastBufferedOutputStream(new FileOutputStream (path)));
	final WarcWriter ww;
	if (parallel == 0) {
		ww = new UncompressedWarcWriter(cos);
		pl.start("Writing records…");
	} else if (parallel == 1) {
		ww = new CompressedWarcWriter(cos);
		pl.start("Writing records (compressed)…");
	} else {
		ww = null;
		pl.start("SHOULD NOT HAPPEN");
		throw new IllegalStateException();
	}
	plb.start();
	long written = 0;
	final int[] position = new int[numRecords];
	for (int i = 0; i < numRecords; i++) {
		final int pos = RandomTestMocks.RNG.nextInt(randomRecords.length);
		position[i] = pos;
		ww.write(randomRecords[pos]);
		if (parallel <= 0) {
			pl.lightUpdate();
			plb.update((cos.getCount() - written) / 1024);
		}
		written = cos.getCount();
	}
	ww.close();
	pl.done(numRecords);
	plb.done(cos.getCount());
	return position;
}
 
Example 21   Project: blueocean-plugin   File: NodeLogResource.java
private long appendError(String msg, OutputStream w) throws IOException {
    try (CountingOutputStream os = new CountingOutputStream(w)) {
        os.write(msg.getBytes("UTF-8"));
        os.flush();
        return os.getCount();
    }
}
 
Example 22   Project: packagedrone   File: BlobStore.java
public long handleCreate ( final String id, final IOConsumer<OutputStream> consumer ) throws IOException
{
    final Path path = makeDataPath ( id );

    Files.createDirectories ( path.getParent () );

    try ( CountingOutputStream stream = new CountingOutputStream ( new BufferedOutputStream ( Files.newOutputStream ( path, StandardOpenOption.CREATE_NEW ) ) ) )
    {
        consumer.accept ( stream );
        return stream.getCount ();
    }
}
 
Example 23   Project: packagedrone   File: PayloadRecorder.java
public PayloadRecorder ( final boolean autoFinish, final PayloadCoding payloadCoding, final String payloadFlags, final DigestAlgorithm fileDigestAlgorithm ) throws IOException
{
    this.autoFinish = autoFinish;

    this.fileDigestAlgorithm = fileDigestAlgorithm;

    this.tempFile = Files.createTempFile ( "rpm-", null );

    try
    {
        this.fileStream = new BufferedOutputStream ( Files.newOutputStream ( this.tempFile, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING ) );

        this.payloadCounter = new CountingOutputStream ( this.fileStream );

        this.payloadCoding = payloadCoding;

        this.payloadFlags = Optional.ofNullable ( payloadFlags );

        final OutputStream payloadStream = this.payloadCoding.createProvider ().createOutputStream ( this.payloadCounter, this.payloadFlags );

        this.archiveCounter = new CountingOutputStream ( payloadStream );

        // setup archive stream

        this.archiveStream = new CpioArchiveOutputStream ( this.archiveCounter, CpioConstants.FORMAT_NEW, 4, CharsetNames.UTF_8 );
    }
    catch ( final IOException e )
    {
        Files.deleteIfExists ( this.tempFile );
        throw e;
    }
}
 
Example 24   Project: glowroot   File: CappedDatabase.java
private long write(String type, Copier copier) throws IOException {
    long blockStartIndex;
    synchronized (lock) {
        if (closed) {
            return -1;
        }
        long startTick = ticker.read();
        out.startBlock();
        NonClosingCountingOutputStream countingStreamAfterCompression =
                new NonClosingCountingOutputStream(out);
        CountingOutputStream countingStreamBeforeCompression =
                new CountingOutputStream(newLZFOutputStream(countingStreamAfterCompression));
        copier.copyTo(countingStreamBeforeCompression);
        countingStreamBeforeCompression.close();
        long endTick = ticker.read();
        CappedDatabaseStats stats = statsByType.get(type);
        if (stats == null) {
            stats = new CappedDatabaseStats();
            statsByType.put(type, stats);
        }
        stats.record(countingStreamBeforeCompression.getCount(),
                countingStreamAfterCompression.getCount(), endTick - startTick);
        blockStartIndex = out.endBlock();
    }
    // fsync (if really needed here) does not need to be done under lock
    out.fsyncIfReallyNeeded();
    return blockStartIndex;
}
 
Example 25   Project: lsmtree   File: ImmutableBTreeIndex.java
/**
 * @param path root lsm tree index directory
 * @param iterator the iterator
 * @param keySerializer the key serializer
 * @param valueSerializer the value serializer
 * @param blocksize block size
 * @param keepDeletions true to keep deletions
 * @param <K> the key type
 * @param <V> the value type
 * @throws IOException  if an I/O error occurs
 */
public static <K, V> void write(
        Path path,
        Iterator<Generation.Entry<K,V>> iterator,
        Serializer<K> keySerializer,
        Serializer<V> valueSerializer,
        final int blocksize,
        boolean keepDeletions
) throws IOException {
    if (blocksize > 65536) throw new IllegalArgumentException("block size must be less than 65536");
    Files.createDirectories(path);
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(path.resolve("index.bin"));
    final CountingOutputStream out = new CountingOutputStream(fileOut);
    //tempFile is deleted in writeIndex
    final Path tempPath = Files.createTempFile("tmp", ".bin");
    final WriteLevelResult result = writeLevel(out, tempPath, iterator, keySerializer, valueSerializer, blocksize, keepDeletions);
    final int tmpCount = result.tmpCount;
    final long size = result.size;

    final long valueLevelLength = out.getCount();
    final Header header = writeIndex(out, tempPath, tmpCount, keySerializer, blocksize);
    header.valueLevelLength = valueLevelLength;
    header.size = size;
    header.hasDeletions = keepDeletions;
    new HeaderSerializer().write(header, new LittleEndianDataOutputStream(out));
    fileOut.sync();
    out.close();
}
 
public FlexibleDelimitedFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
  Path fsPath = new Path(path.getLogFilePath());
  FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
  this.mCountingStream = new CountingOutputStream(fs.create(fsPath));
  this.mWriter = (codec == null) ? new BufferedOutputStream(
          this.mCountingStream) : new BufferedOutputStream(
          codec.createOutputStream(this.mCountingStream,
                                   mCompressor = CodecPool.getCompressor(codec)));
}
 
public DelimitedTextFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
    Path fsPath = new Path(path.getLogFilePath());
    FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
    this.mCountingStream = new CountingOutputStream(fs.create(fsPath));
    this.mWriter = (codec == null) ? new BufferedOutputStream(
            this.mCountingStream) : new BufferedOutputStream(
            codec.createOutputStream(this.mCountingStream,
                                     mCompressor = CodecPool.getCompressor(codec)));
}
 
Example 28   Project: bazel   File: SimpleLogHandler.java
/**
 * Opens the specified file in append mode, first closing the current file if needed.
 *
 * @throws IOException if the file could not be opened
 */
public void open(String path) throws IOException {
  try {
    close();
    file = new File(path);
    stream = new CountingOutputStream(new FileOutputStream(file, true));
    writer = new OutputStreamWriter(stream, UTF_8);
  } catch (IOException e) {
    close();
    throw e;
  }
}
 
Example 29   Project: airpal   File: CsvOutputBuilder.java
public CsvOutputBuilder(boolean includeHeader, UUID jobUUID, long maxFileSizeBytes, boolean compressedOutput) throws IOException {
    this.includeHeader = includeHeader;
    this.jobUUID = jobUUID;
    this.outputFile = File.createTempFile(jobUUID.toString(), FILE_SUFFIX);
    this.maxFileSizeBytes = maxFileSizeBytes;
    this.countingOutputStream = new CountingOutputStream(new FileOutputStream(this.outputFile));
    OutputStreamWriter writer;
    if (compressedOutput) {
        writer = new OutputStreamWriter(new GZIPOutputStream(this.countingOutputStream));
    }
    else {
        writer = new OutputStreamWriter(this.countingOutputStream);
    }
    this.csvWriter = new CSVWriter(writer);
}
 
Example 30   Project: selenium   File: ProtocolHandshake.java
public Result createSession(HttpClient client, Command command)
    throws IOException {
  Capabilities desired = (Capabilities) command.getParameters().get("desiredCapabilities");
  desired = desired == null ? new ImmutableCapabilities() : desired;

  int threshold = (int) Math.min(Runtime.getRuntime().freeMemory() / 10, Integer.MAX_VALUE);
  FileBackedOutputStream os = new FileBackedOutputStream(threshold);
  try (
      CountingOutputStream counter = new CountingOutputStream(os);
      Writer writer = new OutputStreamWriter(counter, UTF_8);
      NewSessionPayload payload = NewSessionPayload.create(desired)) {

    payload.writeTo(writer);

    try (InputStream rawIn = os.asByteSource().openBufferedStream();
         BufferedInputStream contentStream = new BufferedInputStream(rawIn)) {
      Optional<Result> result = createSession(client, contentStream, counter.getCount());

      if (result.isPresent()) {
        Result toReturn = result.get();
        LOG.info(String.format("Detected dialect: %s", toReturn.dialect));
        return toReturn;
      }
    }
  } finally {
    os.reset();
  }

  throw new SessionNotCreatedException(
      String.format(
          "Unable to create new remote session. " +
          "desired capabilities = %s",
          desired));
}