java.nio.ByteBuffer#clear() Code Examples

The examples below show how java.nio.ByteBuffer#clear() is used in real open-source projects.

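Before the project examples, here is a minimal standalone sketch (class and variable names are illustrative, not taken from any of the projects below) of what clear() actually does: it resets the position to 0 and the limit to the capacity and discards the mark, but it does not erase the bytes already stored in the buffer.

import java.nio.ByteBuffer;

public class ClearDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.put((byte) 1).put((byte) 2).put((byte) 3);   // position = 3, limit = 8

        buf.clear();                                     // position = 0, limit = 8, mark discarded
        System.out.println(buf.position());              // 0
        System.out.println(buf.limit());                 // 8 (the capacity)
        System.out.println(buf.get(0));                  // 1 - the previously written bytes are still there
    }
}
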
Example 1  Project: ZjDroid  File: MemDump.java
private static void saveByteBuffer(OutputStream out, ByteBuffer data) {
	data.order(ByteOrder.LITTLE_ENDIAN);
	byte[] buffer = new byte[8192];
	data.clear();
	while (data.hasRemaining()) {
		int count = Math.min(buffer.length, data.remaining());
		data.get(buffer, 0, count);
		try {
			out.write(buffer, 0, count);
		} catch (IOException e1) {
			// TODO Auto-generated catch block
			e1.printStackTrace();
		}
	}

}
 
Example 2
@Test
public void testHeaderContinuationContiguous() throws Exception {
    // HTTP2 upgrade
    http2Connect();

    // Part 1
    byte[] frameHeader = new byte[9];
    ByteBuffer headersPayload = ByteBuffer.allocate(128);
    buildSimpleGetRequestPart1(frameHeader, headersPayload, 3);
    writeFrame(frameHeader, headersPayload);

    // Part 2
    headersPayload.clear();
    buildSimpleGetRequestPart2(frameHeader, headersPayload, 3);
    writeFrame(frameHeader, headersPayload);

    // headers, body
    parser.readFrame(true);
    parser.readFrame(true);

    Assert.assertEquals(getSimpleResponseTrace(3), output.getTrace());
}
 
Example 3  Project: red5-io  File: Init.java
@Override
public byte[] getBytes() {
    ByteBuffer byteBuffer = ByteBuffer.allocateDirect(MANDATORY_FIELD_SIZE + CHUNK_HEADER_SIZE);
    byte[] data = super.getBytes();
    byteBuffer.put(data);
    byteBuffer.putInt(initiateTag);
    byteBuffer.putInt(advertisedReceiverWindowCredit);
    byteBuffer.putShort((short) numberOfOutboundStreams);
    byteBuffer.putShort((short) numberOfInboundStreams);
    byteBuffer.putInt(initialTSN);

    // clear() resets position to 0 (and limit to capacity) so the buffer can be read back from the start
    byteBuffer.clear();
    byte[] result = new byte[byteBuffer.capacity()];
    byteBuffer.get(result, 0, result.length);
    return result;
}
 
Example 4  Project: javacore  File: FileChannelDemo02.java
public static void main(String[] args) throws Exception {
    File file1 = new File("d:" + File.separator + "out.txt");
    File file2 = new File("d:" + File.separator + "outnote.txt");
    FileInputStream input = new FileInputStream(file1);
    FileOutputStream output = new FileOutputStream(file2);
    FileChannel fout = output.getChannel(); // obtain the output channel
    FileChannel fin = input.getChannel(); // obtain the input channel
    ByteBuffer buf = ByteBuffer.allocate(1024);

    int temp = 0;
    while ((temp = fin.read(buf)) != -1) {
        buf.flip();
        fout.write(buf);
        buf.clear(); // clear the buffer: position and limit are reset to their initial state
    }
    fin.close();
    fout.close();
    input.close();
    output.close();
}
 
Example 5  Project: freehealth-connector  File: SSLStreams.java
void doClosure() throws IOException {
   try {
      this.handshaking.lock();
      ByteBuffer tmp = this.allocate(SSLStreams.BufType.APPLICATION);

      SSLStreams.WrapperResult r;
      do {
         tmp.clear();
         tmp.flip();
         r = this.wrapper.wrapAndSendX(tmp, true);
      } while(r.result.getStatus() != Status.CLOSED);
   } finally {
      this.handshaking.unlock();
   }

}
 
Example 6  Project: phoebus  File: PVAHeader.java
/** Encode common PVA message header
 *  @param buffer Buffer into which to encode
 *  @param flags  Combination of FLAG_
 *  @param command Command
 *  @param payload_size Size of payload that follows
 */
public static void encodeMessageHeader(final ByteBuffer buffer, byte flags, final byte command, final int payload_size)
{
    if (buffer.order() == ByteOrder.BIG_ENDIAN)
        flags |= FLAG_BIG_ENDIAN;
    else
        flags &= ~FLAG_BIG_ENDIAN;
    buffer.clear();
    buffer.put(PVA_MAGIC);
    buffer.put(PVA_PROTOCOL_REVISION);
    buffer.put(flags);
    buffer.put(command);
    buffer.putInt(payload_size);
}
 
Example 7  Project: HaloDB  File: HaloDBInternal.java
int get(byte[] key, ByteBuffer buffer) throws IOException {
    InMemoryIndexMetaData metaData = inMemoryIndex.get(key);
    if (metaData == null) {
        return 0;
    }

    HaloDBFile readFile = readFileMap.get(metaData.getFileId());
    if (readFile == null) {
        logger.debug("File {} not present. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
        return get(key, buffer);
    }

    buffer.clear();
    buffer.limit(metaData.getValueSize());

    try {
        int read = readFile.readFromFile(metaData.getValueOffset(), buffer);
        buffer.flip();
        return read;
    }
    catch (ClosedChannelException e) {
        if (!isClosing) {
            logger.debug("File {} was closed. Compaction job would have deleted it. Retrying ...", metaData.getFileId());
            return get(key, buffer);
        }

        // trying to read after HaloDB.close() method called.
        throw e;
    }
}
 
Example 8  Project: jReto  File: WlanConnection.java
@Override
public void writeData(ByteBuffer data) {
	if (data.remaining() == 0) throw new IllegalArgumentException("data buffer needs to have more than 0 bytes remaining.");
	
	data.order(ByteOrder.LITTLE_ENDIAN);
	
	ByteBuffer lengthBuffer = ByteBuffer.allocate(PACKET_LENGTH_FIELD_LENGTH);
	lengthBuffer.order(ByteOrder.LITTLE_ENDIAN);
	lengthBuffer.putInt(data.remaining());
	// clear() here acts like flip(): the 4-byte buffer is completely full, so position returns to 0 and limit equals capacity
	lengthBuffer.clear();
	
	this.channelWriter.write(lengthBuffer);
	this.channelWriter.write(data);
}
 
Example 9  Project: libcommon  File: ChannelHelper.java
/**
 * Reads a boolean array from a ByteChannel
 * @param channel
 * @return
 * @throws IOException
 */
public static boolean[] readBooleanArray(@NonNull final ByteChannel channel)
	throws IOException {
	
	final int n = readInt(channel);
	final ByteBuffer buf = ByteBuffer.allocate(n);
	final int readBytes = channel.read(buf);
	if (readBytes != n) throw new IOException();
	buf.clear();
	final boolean[] result = new boolean[n];
	for (int i = 0; i < n; i++) {
		result[i] = buf.get() != 0;
	}
	return result;
}
 
Example 10  Project: flink  File: MemoryUtils.java
/**
 * Wraps the unsafe native memory with a {@link ByteBuffer}.
 *
 * @param address address of the unsafe memory to wrap
 * @param size size of the unsafe memory to wrap
 * @return a {@link ByteBuffer} which is a view of the given unsafe memory
 */
static ByteBuffer wrapUnsafeMemoryWithByteBuffer(long address, int size) {
	//noinspection OverlyBroadCatchBlock
	try {
		ByteBuffer buffer = (ByteBuffer) UNSAFE.allocateInstance(DIRECT_BYTE_BUFFER_CLASS);
		UNSAFE.putLong(buffer, BUFFER_ADDRESS_FIELD_OFFSET, address);
		UNSAFE.putInt(buffer, BUFFER_CAPACITY_FIELD_OFFSET, size);
		// allocateInstance() leaves all fields zeroed, so clear() sets position=0 and limit to the capacity written above
		buffer.clear();
		return buffer;
	} catch (Throwable t) {
		throw new Error("Failed to wrap unsafe off-heap memory with ByteBuffer", t);
	}
}
 
Example 11  Project: gemfirexd-oss  File: InputStreamChannel.java
protected void resetAndCopyLeftOverBytes(final ByteBuffer channelBuffer) {
  if (channelBuffer.hasRemaining()) {
    channelBuffer.compact();
  }
  else {
    channelBuffer.clear();
  }
}
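A typical place where this compact-or-clear choice appears is a channel read loop that consumes complete records from the buffer and carries a partial trailing record over to the next read. The sketch below is illustrative only, assuming a simple 4-byte record format; the class and method names are not taken from gemfirexd-oss.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

final class RecordReader {
    // Reads 4-byte records; leftover partial bytes are carried over to the next read.
    static void readRecords(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
        while (channel.read(buf) != -1) {
            buf.flip();                      // switch to reading what has been received so far
            while (buf.remaining() >= 4) {
                int record = buf.getInt();   // consume one complete record
                System.out.println("record=" + record);
            }
            if (buf.hasRemaining()) {
                buf.compact();               // keep the partial record, ready for further writes
            } else {
                buf.clear();                 // nothing left over, reset cheaply
            }
        }
    }
}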
 
Example 12
    /**
     * This method is used to persist the metrics data to disk file.
     *
     * @param metricValueList
     * @param metricMetaInfoList
     * @param indexOut
     * @throws IOException
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    private void persistMetric(long cuboidId, List<Object> metricValueList, List<MetricMetaInfo> metricMetaInfoList,
            int metricIdx, CountingOutputStream indexOut) throws IOException {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        MetricMetaInfo metricMeta = new MetricMetaInfo();
        metricMetaInfoList.add(metricMeta);
        String measureName = measures[metricIdx].getName();
        metricMeta.setName(measureName);
        metricMeta.setCol(metricIdx);
        metricMeta.setStartOffset((int) indexOut.getCount());

        DataType type = measures[metricIdx].getFunction().getReturnDataType();

        ColumnarMetricsEncoding metricsEncoding = ColumnarMetricsEncodingFactory.create(type);
        DataTypeSerializer serializer = metricsEncoding.asDataTypeSerializer();
        DataOutputStream metricsOut = new DataOutputStream(indexOut);

        int maxLength = serializer.maxLength();
        metricMeta.setMaxSerializeLength(maxLength);
        ByteBuffer metricsBuf = ByteBuffer.allocate(maxLength);
        ColumnarStoreMetricsDesc cStoreMetricsDesc = getColumnarStoreMetricsDesc(metricsEncoding);
        ColumnDataWriter metricsWriter = cStoreMetricsDesc.getMetricsWriter(metricsOut, metricValueList.size());
//        metricMeta.setStoreInFixedLength(false);
        for (Object metricValue : metricValueList) {
            metricsBuf.clear();
            serializer.serialize(metricValue, metricsBuf);
            byte[] metricBytes = Arrays.copyOf(metricsBuf.array(), metricsBuf.position());
            metricsWriter.write(metricBytes);
        }
        metricsWriter.flush();
        metricMeta.setMetricLength(metricsOut.size());
        metricMeta.setCompression(cStoreMetricsDesc.getCompression().name());
        stopwatch.stop();
        if (logger.isDebugEnabled()) {
            logger.debug("cuboid-{} saved measure:{}, took: {}ms", cuboidId, measureName, stopwatch.elapsedMillis());
        }
    }
 
Example 13  Project: Jabit  File: BufferPool.java
public synchronized void deallocate(ByteBuffer buffer) {
    buffer.clear();
    Stack<ByteBuffer> pool = pools.get(buffer.capacity());
    if (pool == null) {
        throw new IllegalArgumentException("Illegal buffer capacity " + buffer.capacity() +
            " one of " + pools.keySet() + " expected.");
    } else {
        pool.push(buffer);
    }
}
 
Example 14  Project: webarchive-commons  File: BasicURLCanonicalizer.java
public String decode(String input) {
	StringBuilder sb = null;
	int pctUtf8SeqStart = -1;
	ByteBuffer bbuf = null;
	CharsetDecoder utf8decoder = null;
	int i = 0;
	int h1, h2;
	while (i < input.length()) {
		char c = input.charAt(i);
		if (i <= input.length() - 3 && c == '%'
				&& (h1 = getHex(input.charAt(i + 1))) >= 0
				&& (h2 = getHex(input.charAt(i + 2))) >= 0) {
			if (sb == null) {
				sb = new StringBuilder(input.length());
				if (i > 0) {
					sb.append(input.substring(0, i));
				}
			}
			int b = ((h1 << 4) + h2) & 0xff;
			if (pctUtf8SeqStart < 0 && b < 0x80) { // plain ascii
				sb.append((char) b);
			} else {
				if (pctUtf8SeqStart < 0) {
					pctUtf8SeqStart = i;
					if (bbuf == null) {
						bbuf = ByteBuffer
								.allocate((input.length() - i) / 3);
					}
				}
				bbuf.put((byte) b);
			}
			i += 3;
		} else {
			if (pctUtf8SeqStart >= 0) {
				if (utf8decoder == null) {
					utf8decoder = UTF8().newDecoder();
				}
				appendDecodedPctUtf8(sb, bbuf, input, pctUtf8SeqStart, i,
						utf8decoder);
				pctUtf8SeqStart = -1;
				bbuf.clear();
			}
			if (sb != null) {
				sb.append(c);
			}
			i++;
		}
	}
	if (pctUtf8SeqStart >= 0) {
		if (utf8decoder == null) {
			utf8decoder = UTF8().newDecoder();
		}
		appendDecodedPctUtf8(sb, bbuf, input, pctUtf8SeqStart, i,
				utf8decoder);
	}

	if (sb != null) {
		return sb.toString();
	} else {
		return input;
	}
}
 
Example 15  Project: TencentKona-8  File: Lock.java
static void runLockSlave(int port) throws Exception {

        // establish connection to parent
        SocketChannel sc = SocketChannel.open(new InetSocketAddress(port));
        ByteBuffer buf = ByteBuffer.allocateDirect(1024);

        FileChannel fc = null;
        FileLock fl = null;
        try {
            for (;;) {

                // read command (ends with ";")
                buf.clear();
                int n, last = 0;
                do {
                    n = sc.read(buf);
                    if (n < 0)
                        return;
                    if (n == 0)
                        throw new AssertionError();
                    last += n;
                } while (buf.get(last-1) != TERMINATOR);

                // decode into command and optional parameter
                buf.flip();
                String s = Charset.defaultCharset().decode(buf).toString();
                int sp = s.indexOf(" ");
                String cmd = (sp < 0) ? s.substring(0, s.length()-1) :
                    s.substring(0, sp);
                String param = (sp < 0) ? "" : s.substring(sp+1, s.length()-1);

                // execute
                if (cmd.equals(OPEN_CMD)) {
                    if (fc != null)
                        throw new RuntimeException("File already open");
                    fc = FileChannel.open(Paths.get(param),READ, WRITE);
                }
                if (cmd.equals(CLOSE_CMD)) {
                    if (fc == null)
                        throw new RuntimeException("No file open");
                    fc.close();
                    fc = null;
                    fl = null;
                }
                if (cmd.equals(LOCK_CMD)) {
                    if (fl != null)
                        throw new RuntimeException("Already holding lock");

                    if (param.length() == 0) {
                        fl = fc.lock();
                    } else {
                        String[] values = param.split(",");
                        if (values.length != 3)
                            throw new RuntimeException("Lock parameter invalid");
                        long position = Long.parseLong(values[0]);
                        long size = Long.parseLong(values[1]);
                        boolean shared = Boolean.parseBoolean(values[2]);
                        fl = fc.lock(position, size, shared);
                    }
                }

                if (cmd.equals(UNLOCK_CMD)) {
                    if (fl == null)
                        throw new RuntimeException("Not holding lock");
                    fl.release();
                    fl = null;
                }

                // send reply
                byte[] reply = { TERMINATOR };
                n = sc.write(ByteBuffer.wrap(reply));
            }

        } finally {
            sc.close();
            if (fc != null) fc.close();
        }
    }
 
Example 16  Project: neoscada  File: DataFileAccessorImpl.java
@Override
public void forwardCorrect ( final double value, final Date afterDate ) throws Exception
{
    final long startTimestamp = afterDate.getTime ();

    final long position = this.channel.position ();
    try
    {
        this.channel.position ( HEADER_SIZE );

        final ByteBuffer buffer = ByteBuffer.allocate ( ENTRY_SIZE );

        while ( safeRead ( buffer ) == ENTRY_SIZE )
        {
            buffer.flip ();

            final double entryValue = buffer.getDouble ();
            final long entryTimestamp = buffer.getLong ();
            final byte flags = buffer.get ();

            logger.debug ( "Checking value - flag: {}", flags );

            if ( ( flags & FLAG_HEARTBEAT ) == 0 && ( flags & FLAG_DELETED ) == 0 )
            {
                if ( entryTimestamp > startTimestamp )
                {
                    logger.info ( "Rewriting history - delete - timestamp: {}, value: {}", entryTimestamp, entryValue );
                    // replace the flag value, mark as deleted
                    // the flag is one byte behind the current position
                    this.channel.position ( this.channel.position () - 1 );
                    this.channel.write ( ByteBuffer.wrap ( new byte[] { (byte) ( flags | FLAG_DELETED ) } ) );
                }
            }
            buffer.clear ();
        }

    }
    finally
    {
        this.channel.position ( position );
        logger.debug ( "Returned to position: {}", position );
    }
}
 
Example 17  Project: kylin-on-parquet-v2  File: SparkFactDistinct.java
@Override
public Iterator<Tuple2<SelfDefineSortableKey, Text>> call(Iterator<String[]> rowIterator) throws Exception {
    if (initialized == false) {
        synchronized (SparkFactDistinct.class) {
            if (initialized == false) {
                init();
            }
        }
    }

    List<Tuple2<SelfDefineSortableKey, Text>> result = Lists.newArrayList();

    int rowCount = 0;

    while (rowIterator.hasNext()) {
        String[] row = rowIterator.next();
        bytesWritten.add(countSizeInBytes(row));

        for (int i = 0; i < allCols.size(); i++) {
            String fieldValue = row[columnIndex[i]];
            if (fieldValue == null || keyValueBuilder.isNull(fieldValue))
                continue;

            final DataType type = allCols.get(i).getType();

            // for dict columns, dedupe before writing the value; for non-dict dimension columns, hold until doCleanup()
            if (dictColDeduper.isDictCol(i)) {
                if (dictColDeduper.add(i, fieldValue)) {
                    addFieldValue(type, i, fieldValue, result);
                }
            } else {
                DimensionRangeInfo old = dimensionRangeInfoMap.get(i);
                if (old == null) {
                    old = new DimensionRangeInfo(fieldValue, fieldValue);
                    dimensionRangeInfoMap.put(i, old);
                } else {
                    old.setMax(type.getOrder().max(old.getMax(), fieldValue));
                    old.setMin(type.getOrder().min(old.getMin(), fieldValue));
                }
            }
        }

        if (rowCount % 100 < samplingPercent) {
            cuboidStatCalculator.putRow(row);
        }

        if (rowCount % 100 == 0) {
            dictColDeduper.resetIfShortOfMem();
        }

        rowCount++;
    }

    ByteBuffer hllBuf = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);

    // output each cuboid's hll to reducer, key is 0 - cuboidId
    Long[] cuboidIds = cuboidStatCalculator.getCuboidIds();
    HLLCounter[] cuboidsHLL = cuboidStatCalculator.getHLLCounters();
    HLLCounter hll;

    for (int i = 0; i < cuboidIds.length; i++) {
        hll = cuboidsHLL[i];
        tmpbuf.clear();
        tmpbuf.put((byte) FactDistinctColumnsReducerMapping.MARK_FOR_HLL_COUNTER); // one byte
        tmpbuf.putLong(cuboidIds[i]);
        Text outputKey = new Text();
        Text outputValue = new Text();
        SelfDefineSortableKey sortableKey = new SelfDefineSortableKey();

        outputKey.set(tmpbuf.array(), 0, tmpbuf.position());
        hllBuf.clear();
        hll.writeRegisters(hllBuf);
        outputValue.set(hllBuf.array(), 0, hllBuf.position());

        sortableKey.init(outputKey, (byte) 0);

        result.add(new Tuple2<SelfDefineSortableKey, Text>(sortableKey, outputValue));
    }

    for (Map.Entry<Integer, DimensionRangeInfo> entry : dimensionRangeInfoMap.entrySet()) {
        int colIndex = entry.getKey();
        DimensionRangeInfo rangeInfo = entry.getValue();
        DataType dataType = allCols.get(colIndex).getType();
        addFieldValue(dataType, colIndex, rangeInfo.getMin(), result);
        addFieldValue(dataType, colIndex, rangeInfo.getMax(), result);
    }

    return result.iterator();
}
 
Example 18  Project: lams  File: ExportControlled.java
/**
 * Perform the handshaking step of the TLS connection. We use the `sslEngine' along with the `channel' to exchange messages with the server to setup an
 * encrypted channel.
 * 
 * @param sslEngine
 *            {@link SSLEngine}
 * @param channel
 *            {@link AsynchronousSocketChannel}
 * @throws SSLException
 *             in case of handshake error
 */
private static void performTlsHandshake(SSLEngine sslEngine, AsynchronousSocketChannel channel) throws SSLException {
    sslEngine.beginHandshake();
    HandshakeStatus handshakeStatus = sslEngine.getHandshakeStatus();

    // Create byte buffers to use for holding application data
    int packetBufferSize = sslEngine.getSession().getPacketBufferSize();
    ByteBuffer myNetData = ByteBuffer.allocate(packetBufferSize);
    ByteBuffer peerNetData = ByteBuffer.allocate(packetBufferSize);
    int appBufferSize = sslEngine.getSession().getApplicationBufferSize();
    ByteBuffer myAppData = ByteBuffer.allocate(appBufferSize);
    ByteBuffer peerAppData = ByteBuffer.allocate(appBufferSize);

    SSLEngineResult res = null;

    while (handshakeStatus != HandshakeStatus.FINISHED && handshakeStatus != HandshakeStatus.NOT_HANDSHAKING) {
        switch (handshakeStatus) {
            case NEED_WRAP:
                myNetData.clear();
                res = sslEngine.wrap(myAppData, myNetData);
                handshakeStatus = res.getHandshakeStatus();
                switch (res.getStatus()) {
                    case OK:
                        myNetData.flip();
                        write(channel, myNetData);
                        break;
                    case BUFFER_OVERFLOW:
                    case BUFFER_UNDERFLOW:
                    case CLOSED:
                        throw new CJCommunicationsException("Unacceptable SSLEngine result: " + res);
                }
                break;
            case NEED_UNWRAP:
                peerNetData.flip(); // Process incoming handshaking data
                res = sslEngine.unwrap(peerNetData, peerAppData);
                handshakeStatus = res.getHandshakeStatus();
                switch (res.getStatus()) {
                    case OK:
                        peerNetData.compact();
                        break;
                    case BUFFER_OVERFLOW:
                        // Check if we need to enlarge the peer application data buffer.
                        final int newPeerAppDataSize = sslEngine.getSession().getApplicationBufferSize();
                        if (newPeerAppDataSize > peerAppData.capacity()) {
                            // enlarge the peer application data buffer
                            ByteBuffer newPeerAppData = ByteBuffer.allocate(newPeerAppDataSize);
                            newPeerAppData.put(peerAppData);
                            newPeerAppData.flip();
                            peerAppData = newPeerAppData;
                        } else {
                            peerAppData.compact();
                        }
                        break;
                    case BUFFER_UNDERFLOW:
                        // Check if we need to enlarge the peer network packet buffer
                        final int newPeerNetDataSize = sslEngine.getSession().getPacketBufferSize();
                        if (newPeerNetDataSize > peerNetData.capacity()) {
                            // enlarge the peer network packet buffer
                            ByteBuffer newPeerNetData = ByteBuffer.allocate(newPeerNetDataSize);
                            newPeerNetData.put(peerNetData);
                            newPeerNetData.flip();
                            peerNetData = newPeerNetData;
                        } else {
                            peerNetData.compact();
                        }
                        // obtain more inbound network data and then retry the operation
                        if (read(channel, peerNetData) < 0) {
                            throw new CJCommunicationsException("Server does not provide enough data to proceed with SSL handshake.");
                        }
                        break;
                    case CLOSED:
                        throw new CJCommunicationsException("Unacceptable SSLEngine result: " + res);
                }
                break;

            case NEED_TASK:
                sslEngine.getDelegatedTask().run();
                handshakeStatus = sslEngine.getHandshakeStatus();
                break;
            case FINISHED:
            case NOT_HANDSHAKING:
                break;
        }
    }
}
 
Example 19  Project: jmonkeyengine  File: MipMapGenerator.java
public static void generateMipMaps(Image image){
    BufferedImage original = ImageToAwt.convert(image, false, true, 0);
    int width = original.getWidth();
    int height = original.getHeight();
    int level = 0;

    BufferedImage current = original;
    AWTLoader loader = new AWTLoader();
    ArrayList<ByteBuffer> output = new ArrayList<ByteBuffer>();
    int totalSize = 0;
    Format format = null;
    
    while (height >= 1 || width >= 1){
        Image converted = loader.load(current, false);
        format = converted.getFormat();
        output.add(converted.getData(0));
        totalSize += converted.getData(0).capacity();

        if(height == 1 || width == 1) {
          break;
        }

        level++;

        height /= 2;
        width /= 2;

        current = scaleDown(current, width, height);
    }

    ByteBuffer combinedData = BufferUtils.createByteBuffer(totalSize);
    int[] mipSizes = new int[output.size()];
    for (int i = 0; i < output.size(); i++){
        ByteBuffer data = output.get(i);
        data.clear();
        combinedData.put(data);
        mipSizes[i] = data.capacity();
    }
    combinedData.flip();

    // insert mip data into image
    image.setData(0, combinedData);
    image.setMipMapSizes(mipSizes);
    image.setFormat(format);
}
 
Example 20  Project: database  File: TestCase3.java
/**
 * Fill the buffer with a random run length of random data starting at a
 * random offset.
 * 
 * @param b
 *            The buffer.
 */
protected void fillBufferWithRandomData(final ByteBuffer b) {

    final int capacity = b.capacity();

    b.clear();
    
    // starting offset.
    final int off = r.nextInt(capacity / 2);
    
    // run length (may be zero).
    final int len = r.nextInt(capacity - off + 1) - 1;
    
    if (log.isInfoEnabled())
        log.info("off=" + off + ", len=" + len + ", off+len=" + (off + len)
                + ", capacity=" + capacity);

    final byte[] a = new byte[len];
    
    // random byte[] of that length.
    r.nextBytes(a);
    
    // setup the view of the slice on the buffer.
    b.limit(off + len);
    b.position(off);
    
    // copy random byte[] into the buffer.
    b.put(a);
    
    // prepare the buffer for reading.
    b.flip();
    
}