org.apache.hadoop.io.DataOutputBuffer#close() source code examples

Listed below are example usages of org.apache.hadoop.io.DataOutputBuffer#close(), collected from open-source projects on GitHub.
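
Before the project-specific examples, here is a minimal, self-contained sketch of the usual pattern: write a Writable into a DataOutputBuffer, hand the backing array to a DataInputBuffer, and close both buffers when done. The class name DataOutputBufferCloseDemo and the sample value are illustrative, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataOutputBufferCloseDemo {
  public static void main(String[] args) throws IOException {
    // Serialize a Writable into the in-memory buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("hello").write(out);
    out.close(); // close() comes from DataOutputStream; the buffered bytes stay readable

    // getData() returns the backing array; only the first getLength() bytes are valid.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());

    Text copy = new Text();
    copy.readFields(in);
    in.close();

    System.out.println(copy); // prints "hello"
  }
}

Closing these in-memory buffers releases no external resources, which is why several of the examples below still call getData()/getLength() after close().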

Example 1   Project: gemfirexd-oss   File: SequenceFile.java
/** Read a compressed buffer */
private synchronized void readBuffer(DataInputBuffer buffer, 
                                     CompressionInputStream filter) throws IOException {
  // Read data into a temporary buffer
  DataOutputBuffer dataBuffer = new DataOutputBuffer();

  try {
    int dataBufferLength = WritableUtils.readVInt(in);
    dataBuffer.write(in, dataBufferLength);
  
    // Set up 'buffer' connected to the input-stream
    buffer.reset(dataBuffer.getData(), 0, dataBuffer.getLength());
  } finally {
    dataBuffer.close();
  }

  // Reset the codec
  filter.resetState();
}
 
Example 2   Project: phoenix   File: TestClientKeyValueLocal.java
private void validate(KeyValue kv, byte[] row, byte[] family, byte[] qualifier, long ts,
    Type type, byte[] value) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  kv.write(out);
  out.close();
  byte[] data = out.getData();
  // read it back in
  KeyValue read = new KeyValue();
  DataInputBuffer in = new DataInputBuffer();
  in.reset(data, data.length);
  read.readFields(in);
  in.close();

  // validate that it's the same
  assertTrue("Row didn't match!", Bytes.equals(row, read.getRow()));
  assertTrue("Family didn't match!", Bytes.equals(family, read.getFamily()));
  assertTrue("Qualifier didn't match!", Bytes.equals(qualifier, read.getQualifier()));
  assertTrue("Value didn't match!", Bytes.equals(value, read.getValue()));
  assertEquals("Timestamp didn't match", ts, read.getTimestamp());
  assertEquals("Type didn't match", type.getCode(), read.getType());
}
 
Example 3   Project: spliceengine   File: HFilesystemAdmin.java
private static byte[] toByteArray(Writable... writables) {
    final DataOutputBuffer out = new DataOutputBuffer();
    try {
        for(Writable w : writables) {
            w.write(out);
        }
        out.close();
    } catch (IOException e) {
        throw new RuntimeException("Fail to convert writables to a byte array",e);
    }
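    // getData() returns the internal backing array, which may be longer than
    // getLength(), so trim the result if the two sizes differ.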
    byte[] bytes = out.getData();
    if (bytes.length == out.getLength()) {
        return bytes;
    }
    byte[] result = new byte[out.getLength()];
    System.arraycopy(bytes, 0, result, 0, out.getLength());
    return result;
}
 
Example 4   Project: hadoop   File: TestEditsDoubleBuffer.java
@Test
public void testDoubleBuffer() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  
  assertTrue(buf.isFlushed());
  byte[] data = new byte[100];
  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());

  assertTrue("Writing to current buffer should not affect flush state",
      buf.isFlushed());

  // Swap the buffers
  buf.setReadyToFlush();
  assertEquals("Swapping buffers should still count buffered bytes",
      data.length, buf.countBufferedBytes());
  assertFalse(buf.isFlushed());
 
  // Flush to a stream
  DataOutputBuffer outBuf = new DataOutputBuffer();
  buf.flushTo(outBuf);
  assertEquals(data.length, outBuf.getLength());
  assertTrue(buf.isFlushed());
  assertEquals(0, buf.countBufferedBytes());
  
  // Write some more
  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());
  buf.setReadyToFlush();
  buf.flushTo(outBuf);
  
  assertEquals(data.length * 2, outBuf.getLength());
  
  assertEquals(0, buf.countBufferedBytes());

  outBuf.close();
}
 
Example 5   Project: big-c   File: TestEditsDoubleBuffer.java
@Test
public void testDoubleBuffer() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  
  assertTrue(buf.isFlushed());
  byte[] data = new byte[100];
  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());

  assertTrue("Writing to current buffer should not affect flush state",
      buf.isFlushed());

  // Swap the buffers
  buf.setReadyToFlush();
  assertEquals("Swapping buffers should still count buffered bytes",
      data.length, buf.countBufferedBytes());
  assertFalse(buf.isFlushed());
 
  // Flush to a stream
  DataOutputBuffer outBuf = new DataOutputBuffer();
  buf.flushTo(outBuf);
  assertEquals(data.length, outBuf.getLength());
  assertTrue(buf.isFlushed());
  assertEquals(0, buf.countBufferedBytes());
  
  // Write some more
  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());
  buf.setReadyToFlush();
  buf.flushTo(outBuf);
  
  assertEquals(data.length * 2, outBuf.getLength());
  
  assertEquals(0, buf.countBufferedBytes());

  outBuf.close();
}
 
Example 6   Project: RDFS   File: AvatarNode.java
/**
 * Create an empty edits log
 */
static void createEditsFile(String editDir) throws IOException {
  File editfile = new File(editDir + EDITSFILE);
  FileOutputStream fp = new FileOutputStream(editfile);
  DataOutputBuffer buf = new DataOutputBuffer(1024);
  buf.writeInt(FSConstants.LAYOUT_VERSION);
  buf.writeTo(fp);
  buf.close();
  fp.close();
}
 
Example 7   Project: tez   File: TestShuffleInputEventHandlerImpl.java
private InputContext createInputContext() throws IOException {
  DataOutputBuffer port_dob = new DataOutputBuffer();
  port_dob.writeInt(PORT);
  final ByteBuffer shuffleMetaData = ByteBuffer.wrap(port_dob.getData(), 0, port_dob.getLength());
  port_dob.close();

  ExecutionContext executionContext = mock(ExecutionContext.class);
  doReturn(HOST).when(executionContext).getHostName();

  InputContext inputContext = mock(InputContext.class);
  doReturn(new TezCounters()).when(inputContext).getCounters();
  doReturn("sourceVertex").when(inputContext).getSourceVertexName();
  doReturn(shuffleMetaData).when(inputContext)
      .getServiceProviderMetaData(conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
          TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT));
  doReturn(executionContext).when(inputContext).getExecutionContext();
  when(inputContext.createTezFrameworkExecutorService(anyInt(), anyString())).thenAnswer(
      new Answer<ExecutorService>() {
        @Override
        public ExecutorService answer(InvocationOnMock invocation) throws Throwable {
          return sharedExecutor.createExecutorService(
              invocation.getArgumentAt(0, Integer.class),
              invocation.getArgumentAt(1, String.class));
        }
      });
  return inputContext;
}
 
Example 8   Project: tez   File: TestShuffleManager.java
private InputContext createInputContext() throws IOException {
  DataOutputBuffer port_dob = new DataOutputBuffer();
  port_dob.writeInt(PORT);
  final ByteBuffer shuffleMetaData = ByteBuffer.wrap(port_dob.getData(), 0,
      port_dob.getLength());
  port_dob.close();

  ExecutionContext executionContext = mock(ExecutionContext.class);
  doReturn(FETCHER_HOST).when(executionContext).getHostName();

  InputContext inputContext = mock(InputContext.class);
  doReturn(new TezCounters()).when(inputContext).getCounters();
  doReturn("sourceVertex").when(inputContext).getSourceVertexName();
  doReturn(shuffleMetaData).when(inputContext)
      .getServiceProviderMetaData(conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
          TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT));
  doReturn(executionContext).when(inputContext).getExecutionContext();
  when(inputContext.createTezFrameworkExecutorService(anyInt(), anyString())).thenAnswer(
      new Answer<ExecutorService>() {
        @Override
        public ExecutorService answer(InvocationOnMock invocation) throws Throwable {
          return sharedExecutor.createExecutorService(
              invocation.getArgumentAt(0, Integer.class),
              invocation.getArgumentAt(1, String.class));
        }
      });
  return inputContext;
}
 
Example 9   Project: tez   File: TestEntityDescriptor.java
public void testSingularWrite(InputDescriptor entityDescriptor, InputDescriptor deserialized, UserPayload payload,
                              String confVal) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  entityDescriptor.write(out);
  out.close();
  ByteArrayOutputStream bos = new ByteArrayOutputStream(out.getData().length);
  bos.write(out.getData());

  Mockito.verify(entityDescriptor).writeSingular(eq(out), any(ByteBuffer.class));
  deserialized.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
  verifyResults(entityDescriptor, deserialized, payload, confVal);
}
 
Example 10   Project: incubator-retired-blur   File: ProtoSerializer.java
public static void main(String[] args) throws ParseException, IOException {
  QueryParser parser = new QueryParser(Version.LUCENE_40, "", new StandardAnalyzer(Version.LUCENE_40));
  Query query = parser.parse("a:v1 b:v2 c:v3~ c:asda*asda");
  SuperQuery superQuery = new SuperQuery(query, ScoreType.SUPER, new Term("_primedoc_"));

  QueryWritable queryWritable = new QueryWritable(superQuery);
  DataOutputBuffer buffer = new DataOutputBuffer();
  queryWritable.write(buffer);
  buffer.close();

  System.out.println(new String(buffer.getData(), 0, buffer.getLength()));

  QueryWritable qw = new QueryWritable();
  DataInputBuffer in = new DataInputBuffer();
  in.reset(buffer.getData(), 0, buffer.getLength());
  qw.readFields(in);

  System.out.println("------------");
  System.out.println(qw.getQuery());
  System.out.println("------------");

  while (true) {
    run(superQuery);
  }
}