org.apache.hadoop.hbase.client.HTable#setWriteBufferSize() Source Code Examples

Listed below is example code for org.apache.hadoop.hbase.client.HTable#setWriteBufferSize(), collected from open-source projects; you can also follow the links to view the full sources on GitHub.
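
Background: setWriteBufferSize(long) sets the size, in bytes, of the client-side write buffer. It only matters once auto-flush has been turned off; Put operations then accumulate in the buffer and are shipped to the region servers in one batch each time the buffer fills, trading a small durability window for far fewer RPCs. Any remaining partial buffer is only sent on flushCommits() or close(). The following minimal sketch shows the typical pattern against the classic (pre-1.0) HTable API that all the examples below use; the table name "demo" and column family "cf" are placeholders:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteBufferDemo {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "demo"); // "demo" must already exist
        try {
            table.setAutoFlush(false);                 // buffer puts client-side
            table.setWriteBufferSize(8 * 1024 * 1024); // 8 MB buffer
            for (int i = 0; i < 10000; i++) {
                Put put = new Put(Bytes.toBytes("row-" + i));
                put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
                table.put(put); // sent automatically each time the buffer fills
            }
            table.flushCommits(); // push whatever is still buffered
        } finally {
            table.close(); // close() also flushes any remaining edits
        }
    }
}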

Example 1  Project: alchemy   File: HBaseOutputFormat.java
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    table = new HTable(conf, this.hbaseProperties.getTableName());
    if (this.hbaseProperties.getBufferSize() > 0) {
        table.setAutoFlushTo(false);
        table.setWriteBufferSize(this.hbaseProperties.getBufferSize());
    }
}
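
Here setAutoFlushTo(false) is the HBase 0.99+ replacement for the deprecated single-argument setAutoFlush(boolean) seen in the older examples below; the write buffer size only takes effect while auto-flush is off. The matching flushCommits()/close() calls presumably live in the OutputFormat's close() method, which is not shown in this excerpt.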
 
Example 2  Project: wifi   File: HBaseTable.java
public static void put(String row, String column, String data)
		throws Exception {
	HTable table = new HTable(cfg, tableName);
	table.setAutoFlush(false);
	table.setWriteBufferSize(10 * 1024 * 1024); // 10 MB client-side buffer
	Put p1 = new Put(Bytes.toBytes(row));
	p1.add(Bytes.toBytes(familyName), Bytes.toBytes(column),
			Bytes.toBytes(data));
	table.put(p1);
	// With auto-flush off the put sits in the write buffer; flush and
	// close before returning, or the edit never reaches the server.
	table.flushCommits();
	table.close();
	System.out.println("put '" + row + "','" + familyName + ":" + column
			+ "','" + data + "'");
}
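
Note that this helper opens a new HTable for every put. HTable instances are heavyweight and not thread-safe, so the buffered-write pattern only pays off when the table is created once and reused across many puts (or managed through an HTablePool), as the remaining examples do.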
 
Example 3  Project: bigdata-tutorial   File: Mapper2HbaseDemo.java
@Override
protected void setup(Context context) throws IOException,
		InterruptedException {
	super.setup(context);
	conf = HBaseConfiguration.create(context.getConfiguration());
	conf.set("hbase.zookeeper.quorum", "zk1.hadoop,zk2.hadoop,zk3.hadoop");
	conf.set("hbase.zookeeper.property.clientPort", "2181");

	htable = new HTable(conf, "micmiu");
	htable.setAutoFlush(false);
	htable.setWriteBufferSize(12 * 1024 * 1024); // 12 MB write buffer
	wal = true;
}
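
setup() leaves the table buffering with auto-flush off, so the mapper also needs a matching cleanup() that flushes and closes it. The project's cleanup() is not shown in this excerpt; a minimal sketch of what it would look like, reusing the htable field from above:

@Override
protected void cleanup(Context context) throws IOException,
		InterruptedException {
	// Assumed shape, not the project's actual code: push any puts still
	// sitting in the 12 MB write buffer, then release the table.
	if (htable != null) {
		htable.flushCommits();
		htable.close();
	}
	super.cleanup(context);
}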
 
Example 4  Project: hiped2   File: HBaseWriter.java
/**
 * Loads stock records from a CSV file and writes them to HBase
 * as Avro-serialized values through a buffered HTable.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {

  Cli cli = Cli.builder().setArgs(args).addOptions(CliCommonOpts.InputFileOption.values()).build();
  int result = cli.runCmd();

  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(CliCommonOpts.InputFileOption.INPUT));

  Configuration conf = HBaseConfiguration.create();

  createTableAndColumn(conf, STOCKS_TABLE_NAME,
      STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES);

  HTable htable = new HTable(conf, STOCKS_TABLE_NAME);
  htable.setAutoFlush(false);
  htable.setWriteBufferSize(1024 * 1024 * 12); // 12 MB write buffer

  SpecificDatumWriter<Stock> writer =
      new SpecificDatumWriter<Stock>();
  writer.setSchema(Stock.SCHEMA$);

  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  BinaryEncoder encoder =
      EncoderFactory.get().directBinaryEncoder(bao, null);

  for (Stock stock: AvroStockUtils.fromCsvFile(inputFile)) {
    writer.write(stock, encoder);
    encoder.flush();

    byte[] rowkey = Bytes.add(
        Bytes.toBytes(stock.getSymbol().toString()),
        Bytes.toBytes(stock.getDate().toString()));

    byte[] stockAsAvroBytes = bao.toByteArray();

    Put put = new Put(rowkey);
    put.add(STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES,
        STOCK_COLUMN_QUALIFIER_AS_BYTES,
        stockAsAvroBytes);

    htable.put(put);

    bao.reset();
  }

  htable.flushCommits();
  htable.close();
  System.out.println("done");

  return 0;
}
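
As an alternative to calling setWriteBufferSize() on each table, the default buffer size for every HTable created from a configuration can be set once through the standard hbase.client.write.buffer property (2 MB out of the box). A one-line sketch matching the 12 MB used above:

Configuration conf = HBaseConfiguration.create();
// Every HTable created from this conf starts with a 12 MB write buffer
// instead of the 2 MB default (hbase.client.write.buffer = 2097152).
conf.setLong("hbase.client.write.buffer", 12 * 1024 * 1024);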