org.apache.hadoop.hbase.client.HTablePool#org.apache.hadoop.hbase.regionserver.wal.HLog Source Code Examples

Listed below are example usages of org.apache.hadoop.hbase.client.HTablePool and org.apache.hadoop.hbase.regionserver.wal.HLog taken from open-source projects; you can also follow each project to GitHub to read the full source code.

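For readers unfamiliar with HTablePool, here is a minimal, illustrative usage sketch (not taken from any of the projects below). It assumes the HBase 0.94-era client API, in which HTablePool and HTableInterface still exist; the table and column names are placeholders.

public static void putViaPool(Configuration conf) throws IOException {
    HTablePool pool = new HTablePool(conf, 10);          // pool at most 10 HTable instances
    HTableInterface table = pool.getTable("demo_table"); // check a table out of the pool
    try {
        Put put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        table.put(put);
    } finally {
        table.close();  // since HBase 0.92, closing the table returns it to the pool
    }
    pool.close();
}
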
Example 1  Project: phoenix-tephra   File: TransactionProcessorTest.java
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
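
How such a region might be exercised in a test is sketched below. This is a hedged illustration assuming the HBase 0.96-era HRegion API used by this compat module; writeAndFlush is a hypothetical helper, not part of phoenix-tephra.

private void writeAndFlush(HRegion region, byte[] family) throws IOException {
    region.initialize();                         // open the region's stores before use
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(family, Bytes.toBytes("q"), Bytes.toBytes("value"));
    region.put(put);
    region.flushcache();                         // force the edit out of the memstore
    region.close();
}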
 
Example 2  Project: hbase-secondary-index   File: IndexedRegion.java
@SuppressWarnings("deprecation")
public IndexedRegion(final Path basedir, final HLog log, final FileSystem fs,
      final Configuration conf, final HRegionInfo regionInfo,
      final FlushRequester flushListener) throws IOException {
    super(basedir, log, fs, conf, regionInfo, flushListener);
    this.indexTableDescriptor = new IndexedTableDescriptor(
        regionInfo.getTableDesc());
    this.conf = conf;
    this.tablePool = new HTablePool();
  }
 
Example 3  Project: hbase-secondary-index   File: THLogSplitter.java
@Override
protected Reader getReader(final FileSystem fs, final Path logfile, final Configuration conf) throws IOException {
    if (isTrxLog(logfile)) {
        return THLog.getReader(fs, logfile, conf);
    }
    return HLog.getReader(fs, logfile, conf);
}
 
Example 4  Project: hbase-secondary-index   File: THLog.java
/**
 * Get a writer for the WAL.
 *
 * @param fs the filesystem to create the log file on
 * @param path path of the log file to create
 * @param conf the configuration
 * @return A WAL writer. Close when done with it.
 * @throws IOException
 */
public static Writer createWriter(final FileSystem fs, final Path path,
		final Configuration conf) throws IOException {
	try {
		HLog.Writer writer = new SequenceFileLogWriter(THLogKey.class);
		writer.init(fs, path, conf);
		return writer;
	} catch (Exception e) {
		IOException ie = new IOException("cannot get log writer");
		ie.initCause(e);
		throw ie;
	}
}
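
A hedged usage sketch for the writer above: writeEntries is a hypothetical helper, and passing pre-built HLog.Entry objects is an assumption, but append(), sync() and close() are the standard HLog.Writer calls of this HBase generation.

public static void writeEntries(FileSystem fs, Path path, Configuration conf,
        List<HLog.Entry> entries) throws IOException {
    HLog.Writer writer = THLog.createWriter(fs, path, conf);
    try {
        for (HLog.Entry entry : entries) {
            writer.append(entry);   // append each WAL entry in order
        }
        writer.sync();              // flush appended entries to the filesystem
    } finally {
        writer.close();             // "Close when done with it", per the javadoc
    }
}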
 
Example 5  Project: hbase-secondary-index   File: THLog.java
/**
 * Get a reader for the WAL.
 *
 * @param fs the filesystem the log file lives on
 * @param path path of the log file to read
 * @param conf the configuration
 * @return A WAL reader. Close when done with it.
 * @throws IOException
 */
public static Reader getReader(final FileSystem fs, final Path path,
		final Configuration conf) throws IOException {
	try {
		HLog.Reader reader = new SequenceFileLogReader(THLogKey.class);
		reader.init(fs, path, conf);
		return reader;
	} catch (Exception e) {
		IOException ie = new IOException("cannot get log reader");
		ie.initCause(e);
		throw ie;
	}
}
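
The matching read path, again as a hedged sketch (readAllEntries is a hypothetical helper): iterate with HLog.Reader#next(), which returns null at end of log, and close the reader when done.

public static List<HLog.Entry> readAllEntries(FileSystem fs, Path path, Configuration conf)
        throws IOException {
    List<HLog.Entry> entries = new ArrayList<HLog.Entry>();
    HLog.Reader reader = THLog.getReader(fs, path, conf);
    try {
        HLog.Entry entry;
        while ((entry = reader.next()) != null) {  // null signals end of log
            entries.add(entry);
        }
    } finally {
        reader.close();
    }
    return entries;
}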
 
Example 6  Project: hbase-secondary-index
private void initializeTHLog() throws IOException {
	// We keep in the same directory as the core HLog.
	Path oldLogDir = new Path(getRootDir(), HLogSplitter.RECOVERED_EDITS);
	Path logdir = new Path(getRootDir(),
			HLog.getHLogDirectoryName(super.getServerName().getServerName()));

	trxHLog = new THLog(getFileSystem(), logdir, oldLogDir, conf, null);
}
 
Example 7  Project: phoenix-tephra   File: TransactionProcessorTest.java
@Override
public HLog getWAL(HRegionInfo regionInfo) throws IOException {
  return null;
}
 
Example 8  Project: phoenix   File: IndexedKeyValue.java
/**
 * This is a KeyValue that shouldn't actually be replayed, so we always mark it as an {@link HLog#METAFAMILY} so it
 * isn't replayed via the normal replay mechanism
 */
@Override
public boolean matchingFamily(final byte[] family) {
    return Bytes.equals(family, HLog.METAFAMILY);
}
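
An illustrative sketch (not from the Phoenix codebase) of the replay-side convention the javadoc above relies on: cells in HLog.METAFAMILY are skipped rather than re-applied. replayEdit is a hypothetical helper; WALEdit, KeyValue and Put are standard classes of this HBase generation.

private static void replayEdit(HRegion region, WALEdit edit) throws IOException {
    for (KeyValue kv : edit.getKeyValues()) {
        if (kv.matchingFamily(HLog.METAFAMILY)) {
            continue;  // marker cells such as IndexedKeyValue are never re-applied
        }
        Put put = new Put(kv.getRow());
        put.add(kv);       // re-apply the original cell to the region
        region.put(put);
    }
}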
 
Example 9  Project: hbase-secondary-index   File: THLogSplitter.java
private synchronized Writer getCoreWriter() throws IOException {
    if (coreWriter == null) {
        coreWriter = HLog.createWriter(fs, logfile, conf);
    }
    return coreWriter;
}
 
Example 10  Project: hbase-secondary-index   File: TransactionalRegion.java
/**
 * @param basedir base directory for the region
 * @param log the write-ahead log (HLog) to use
 * @param fs the filesystem
 * @param conf the configuration
 * @param regionInfo info describing the region
 * @param flushListener used to request cache flushes
 */
public TransactionalRegion(final Path basedir, final HLog log, final FileSystem fs, final Configuration conf,
        final HRegionInfo regionInfo, final FlushRequester flushListener) {
    super(basedir, log, fs, conf, regionInfo, flushListener);
    oldTransactionFlushTrigger = conf.getInt(OLD_TRANSACTION_FLUSH, DEFAULT_OLD_TRANSACTION_FLUSH);
}