Usage examples for the org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint class

The examples below show how the org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint API is used in real projects. A short sketch of invoking the endpoint itself comes first.
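None of the project examples below actually calls the endpoint's mutateRows RPC, so here is a minimal, hedged sketch of what that looks like. It assumes an HBase 2.x client, an open Table handle named table, a column family cf, and two rows hosted by the same region (the endpoint commits atomically only within a single region). The protobuf types come from MultiRowMutationProtos; the helper name mutateTwoRowsAtomically is made up for illustration.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: atomically apply two Puts through MultiRowMutationEndpoint.
private static void mutateTwoRowsAtomically(Table table) throws Exception {
  Put p1 = new Put(Bytes.toBytes("row1"));
  p1.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
  Put p2 = new Put(Bytes.toBytes("row2"));
  p2.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2"));

  // Marshal both mutations into a single MutateRowsRequest.
  MutateRowsRequest.Builder request = MutateRowsRequest.newBuilder();
  request.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p1));
  request.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p2));

  // Route the RPC to the region hosting "row1"; both rows must live there,
  // because the endpoint is region-scoped and cannot span regions.
  CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
  MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
  service.mutateRows(null, request.build());
}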

Example 1  Project: hbase
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  ROWS[0] = ROW;
  ROWS[1] = ROW1;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MultiRowMutationEndpoint.class.getName());
  conf.setInt("hbase.regionserver.handler.count", 20);
  conf.setInt("hbase.bucketcache.size", 400);
  conf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
  conf.setInt("hbase.hstore.compactionThreshold", 7);
  conf.setFloat("hfile.block.cache.size", 0.2f);
  conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);// do not retry
  conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 500000);
  FAMILIES_1[0] = FAMILY;
  TEST_UTIL.startMiniCluster(SLAVES);
  compactReadLatch = new CountDownLatch(1);
}
 
Example 2  Project: hbase  File: TestConnection.java
@Test(expected = DoNotRetryIOException.class)
public void testClosedConnection() throws ServiceException, Throwable {
  byte[] family = Bytes.toBytes("cf");
  TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName)
    .setCoprocessor(MultiRowMutationEndpoint.class.getName())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
  TEST_UTIL.getAdmin().createTable(builder.build());

  Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  // cache the location
  try (Table table = conn.getTable(tableName)) {
    table.get(new Get(Bytes.toBytes(0)));
  } finally {
    conn.close();
  }
  Batch.Call<MultiRowMutationService, MutateRowsResponse> callable = service -> {
    throw new RuntimeException("Should not arrive here");
  };
  conn.getTable(tableName).coprocessorService(MultiRowMutationService.class,
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, callable);
}
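The example above pins down fail-fast semantics: after conn.close(), the coprocessorService call is expected to surface a DoNotRetryIOException (per the expected attribute on @Test) rather than retry against the region location cached by the earlier get.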
 
Example 3  Project: hbase  File: TestBlockEvictionFromClient.java
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  ROWS[0] = ROW;
  ROWS[1] = ROW1;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MultiRowMutationEndpoint.class.getName());
  conf.setInt("hbase.regionserver.handler.count", 20);
  conf.setInt("hbase.bucketcache.size", 400);
  conf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
  conf.setFloat("hfile.block.cache.size", 0.2f);
  conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);// do not retry
  conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 5000);
  FAMILIES_1[0] = FAMILY;
  TEST_UTIL.startMiniCluster(SLAVES);
}
 
Example 4  Project: hbase  File: TestFromClientSideScanExcpetionWithCoprocessor.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
  TestFromClientSideScanExcpetion.setUpBeforeClass();
}
 
Example 5  Project: hbase  File: TestIncrementsFromClientSide.java
@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MultiRowMutationEndpoint.class.getName());
  // We need more than one region server in this test
  TEST_UTIL.startMiniCluster(SLAVES);
}
 
Example 6  Project: phoenix  File: ConnectionQueryServicesImpl.java
private void addCoprocessors(byte[] tableName, HTableDescriptor descriptor, PTableType tableType) throws SQLException {
    // The phoenix jar must be available on HBase classpath
    int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY);
    try {
        if (!descriptor.hasCoprocessor(ScanRegionObserver.class.getName())) {
            descriptor.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null);
        }
        if (!descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) {
            descriptor.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, priority, null);
        }
        if (!descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) {
            descriptor.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null);
        }
        if (!descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) {
            descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null);
        }
        // TODO: better encapsulation for this
        // Since indexes can't have indexes, don't install our indexing coprocessor for indexes.
        // Also don't install on the SYSTEM.CATALOG and SYSTEM.STATS tables because we use
        // an all-or-none mutate class which breaks when this coprocessor is installed (PHOENIX-1318).
        if ((tableType != PTableType.INDEX && tableType != PTableType.VIEW)
                && !SchemaUtil.isMetaTable(tableName)
                && !SchemaUtil.isStatsTable(tableName)
                && !descriptor.hasCoprocessor(Indexer.class.getName())) {
            Map<String, String> opts = Maps.newHashMapWithExpectedSize(1);
            opts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
            Indexer.enableIndexing(descriptor, PhoenixIndexBuilder.class, opts, priority);
        }
        if (SchemaUtil.isStatsTable(tableName) && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
            descriptor.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
                    null, priority, null);
        }

        if (descriptor.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null
                && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(descriptor
                        .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
            if (!descriptor.hasCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName())) {
                descriptor.addCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName(),
                    null, priority, null);
            }
        } else {
            if (!descriptor.hasCoprocessor(LocalIndexSplitter.class.getName())
                    && !SchemaUtil.isMetaTable(tableName)
                    && !SchemaUtil.isSequenceTable(tableName)) {
                descriptor.addCoprocessor(LocalIndexSplitter.class.getName(), null, priority, null);
            }
        }

        // Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix table
        // stay on the same region.
        if (SchemaUtil.isMetaTable(tableName)) {
            if (!descriptor.hasCoprocessor(MetaDataEndpointImpl.class.getName())) {
                descriptor.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, priority, null);
            }
            if (!descriptor.hasCoprocessor(MetaDataRegionObserver.class.getName())) {
                descriptor.addCoprocessor(MetaDataRegionObserver.class.getName(), null, priority + 1, null);
            }
        } else if (SchemaUtil.isSequenceTable(tableName)) {
            if (!descriptor.hasCoprocessor(SequenceRegionObserver.class.getName())) {
                descriptor.addCoprocessor(SequenceRegionObserver.class.getName(), null, priority, null);
            }
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    }
}
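Note the pattern used throughout this method: every addCoprocessor call is guarded by a hasCoprocessor check, so addCoprocessors is idempotent and safe to re-run against a descriptor that already carries some of the Phoenix coprocessors. MultiRowMutationEndpoint itself is attached only to the SYSTEM.STATS table, where Phoenix needs atomic multi-row updates.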
 
Example 7  Project: hbase  File: FSTableDescriptors.java
private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
  throws IOException {
  // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
  // the META table data goes to File mode BC only. Test how that affect the system. If too much,
  // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
  return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
      .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
              HConstants.DEFAULT_HBASE_META_VERSIONS))
      .setInMemory(true)
      .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
              HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      .setBloomFilterType(BloomType.ROWCOL)
      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
      .build())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
      .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
          HConstants.DEFAULT_HBASE_META_VERSIONS))
      .setInMemory(true)
      .setBlocksize(8 * 1024)
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
      .setBloomFilterType(BloomType.ROWCOL)
      .build())
    .setColumnFamily(ColumnFamilyDescriptorBuilder
      .newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
      .setMaxVersions(HConstants.ALL_VERSIONS)
      .setInMemory(true)
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
      .setBloomFilterType(BloomType.ROWCOL)
      .build())
    .setColumnFamily(ColumnFamilyDescriptorBuilder
      .newBuilder(HConstants.NAMESPACE_FAMILY)
      .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
              HConstants.DEFAULT_HBASE_META_VERSIONS))
      .setInMemory(true)
      .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
              HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
      .setBloomFilterType(BloomType.ROWCOL)
      .build())
    .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
      MultiRowMutationEndpoint.class.getName())
      .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
}
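The meta-table descriptor above registers MultiRowMutationEndpoint at Coprocessor.PRIORITY_SYSTEM: hbase:meta updates that touch several rows at once (for example during a region merge) need to commit atomically. A hedged way to confirm the endpoint is present on a running cluster, assuming an open Admin handle named admin:

// Fetch the live descriptor for hbase:meta and check for the endpoint.
TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
// hasCoprocessor takes the coprocessor's fully qualified class name.
boolean installed = meta.hasCoprocessor(MultiRowMutationEndpoint.class.getName());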
 
Example 8  Project: hbase
@Before
public void before() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
}
 
Example 9  Project: hbase  File: TestFromClientSide5.java
public TestFromClientSide5(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class);
}
 
Example 10  Project: hbase  File: TestFromClientSideWithCoprocessor4.java
public TestFromClientSideWithCoprocessor4(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class,
      MultiRowMutationEndpoint.class);
}
 
Example 11  Project: hbase  File: TestFromClientSide.java
public TestFromClientSide(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class);
}
 
Example 12  Project: hbase  File: TestFromClientSideWithCoprocessor.java
public TestFromClientSideWithCoprocessor(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class,
      MultiRowMutationEndpoint.class);
}
 
Example 13  Project: hbase  File: TestFromClientSideWithCoprocessor5.java
public TestFromClientSideWithCoprocessor5(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class,
      MultiRowMutationEndpoint.class);
}
 
Example 14  Project: hbase  File: TestFromClientSide4.java
public TestFromClientSide4(Class registry, int numHedgedReqs) throws Exception {
  initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class);
}
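Examples 9 through 14 are parameterized constructors from the TestFromClientSide family of suites: each run picks a connection-registry implementation and a hedged-request count, and initialize(...) stands up the mini cluster with MultiRowMutationEndpoint (plus, in the WithCoprocessor variants, NoOpScanPolicyObserver) loaded as region coprocessors for the test tables.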
 