下面列出了使用 org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost API 的实例代码及写法；也可以点击链接跳转到 GitHub 查看完整源代码。
/**
 * Creates the query matcher for a user-initiated scan. Raw scans (which surface delete
 * markers) get a {@code RawScanQueryMatcher}; everything else gets a
 * {@code NormalUserScanQueryMatcher} that also consults the delete tracker.
 */
public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo,
NavigableSet<byte[]> columns, long oldestUnexpiredTS, long now,
RegionCoprocessorHost regionCoprocessorHost) throws IOException {
  // "Null column" means no explicit qualifiers were requested, or the first requested
  // qualifier is the empty byte array.
  boolean hasNullColumn =
      columns == null || columns.isEmpty() || columns.first().length == 0;
  Pair<DeleteTracker, ColumnTracker> trackers =
      getTrackers(regionCoprocessorHost, columns, scanInfo, oldestUnexpiredTS, scan);
  if (scan.isRaw()) {
    // Raw scans keep deleted cells, so only the column tracker is passed along.
    return RawScanQueryMatcher.create(scan, scanInfo, trackers.getSecond(), hasNullColumn,
        oldestUnexpiredTS, now);
  }
  return NormalUserScanQueryMatcher.create(scan, scanInfo, trackers.getSecond(),
      trackers.getFirst(), hasNullColumn, oldestUnexpiredTS, now);
}
/**
 * Verifies that {@code preStoreScannerOpen} runs the coprocessor chain and lets an observer
 * rewrite the store's ScanInfo when the incoming scan covers all time.
 */
@Test
public void testPreStoreScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  Scan allTimeScan = new Scan();
  allTimeScan.setTimeRange(TimeRange.INITIAL_MIN_TIMESTAMP, TimeRange.INITIAL_MAX_TIMESTAMP);
  assertTrue("Scan is not for all time", allTimeScan.getTimeRange().isAllTime());
  // SimpleRegionObserver only customizes ScanInfo when the scan is for all time, so this
  // exercises both the wiring of the Scan into the coprocessor and the metadata rewrite.
  ScanInfo originalScanInfo = getScanInfo();
  HStore mockStore = mock(HStore.class);
  when(mockStore.getScanInfo()).thenReturn(originalScanInfo);
  verifyScanInfo(host.preStoreScannerOpen(mockStore, allTimeScan));
}
/**
 * Creates a fresh HRegion (with WAL) for the given families and wires in a manually
 * constructed RegionCoprocessorHost, since no real region server runs in this test.
 */
HRegion initHRegion (byte [] tableName, String callingMethod,
Configuration conf, byte [] ... families) throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor descriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(tableName));
  for (byte[] cf : families) {
    descriptor.setColumnFamily(
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cf));
  }
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(descriptor.getTableName()).build();
  HRegion region = HBaseTestingUtility.createRegionAndWAL(regionInfo,
      new Path(DIR + callingMethod), conf, descriptor);
  // Hack: a coprocessor host is normally installed by OpenRegionHandler. Since no region
  // server is started here, attach a hand-built host with mocked services.
  region.setCoprocessorHost(new RegionCoprocessorHost(region,
      Mockito.mock(RegionServerServices.class), conf));
  return region;
}
/**
 * Variant of initHRegion that creates its own WAL and builds the region directly via
 * {@code HRegion.createHRegion}, then attaches a hand-made coprocessor host (no region
 * server is started in these tests, so OpenRegionHandler never runs).
 */
HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
byte[]... families) throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor descriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(tableName));
  for (byte[] cf : families) {
    descriptor.setColumnFamily(
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cf));
  }
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(descriptor.getTableName()).build();
  Path regionDir = new Path(DIR + callingMethod);
  WAL wal = HBaseTestingUtility.createWal(conf, regionDir, regionInfo);
  HRegion region = HRegion.createHRegion(regionInfo, regionDir, conf, descriptor, wal);
  // Hack: manually install the coprocessor host that OpenRegionHandler would create.
  region.setCoprocessorHost(new RegionCoprocessorHost(region, null, conf));
  return region;
}
/**
 * Verifies observer stacking at scan time: NoDataFromScan (highest priority) filters all
 * cells out of reads, so a Get returns no cells even though a Put was applied.
 */
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] row = Bytes.toBytes("testRow");
  byte[] table = Bytes.toBytes(getClass().getName());
  byte[] famA = Bytes.toBytes("A");
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(table, getClass().getName(), conf, new byte[][] { famA });
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(row);
  put.addColumn(famA, famA, famA);
  region.put(put);
  Result r = region.get(new Get(row));
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
/**
 * Verifies observer stacking at flush time: NoDataFromFlush (highest priority) filters all
 * cells out of the flush, so a subsequent Get must return no data even after flushing.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  // BUGFIX: the failure message previously blamed the NoDataFromScan coprocessor, but this
  // test installs NoDataFromFlush — the message now names the coprocessor actually loaded.
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
      + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
/**
 * With no coprocessor-loading keys set, constructing a RegionCoprocessorHost must honor the
 * compile-time defaults for both system and table coprocessor loading.
 */
@Test
public void testRegionCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  HRegion mockRegion = mock(HRegion.class);
  when(mockRegion.getRegionInfo()).thenReturn(REGIONINFO);
  when(mockRegion.getTableDescriptor()).thenReturn(TABLEDESC);
  RegionServerServices mockServices = mock(RegionServerServices.class);
  // Reset the observation flags before the host constructor triggers loading.
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(mockRegion, mockServices, conf);
  assertEquals("System coprocessors loading default was not honored",
      CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get());
  // Table coprocessors load only when both the global and the user defaults allow it.
  assertEquals("Table coprocessors loading default was not honored",
      CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED
          && CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED,
      tableCoprocessorLoaded.get());
}
/**
 * With the global coprocessor switch off, neither system nor table coprocessors may load
 * during host construction.
 */
@Test
public void testRegionCoprocessorHostAllDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion mockRegion = mock(HRegion.class);
  when(mockRegion.getRegionInfo()).thenReturn(REGIONINFO);
  when(mockRegion.getTableDescriptor()).thenReturn(TABLEDESC);
  RegionServerServices mockServices = mock(RegionServerServices.class);
  // Reset the observation flags before the host constructor triggers loading.
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(mockRegion, mockServices, conf);
  assertFalse("System coprocessors should not have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
/**
 * With the global switch on but user (table) coprocessor loading off, only system
 * coprocessors may load during host construction.
 */
@Test
public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true); // if defaults change
  conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion mockRegion = mock(HRegion.class);
  when(mockRegion.getRegionInfo()).thenReturn(REGIONINFO);
  when(mockRegion.getTableDescriptor()).thenReturn(TABLEDESC);
  RegionServerServices mockServices = mock(RegionServerServices.class);
  // Reset the observation flags before the host constructor triggers loading.
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(mockRegion, mockServices, conf);
  assertTrue("System coprocessors should have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
/**
 * Rough test that Coprocessor Environment is Read-Only.
 * Just check a random CP and see that it returns a read-only config.
 */
@Test
public void testReadOnlyConfiguration() throws Exception {
  Configuration conf = new Configuration(CONF);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDescriptor()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  RegionCoprocessorHost rcp = new RegionCoprocessorHost(region, rsServices, conf);
  boolean found = false;
  for (String cpStr: rcp.getCoprocessors()) {
    CoprocessorEnvironment cpenv = rcp.findCoprocessorEnvironment(cpStr);
    // BUGFIX: getConfiguration() was previously called outside this null check and would
    // throw NullPointerException for any coprocessor name without an environment.
    if (cpenv != null) {
      found = true;
      Configuration c = cpenv.getConfiguration();
      thrown.expect(UnsupportedOperationException.class);
      // A read-only configuration must reject mutation.
      c.set("one.two.three", "four.five.six");
    }
  }
  assertTrue("Should be at least one CP found", found);
}
/**
 * Wraps the scanner-open observer callback. When abort-on-error is enabled for
 * coprocessors, any failure is logged and the unmodified inner scanner is returned so the
 * error cannot take down the region server; otherwise exceptions propagate normally.
 */
@Override
public final RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> ctxt, final Scan scan, final RegionScanner innerScanner) throws IOException {
  final boolean abortOnError = ctxt.getEnvironment().getConfiguration()
      .getBoolean(RegionCoprocessorHost.ABORT_ON_ERROR_KEY, RegionCoprocessorHost.DEFAULT_ABORT_ON_ERROR);
  if (!abortOnError) {
    return doPostScannerObserver(ctxt, scan, innerScanner);
  }
  // never throw out exception that could abort region server
  try {
    return doPostScannerObserver(ctxt, scan, innerScanner);
  } catch (Throwable e) {
    LOG.error("Kylin Coprocessor Error", e);
    return innerScanner;
  }
}
/**
 * Returns the region's access-controller observers, computing and caching the list on first
 * use. A coprocessor qualifies when it implements both AccessControlService.Interface and
 * MasterObserver. Also flags whether HBase's own AccessController is among them.
 * NOTE(review): accessControllers appears to be a cached holder (get()/set()) shared across
 * calls — confirm its declaration and thread-safety guarantees in the enclosing class.
 */
private List<MasterObserver> getAccessControllers() throws IOException {
// Cache hit: reuse the previously computed list.
ArrayList<MasterObserver> oldAccessControllers = accessControllers.get();
if (oldAccessControllers == null) {
oldAccessControllers = new ArrayList<>();
RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
// Scan all loaded region coprocessors for access-control implementations.
for (RegionCoprocessor cp : cpHost.findCoprocessors(RegionCoprocessor.class)) {
if (cp instanceof AccessControlService.Interface && cp instanceof MasterObserver) {
oldAccessControllers.add((MasterObserver)cp);
// Remember whether the stock HBase AccessController specifically is enabled.
if(cp.getClass().getName().equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
hbaseAccessControllerEnabled = true;
}
}
}
// Publish the computed list for subsequent calls.
accessControllers.set(oldAccessControllers);
}
return accessControllers.get();
}
/**
 * Creates the matcher used for compaction scans. A non-null dropDeletesFromRow selects the
 * stripe-compaction matcher (deletes dropped only within a row range); otherwise the choice
 * depends on whether deletes are retained (minor) or purged (major), with a
 * new-version-behavior variant for the retain-deletes case.
 */
public static CompactionScanQueryMatcher create(ScanInfo scanInfo, ScanType scanType,
long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now,
byte[] dropDeletesFromRow, byte[] dropDeletesToRow,
RegionCoprocessorHost regionCoprocessorHost) throws IOException {
  Pair<DeleteTracker, ColumnTracker> trackers =
      getTrackers(regionCoprocessorHost, null, scanInfo, oldestUnexpiredTS, null);
  DeleteTracker deletes = trackers.getFirst();
  ColumnTracker columns = trackers.getSecond();
  if (dropDeletesFromRow != null) {
    return new StripeCompactionScanQueryMatcher(scanInfo, deletes, columns, readPointToUse,
        earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow);
  }
  if (scanType != ScanType.COMPACT_RETAIN_DELETES) {
    return new MajorCompactionScanQueryMatcher(scanInfo, deletes, columns, readPointToUse,
        earliestPutTs, oldestUnexpiredTS, now);
  }
  return scanInfo.isNewVersionBehavior()
      ? new IncludeAllCompactionQueryMatcher(scanInfo, deletes, columns, readPointToUse,
          oldestUnexpiredTS, now)
      : new MinorCompactionScanQueryMatcher(scanInfo, deletes, columns, readPointToUse,
          oldestUnexpiredTS, now);
}
/** preCompactScannerOpen must run observers that can rewrite the store's ScanInfo. */
@Test
public void testPreCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo originalScanInfo = getScanInfo();
  HStore mockStore = mock(HStore.class);
  when(mockStore.getScanInfo()).thenReturn(originalScanInfo);
  verifyScanInfo(host.preCompactScannerOpen(mockStore, ScanType.COMPACT_DROP_DELETES,
      mock(CompactionLifeCycleTracker.class), mock(CompactionRequest.class), mock(User.class)));
}
/** preFlushScannerOpen must run observers that can rewrite the store's ScanInfo. */
@Test
public void testPreFlushScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo originalScanInfo = getScanInfo();
  HStore mockStore = mock(HStore.class);
  when(mockStore.getScanInfo()).thenReturn(originalScanInfo);
  verifyScanInfo(host.preFlushScannerOpen(mockStore, mock(FlushLifeCycleTracker.class)));
}
/** preMemStoreCompactionCompactScannerOpen must let observers rewrite the ScanInfo. */
@Test
public void testPreMemStoreCompactionCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo originalScanInfo = getScanInfo();
  HStore mockStore = mock(HStore.class);
  when(mockStore.getScanInfo()).thenReturn(originalScanInfo);
  verifyScanInfo(host.preMemStoreCompactionCompactScannerOpen(mockStore));
}
/**
 * Builds a RegionCoprocessorHost around a mocked hbase:meta first region, with our
 * TestRegionObserver configured, so host hooks can be exercised without a cluster.
 */
private RegionCoprocessorHost getRegionCoprocessorHost() {
  // Use the hbase:meta first region as RegionInfo; the host reads the table schema from
  // the mocked region's descriptor. RegionServerServices is mocked as well.
  RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO;
  HRegion mockedRegion = Mockito.mock(HRegion.class);
  Mockito.when(mockedRegion.getRegionInfo()).thenReturn(metaRegionInfo);
  Mockito.when(mockedRegion.getTableDescriptor())
      .thenReturn(TableDescriptorBuilder.newBuilder(metaRegionInfo.getTable()).build());
  Configuration conf = HBaseConfiguration.create();
  // Load our test coprocessor defined above.
  conf.set(REGION_COPROCESSOR_CONF_KEY, TestRegionObserver.class.getName());
  return new RegionCoprocessorHost(mockedRegion,
      Mockito.mock(RegionServerServices.class), conf);
}
/**
 * For every online region of {@code tableName} on every live region server, finds the loaded
 * instance of {@code coprocessor} and asserts that invoking each no-arg method in
 * {@code methodName} returns the corresponding entry of {@code value}.
 *
 * @throws IOException wrapping any reflection or RPC failure
 */
private void verifyMethodResult(Class<?> coprocessor, String methodName[], TableName tableName,
Object value[]) throws IOException {
  try {
    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
      // Skip region servers that are dead or shutting down.
      if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
        continue;
      }
      for (RegionInfo r : ProtobufUtil
          .getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
        if (!r.getTable().equals(tableName)) {
          continue;
        }
        RegionCoprocessorHost cph =
            t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
        Coprocessor cp = cph.findCoprocessor(coprocessor.getName());
        assertNotNull(cp);
        for (int i = 0; i < methodName.length; ++i) {
          Method m = coprocessor.getMethod(methodName[i]);
          Object o = m.invoke(cp);
          assertTrue("Result of " + coprocessor.getName() + "." + methodName[i]
              + " is expected to be " + value[i].toString() + ", while we get "
              + o.toString(), o.equals(value[i]));
        }
      }
    }
  } catch (Exception e) {
    // BUGFIX: preserve the original exception as the cause instead of flattening it to a
    // string, so the underlying failure's stack trace is not lost.
    throw new IOException(e);
  }
}
/**
 * Assert that when a Coprocessor is annotated with CoreCoprocessor, it can access a
 * RegionServerServices instance through its environment — and that a coprocessor without
 * the annotation cannot. Exercised on RegionCoprocessors.
 * @throws IOException on coprocessor load failure
 */
@Test
public void testCoreRegionCoprocessor() throws IOException {
  RegionCoprocessorHost host = region.getCoprocessorHost();
  // A plain coprocessor environment must NOT expose RegionServerServices.
  RegionCoprocessorEnvironment cpEnv =
      host.load(null, NotCoreRegionCoprocessor.class.getName(), 0, HTU.getConfiguration());
  assertFalse(cpEnv instanceof HasRegionServerServices);
  // A core coprocessor must expose it, and it must be this server's own instance.
  cpEnv = host.load(null, CoreRegionCoprocessor.class.getName(), 1, HTU.getConfiguration());
  assertTrue(cpEnv instanceof HasRegionServerServices);
  assertEquals(this.rss, ((HasRegionServerServices) cpEnv).getRegionServerServices());
}
/**
 * Reopens a previously closed region and attaches a hand-built coprocessor host loaded with
 * the given observer classes, invoking pre/postOpen explicitly because no OpenRegionHandler
 * runs in this test setup.
 */
HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
throws IOException {
  HRegion reopened = HRegion.openHRegion(closedRegion, null);
  // Hack: the coprocessor host is normally installed by OpenRegionHandler; without a real
  // region server we create one by hand with mocked services.
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost cpHost =
      new RegionCoprocessorHost(reopened, Mockito.mock(RegionServerServices.class), conf);
  reopened.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // load() bypasses the normal CP lifecycle (configuration / HTableDescriptor), so the
  // pre/postOpen hooks are not fired automatically and must be invoked explicitly.
  cpHost.preOpen();
  cpHost.postOpen();
  return reopened;
}
/**
 * Builds a region with the given families, attaches a hand-made coprocessor host, loads each
 * implClass at USER priority (asserting it registered), and fires the open hooks manually.
 */
HRegion initHRegion (TableName tableName, String callingMethod,
Configuration conf, Class<?> [] implClasses, byte [][] families)
throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor descriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  for (byte[] cf : families) {
    descriptor.setColumnFamily(
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cf));
  }
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName)
      .setStartKey(null)
      .setEndKey(null)
      .setSplit(false)
      .build();
  HRegion region = HBaseTestingUtility.createRegionAndWAL(regionInfo,
      new Path(DIR + callingMethod), conf, descriptor);
  // Hack: attach the coprocessor host manually (no OpenRegionHandler in this test).
  RegionCoprocessorHost cpHost =
      new RegionCoprocessorHost(region, Mockito.mock(RegionServerServices.class), conf);
  region.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
    assertNotNull(cpHost.findCoprocessor(implClass.getName()));
  }
  // load() does not trigger the open lifecycle, so fire pre/postOpen explicitly.
  cpHost.preOpen();
  cpHost.postOpen();
  return region;
}
/**
 * Loads three observers at different priorities and checks that a put invokes them in
 * priority order (A before B before C), comparing the ids each observer recorded.
 */
public void testRegionObserverStacking() throws Exception {
  byte[] row = Bytes.toBytes("testRow");
  byte[] famA = Bytes.toBytes("A");
  Configuration conf = TEST_UTIL.getConfiguration();
  HRegion region = initHRegion(Bytes.toBytes(this.getClass().getSimpleName()),
      getClass().getName(), conf, new byte[][] { famA });
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
  cpHost.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
  Put put = new Put(row);
  put.addColumn(famA, famA, famA);
  region.put(put);
  long idA = ((ObserverA) cpHost.findCoprocessor(ObserverA.class.getName())).id;
  long idB = ((ObserverB) cpHost.findCoprocessor(ObserverB.class.getName())).id;
  long idC = ((ObserverC) cpHost.findCoprocessor(ObserverC.class.getName())).id;
  // Higher-priority observers must have recorded their id first.
  assertTrue(idA < idB);
  assertTrue(idB < idC);
  HBaseTestingUtility.closeRegionAndWAL(region);
}
/**
 * After a split is rolled back, restarts this observer and the SpliceIndexEndpoint
 * coprocessor on the region. Any failure is rewrapped as an IOException.
 */
public void postRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException{
  try {
    RegionCoprocessorEnvironment env = ctx.getEnvironment();
    start(env);
    HRegion region = (HRegion) env.getRegion();
    Coprocessor endpoint =
        region.getCoprocessorHost().findCoprocessor(SpliceIndexEndpoint.class.getName());
    endpoint.start(env);
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
/**
 * Pre-creation sanity check: fails fast if the table descriptor references a split-policy
 * or table-coprocessor class that cannot be resolved with the given configuration.
 *
 * @throws IOException if a referenced class fails to load or validate
 */
public static void checkClassLoading(final Configuration conf, final TableDescriptor td)
throws IOException {
// Verifies the split policy class named by the descriptor resolves.
RegionSplitPolicy.getSplitPolicyClass(td, conf);
// Verifies the table-coprocessor attributes reference loadable classes.
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
}
/**
 * Builds the delete tracker and column tracker for a scan. Version limits come from the
 * ScanInfo, tightened (or, for raw scans, taken directly) from the user scan when present.
 * New-version-behavior uses a combined NewVersionBehaviorTracker for both roles; otherwise a
 * ScanDeleteTracker plus a wildcard or explicit column tracker is used. The coprocessor host
 * may substitute its own delete tracker (e.g. the visibility variant) after instantiation.
 */
protected static Pair<DeleteTracker, ColumnTracker> getTrackers(RegionCoprocessorHost host,
NavigableSet<byte[]> columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan)
throws IOException {
  int resultMaxVersion = scanInfo.getMaxVersions();
  int maxVersionToCheck = resultMaxVersion;
  if (userScan != null) {
    boolean raw = userScan.isRaw();
    // Raw scans honor the user's version count as-is; normal scans cap it at the schema's.
    resultMaxVersion = raw
        ? userScan.getMaxVersions()
        : Math.min(userScan.getMaxVersions(), scanInfo.getMaxVersions());
    if (userScan.hasFilter()) {
      // A filter may reject cells, so more versions must be examined than returned.
      maxVersionToCheck = raw ? Integer.MAX_VALUE : scanInfo.getMaxVersions();
    } else {
      maxVersionToCheck = resultMaxVersion;
    }
  }
  boolean useNewVersionBehavior =
      scanInfo.isNewVersionBehavior() && (userScan == null || !userScan.isRaw());
  DeleteTracker deleteTracker = useNewVersionBehavior
      ? new NewVersionBehaviorTracker(columns, scanInfo.getComparator(),
          scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
          oldestUnexpiredTS)
      : new ScanDeleteTracker(scanInfo.getComparator());
  if (host != null) {
    // Coprocessors may wrap or replace the delete tracker.
    deleteTracker = host.postInstantiateDeleteTracker(deleteTracker);
    if (scanInfo.isNewVersionBehavior()
        && deleteTracker instanceof VisibilityScanDeleteTracker) {
      // Visibility labels + new version behavior need the combined visibility tracker.
      deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getComparator(),
          scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
          oldestUnexpiredTS);
    }
  }
  ColumnTracker columnTracker;
  if (deleteTracker instanceof NewVersionBehaviorTracker) {
    // The new-version-behavior tracker plays both roles.
    columnTracker = (NewVersionBehaviorTracker) deleteTracker;
  } else if (columns == null || columns.isEmpty()) {
    columnTracker = new ScanWildcardColumnTracker(scanInfo.getMinVersions(), maxVersionToCheck,
        oldestUnexpiredTS, scanInfo.getComparator());
  } else {
    columnTracker = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(),
        maxVersionToCheck, oldestUnexpiredTS);
  }
  return new Pair<>(deleteTracker, columnTracker);
}
/**
 * Per-test setup: creates the test table (with its owner recorded in the _acl_ table),
 * builds a coprocessor environment for the ACCESS_CONTROLLER, and installs the initial
 * permission grants for the test users. The grant order below is significant only in that
 * the final assertion expects exactly 5 table-permission entries.
 */
@Before
public void setUp() throws Exception {
// Create the test table (owner added to the _acl_ table)
Admin admin = TEST_UTIL.getAdmin();
TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
new TableDescriptorBuilder.ModifyableTableDescriptor(testTable.getTableName());
ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(TEST_FAMILY);
// Keep many versions so version-scoped permission tests have data to inspect.
familyDescriptor.setMaxVersions(100);
tableDescriptor.setColumnFamily(familyDescriptor);
tableDescriptor.setOwner(USER_OWNER);
// Pre-split at "s" so the table has more than one region.
admin.createTable(tableDescriptor, new byte[][] { Bytes.toBytes("s") });
TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName());
// Build a coprocessor environment around the first region for direct hook invocation.
HRegion region = TEST_UTIL.getHBaseCluster().getRegions(testTable.getTableName()).get(0);
RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER,
Coprocessor.PRIORITY_HIGHEST, 1, TEST_UTIL.getConfiguration());
// Set up initial grants
// Global admin gets everything.
grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
Permission.Action.ADMIN,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
// Family-scoped read/write user.
grantOnTable(TEST_UTIL, USER_RW.getShortName(),
testTable.getTableName(), TEST_FAMILY, null,
Permission.Action.READ,
Permission.Action.WRITE);
// USER_CREATE is USER_RW plus CREATE permissions
grantOnTable(TEST_UTIL, USER_CREATE.getShortName(),
testTable.getTableName(), null, null,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
// Family-scoped read-only user.
grantOnTable(TEST_UTIL, USER_RO.getShortName(),
testTable.getTableName(), TEST_FAMILY, null,
Permission.Action.READ);
// Qualifier-scoped read/write user (family TEST_FAMILY, qualifier TEST_Q1).
grantOnTable(TEST_UTIL, USER_QUAL.getShortName(),
testTable.getTableName(), TEST_FAMILY, TEST_Q1,
Permission.Action.READ,
Permission.Action.WRITE);
// 4 explicit table grants above + the owner entry = 5 stored permissions.
assertEquals(5, PermissionStorage
.getTablePermissions(TEST_UTIL.getConfiguration(), testTable.getTableName()).size());
}
/**
 * One-time setup: creates TEST_TABLE (owner recorded), builds a coprocessor environment for
 * the ACCESS_CONTROLLER, and installs the initial user and group grants. Expects exactly 5
 * stored table-permission entries afterwards; the AccessControlClient cross-check is
 * best-effort and only logs on failure.
 */
private static void setUpTableAndUserPermissions() throws Exception {
TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
new TableDescriptorBuilder.ModifyableTableDescriptor(TEST_TABLE);
ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(TEST_FAMILY);
// Keep many versions so version-scoped permission tests have data to inspect.
familyDescriptor.setMaxVersions(100);
tableDescriptor.setColumnFamily(familyDescriptor);
tableDescriptor.setOwner(USER_OWNER);
// Pre-split at "s" so the table has more than one region.
createTable(TEST_UTIL, tableDescriptor, new byte[][] { Bytes.toBytes("s") });
// Build a coprocessor environment around the first region for direct hook invocation.
HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0);
RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
// Set up initial grants
// Global admin gets everything.
grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
Permission.Action.ADMIN,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
// Family-scoped read/write user.
grantOnTable(TEST_UTIL, USER_RW.getShortName(),
TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ,
Permission.Action.WRITE);
// USER_CREATE is USER_RW plus CREATE permissions
grantOnTable(TEST_UTIL, USER_CREATE.getShortName(),
TEST_TABLE, null, null,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
// Family-scoped read-only user.
grantOnTable(TEST_UTIL, USER_RO.getShortName(),
TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
// Family-scoped admin/create user.
grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(),
TEST_TABLE, TEST_FAMILY,
null, Permission.Action.ADMIN, Permission.Action.CREATE);
// Group-level global grants, one action per group.
grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE);
grantGlobal(TEST_UTIL, toGroupEntry(GROUP_READ), Permission.Action.READ);
grantGlobal(TEST_UTIL, toGroupEntry(GROUP_WRITE), Permission.Action.WRITE);
assertEquals(5, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size());
// Best-effort cross-check via the client API; failures are logged, not fatal.
try {
assertEquals(5, AccessControlClient.getUserPermissions(systemUserConnection,
TEST_TABLE.toString()).size());
} catch (Throwable e) {
LOG.error("error during call of AccessControlClient.getUserPermissions. ", e);
}
}
/**
 * Ensure we get expected exception when we try to return null from a preFlush call.
 * @throws IOException We expect it to throw {@link CoprocessorException}
 */
@Test (expected = CoprocessorException.class)
public void testPreFlushReturningNull() throws IOException {
  // The test observer returns null from preFlush; the host must reject that.
  RegionCoprocessorHost host = getRegionCoprocessorHost();
  host.preFlush(null, null, null);
}
/**
 * Ensure we get expected exception when we try to return null from a preCompact call.
 * @throws IOException We expect it to throw {@link CoprocessorException}
 */
@Test (expected = CoprocessorException.class)
public void testPreCompactReturningNull() throws IOException {
  // The test observer returns null from preCompact; the host must reject that.
  RegionCoprocessorHost host = getRegionCoprocessorHost();
  host.preCompact(null, null, null, null, null, null);
}
/** Returns the coprocessor host of the HRegion wrapped by this environment. */
public RegionCoprocessorHost getCoprocessorHost() {
  HRegion region = (HRegion) env.getRegion();
  return region.getCoprocessorHost();
}