The following are example usages of org.apache.hadoop.hbase.ipc.ServerRpcController#getFailedOn(), drawn from open-source projects.
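Every example below follows the same pattern: create a ServerRpcController, pass it into a coprocessor service call, and afterwards ask the controller whether the server recorded a failure. A minimal sketch of that check, using only the real ServerRpcController API (the helper name rethrowIfFailed is made up for illustration):

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

final class RpcErrorCheck {
  // Rethrow the IOException the server recorded on the controller, if any.
  static void rethrowIfFailed(ServerRpcController controller) throws IOException {
    if (controller.failedOnException()) {
      throw controller.getFailedOn(); // the exception set via setFailedOn(...)
    }
  }
}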
Example 1: a test that invokes a dummy region-server coprocessor endpoint and rethrows any failure the endpoint recorded on the controller.

@Test
public void testEndpoint() throws Exception {
  final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
  final ServerRpcController controller = new ServerRpcController();
  final CoprocessorRpcUtils.BlockingRpcCallback<DummyRegionServerEndpointProtos.DummyResponse>
      rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
  DummyRegionServerEndpointProtos.DummyService service =
    ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class,
      TEST_UTIL.getAdmin().coprocessorService(serverName));
  service.dummyCall(controller,
    DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback);
  assertEquals(DUMMY_VALUE, rpcCallback.get().getValue());
  // Surface any exception the endpoint recorded on the controller.
  if (controller.failedOnException()) {
    throw controller.getFailedOn();
  }
}
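For context, getFailedOn() only returns non-null when the endpoint implementation recorded an IOException on the controller. Below is a minimal sketch of the server side of that contract; CoprocessorRpcUtils.setControllerException is a real HBase helper, while the wrapper class and method names are invented for illustration.

import java.io.IOException;
import com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;

final class EndpointErrorReporting {
  // An endpoint's catch block typically does this before running the done
  // callback: record the failure on the controller so the client's
  // ServerRpcController#getFailedOn() can hand it back.
  static void reportFailure(RpcController controller, IOException cause) {
    CoprocessorRpcUtils.setControllerException(controller, cause);
  }
}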
Example 2: the server-side dispatch of a master coprocessor service call; any exception the invoked method records on the ServerRpcController is rethrown and wrapped in a ServiceException.

@Override
public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
    final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
  rpcPreCheck("execMasterService");
  try {
    ServerRpcController execController = new ServerRpcController();
    ClientProtos.CoprocessorServiceCall call = request.getCall();
    String serviceName = call.getServiceName();
    String methodName = call.getMethodName();
    if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
      throw new UnknownProtocolException(null,
        "No registered Master Coprocessor Endpoint found for " + serviceName +
        ". Has it been enabled?");
    }
    Service service = master.coprocessorServiceHandlers.get(serviceName);
    ServiceDescriptor serviceDesc = service.getDescriptorForType();
    MethodDescriptor methodDesc =
      CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);
    Message execRequest =
      CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
    final Message.Builder responseBuilder =
      service.getResponsePrototype(methodDesc).newBuilderForType();
    service.callMethod(methodDesc, execController, execRequest,
      (message) -> {
        if (message != null) {
          responseBuilder.mergeFrom(message);
        }
      });
    Message execResult = responseBuilder.build();
    // Propagate any server-side failure recorded during callMethod().
    if (execController.getFailedOn() != null) {
      throw execController.getFailedOn();
    }
    return CoprocessorRpcUtils.getResponse(execResult, HConstants.EMPTY_BYTE_ARRAY);
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
Example 3: a bulk-write invoker (BulkWriteChannelInvoker) that distinguishes between a captured IOException and a bare error string on the controller.

public BulkWritesResult invoke(BulkWrites write) throws IOException {
  TableName tableName = tableInfoFactory.getTableInfo(this.tableName);
  CoprocessorRpcChannel channel = channelFactory.newChannel(tableName, write.getRegionKey());
  boolean cacheCheck = false;
  try {
    SpliceMessage.SpliceIndexService service =
      ProtobufUtil.newServiceStub(SpliceMessage.SpliceIndexService.class, channel);
    SpliceMessage.BulkWriteRequest.Builder builder = SpliceMessage.BulkWriteRequest.newBuilder();
    byte[] requestBytes = compressor.compress(write);
    builder.setBytes(ZeroCopyLiteralByteString.wrap(requestBytes));
    SpliceMessage.BulkWriteRequest bwr = builder.build();
    BlockingRpcCallback<SpliceMessage.BulkWriteResponse> doneCallback = new BlockingRpcCallback<>();
    ServerRpcController controller = new ServerRpcController();
    service.bulkWrite(controller, bwr, doneCallback);
    if (controller.failed()) {
      // Prefer the captured IOException; fall back to the controller's error text.
      IOException error = controller.getFailedOn();
      clearCacheIfNeeded(error);
      cacheCheck = true;
      if (error != null) {
        throw pef.processRemoteException(error);
      } else {
        throw pef.fromErrorString(controller.errorText());
      }
    }
    SpliceMessage.BulkWriteResponse bulkWriteResponse = doneCallback.get();
    byte[] bytes = bulkWriteResponse.getBytes().toByteArray();
    if (bytes == null || bytes.length <= 0) {
      Logger logger = Logger.getLogger(BulkWriteChannelInvoker.class);
      logger.error("zero-length bytes returned with a null error for encodedString: " +
        write.getBulkWrites().iterator().next().getEncodedStringName());
    }
    return compressor.decompress(bytes, BulkWritesResult.class);
  } catch (Exception e) {
    if (!cacheCheck) clearCacheIfNeeded(e);
    throw pef.processRemoteException(e);
  }
}
Example 4: an atomic multi-row mutation helper built on the MultiRowMutationService coprocessor endpoint.

/**
 * Performs an atomic multi-mutate operation against the given table. Used by the likes of
 * merge and split, as these want to make atomic mutations across multiple rows.
 * @throws IOException even if we encounter a RuntimeException, we'll still wrap it in an IOE.
 */
@VisibleForTesting
static void multiMutate(final Table table, byte[] row, final List<Mutation> mutations)
    throws IOException {
  debugLogMutations(mutations);
  Batch.Call<MultiRowMutationService, MutateRowsResponse> callable = instance -> {
    MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
    for (Mutation mutation : mutations) {
      if (mutation instanceof Put) {
        builder.addMutationRequest(
          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
      } else if (mutation instanceof Delete) {
        builder.addMutationRequest(
          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
      } else {
        throw new DoNotRetryIOException(
          "multi in MetaEditor doesn't support " + mutation.getClass().getName());
      }
    }
    ServerRpcController controller = new ServerRpcController();
    CoprocessorRpcUtils.BlockingRpcCallback<MutateRowsResponse> rpcCallback =
      new CoprocessorRpcUtils.BlockingRpcCallback<>();
    instance.mutateRows(controller, builder.build(), rpcCallback);
    MutateRowsResponse resp = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return resp;
  };
  try {
    table.coprocessorService(MultiRowMutationService.class, row, row, callable);
  } catch (Throwable e) {
    // Throw if an IOE, else wrap in an IOE EVEN IF IT IS a RuntimeException (e.g.
    // a RejectedExecutionException because the hosting executor is shutting down).
    // This is old behavior worth reexamining. Procedures doing merge or split
    // currently don't handle RuntimeExceptions coming up out of meta table edits.
    // Would have to work on this at least. See HBASE-23904.
    Throwables.throwIfInstanceOf(e, IOException.class);
    throw new IOException(e);
  }
}
Example 5: a test that invokes the MetaDataService getTable endpoint through Table#coprocessorService and checks the index state in the response.

@Test
public void testCachedConnections() throws Throwable {
  final String schemaName = generateUniqueName();
  final String tableName = generateUniqueName();
  final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
  final String indexName = generateUniqueName();
  final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
  final Connection conn = DriverManager.getConnection(getUrl());
  // create table and indices
  String createTableSql =
    "CREATE TABLE " + fullTableName
      + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER)";
  conn.createStatement().execute(createTableSql);
  conn.createStatement()
    .execute("CREATE INDEX " + indexName + " ON " + fullTableName + "(v1)");
  conn.commit();
  PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
  ConnectionQueryServices queryServices = phoenixConn.getQueryServices();
  Table metaTable =
    phoenixConn.getQueryServices()
      .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
  long ts = EnvironmentEdgeManager.currentTimeMillis();
  MutationCode code =
    IndexUtil
      .updateIndexState(fullIndexName, ts, metaTable, PIndexState.PENDING_DISABLE)
      .getMutationCode();
  assertEquals(MutationCode.TABLE_ALREADY_EXISTS, code);
  ts = EnvironmentEdgeManager.currentTimeMillis();
  final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
  final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
  PName tenantId = phoenixConn.getTenantId();
  final long tableTimestamp = HConstants.LATEST_TIMESTAMP;
  long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
  final long resolvedTimestamp = tableResolvedTimestamp;
  final byte[] tenantIdBytes =
    tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
  byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
  Batch.Call<MetaDataService, MetaDataResponse> callable =
    new Batch.Call<MetaDataService, MetaDataResponse>() {
      @Override
      public MetaDataResponse call(MetaDataService instance) throws IOException {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<MetaDataResponse> rpcCallback =
          new BlockingRpcCallback<MetaDataResponse>();
        GetTableRequest.Builder builder = GetTableRequest.newBuilder();
        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
        builder.setSchemaName(ByteStringer.wrap(schemaBytes));
        builder.setTableName(ByteStringer.wrap(tableBytes));
        builder.setTableTimestamp(tableTimestamp);
        builder.setClientTimestamp(resolvedTimestamp);
        builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION,
          13, PHOENIX_PATCH_NUMBER));
        instance.getTable(controller, builder.build(), rpcCallback);
        if (controller.getFailedOn() != null) {
          throw controller.getFailedOn();
        }
        return rpcCallback.get();
      }
    };
  int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
  LOGGER.info("Client version: " + version);
  Table ht =
    queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
  try {
    final Map<byte[], MetaDataResponse> results =
      ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);
    assert (results.size() == 1);
    MetaDataResponse result = results.values().iterator().next();
    assert (result.getTable().getIndexesCount() == 1);
    assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
      .equals(PIndexState.DISABLE));
  } catch (Exception e) {
    LOGGER.error("Exception Occurred: " + e);
  } finally {
    Closeables.closeQuietly(ht);
  }
}
Example 6: a small helper that rethrows the controller's recorded failure, if any.

private void dealWithError(ServerRpcController controller) throws IOException {
  if (!controller.failed()) return; // nothing to worry about
  throw controller.getFailedOn();
}
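A helper like dealWithError() can be generalized to also cover the error-text case seen in the bulk-write example, and to unblock the callback only after the controller has been checked. A sketch, assuming only the real ServerRpcController and CoprocessorRpcUtils.BlockingRpcCallback APIs (class and method names are invented):

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

final class ControllerAwareCalls {
  // Check the controller first, then block on the callback for the result.
  static <T> T getOrThrow(ServerRpcController controller,
      CoprocessorRpcUtils.BlockingRpcCallback<T> callback) throws IOException {
    if (controller.failed()) {
      IOException cause = controller.getFailedOn();
      // Fall back to the controller's error text when no exception was captured.
      throw cause != null ? cause : new IOException(controller.errorText());
    }
    return callback.get();
  }
}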