Source code examples for the class org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos

The examples below show how the org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos API is used in real code; you can also follow the links to view the original source on GitHub.
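
As a hedged orientation sketch before the examples (every class and method below also appears in the snippets that follow; the table name and host/port values are illustrative), a typical use of HBaseProtos is converting client-side objects to their protobuf counterparts via ProtobufUtil and back:

// Sketch only: round-trip a TableName and build a protobuf ServerName.
TableName tn = TableName.valueOf("ns1", "testTable");              // example table
HBaseProtos.TableName protoTn = ProtobufUtil.toProtoTableName(tn);
TableName back = ProtobufUtil.toTableName(protoTn);

HBaseProtos.ServerName protoSn = HBaseProtos.ServerName.newBuilder()
    .setHostName("rs1.example.com")                                 // example host
    .setPort(16020)
    .build();
ServerName sn = ProtobufUtil.toServerName(protoSn);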

Example 1  Project: hbase   File: TableState.java
/**
 * Convert to the PB version of State.
 *
 * @return the PB State
 */
public HBaseProtos.TableState.State convert() {
  HBaseProtos.TableState.State state;
  switch (this) {
  case ENABLED:
    state = HBaseProtos.TableState.State.ENABLED;
    break;
  case DISABLED:
    state = HBaseProtos.TableState.State.DISABLED;
    break;
  case DISABLING:
    state = HBaseProtos.TableState.State.DISABLING;
    break;
  case ENABLING:
    state = HBaseProtos.TableState.State.ENABLING;
    break;
  default:
    throw new IllegalStateException(this.toString());
  }
  return state;
}
 
Example 2  Project: hbase   File: TestThrottleSettings.java
@Test
public void testIncompatibleThrottleTypes() throws IOException {
  TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq);

  TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota)
      .setType(QuotaProtos.ThrottleType.READ_NUMBER).build();

  try {
    orig.merge(new ThrottleSettings("joe", null, null, null, readsQuotaReq));
    fail("A read throttle should not be capable of being merged with a request quota");
  } catch (IllegalArgumentException e) {
    // Pass
  }
}
 
Example 3  Project: hbase   File: RegionServerSpaceQuotaManager.java
/**
 * Builds the protobuf message to inform the Master of files being archived.
 *
 * @param tn The table the files previously belonged to.
 * @param archivedFiles The files and their size in bytes that were archived.
 * @return The protobuf representation
 */
public RegionServerStatusProtos.FileArchiveNotificationRequest buildFileArchiveRequest(
    TableName tn, Collection<Entry<String,Long>> archivedFiles) {
  RegionServerStatusProtos.FileArchiveNotificationRequest.Builder builder =
      RegionServerStatusProtos.FileArchiveNotificationRequest.newBuilder();
  HBaseProtos.TableName protoTn = ProtobufUtil.toProtoTableName(tn);
  for (Entry<String,Long> archivedFile : archivedFiles) {
    RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize fws =
        RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.newBuilder()
            .setName(archivedFile.getKey())
            .setSize(archivedFile.getValue())
            .setTableName(protoTn)
            .build();
    builder.addArchivedFiles(fws);
  }
  final RegionServerStatusProtos.FileArchiveNotificationRequest request = builder.build();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reporting file archival to Master: " + TextFormat.shortDebugString(request));
  }
  return request;
}
 
Example 4  Project: hbase   File: ProtobufUtil.java
public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) {
  List<HBaseProtos.TableName> tables = new ArrayList<>(pojo.getTables().size());
  for (TableName arg : pojo.getTables()) {
    tables.add(ProtobufUtil.toProtoTableName(arg));
  }
  List<HBaseProtos.ServerName> hostports = new ArrayList<>(pojo.getServers().size());
  for (Address el : pojo.getServers()) {
    hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
        .setPort(el.getPort()).build());
  }
  List<NameStringPair> configuration = pojo.getConfiguration().entrySet()
      .stream().map(entry -> NameStringPair.newBuilder()
          .setName(entry.getKey()).setValue(entry.getValue()).build())
      .collect(Collectors.toList());
  return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports)
      .addAllTables(tables).addAllConfiguration(configuration).build();
}
 
Example 5  Project: hbase   File: TruncateTableProcedure.java
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer)
    throws IOException {
  super.deserializeStateData(serializer);

  MasterProcedureProtos.TruncateTableStateData state =
      serializer.deserialize(MasterProcedureProtos.TruncateTableStateData.class);
  setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
  if (state.hasTableSchema()) {
    tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
    tableName = tableDescriptor.getTableName();
  } else {
    tableName = ProtobufUtil.toTableName(state.getTableName());
  }
  preserveSplits = state.getPreserveSplits();
  if (state.getRegionInfoCount() == 0) {
    regions = null;
  } else {
    regions = new ArrayList<>(state.getRegionInfoCount());
    for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
      regions.add(ProtobufUtil.toRegionInfo(hri));
    }
  }
}
 
Example 6  Project: hbase   File: DeleteTableProcedure.java
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer)
    throws IOException {
  super.deserializeStateData(serializer);

  MasterProcedureProtos.DeleteTableStateData state =
      serializer.deserialize(MasterProcedureProtos.DeleteTableStateData.class);
  setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
  tableName = ProtobufUtil.toTableName(state.getTableName());
  if (state.getRegionInfoCount() == 0) {
    regions = null;
  } else {
    regions = new ArrayList<>(state.getRegionInfoCount());
    for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
      regions.add(ProtobufUtil.toRegionInfo(hri));
    }
  }
}
 
Example 7  Project: hbase   File: TableNamespaceManager.java
private void loadNamespaceIntoCache() throws IOException {
  try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
    for (Result result;;) {
      result = scanner.next();
      if (result == null) {
        break;
      }
      Cell cell = result.getColumnLatestCell(HConstants.NAMESPACE_FAMILY,
        HConstants.NAMESPACE_COL_DESC_QUALIFIER);
      NamespaceDescriptor ns = ProtobufUtil
        .toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream
          .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
      cache.put(ns.getName(), ns);
    }
  }
}
 
Example 8  Project: hbase   File: RegionInfo.java
/**
 * Parses a RegionInfo instance from the passed-in stream.
 * Presumes the RegionInfo was serialized to the stream with
 * {@link #toDelimitedByteArray(RegionInfo)}.
 * @return An instance of RegionInfo.
 */
static RegionInfo parseFrom(final DataInputStream in) throws IOException {
  // I need to be able to move back in the stream if this is not a pb
  // serialization so I can do the Writable decoding instead.
  int pblen = ProtobufUtil.lengthOfPBMagic();
  byte [] pbuf = new byte[pblen];
  if (in.markSupported()) { //read it with mark()
    in.mark(pblen);
  }

  //assumption: if Writable serialization, it should be longer than pblen.
  int read = in.read(pbuf);
  if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
  if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
    return ProtobufUtil.toRegionInfo(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
  } else {
    throw new IOException("PB encoded RegionInfo expected");
  }
}
 
Example 9  Project: hbase   File: MasterRpcServices.java
@Override
public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProcedure(
    RpcController controller, MasterProtos.ScheduleServerCrashProcedureRequest request)
    throws ServiceException {
  List<Long> pids = new ArrayList<>();
  for (HBaseProtos.ServerName sn: request.getServerNameList()) {
    ServerName serverName = ProtobufUtil.toServerName(sn);
    LOG.info("{} schedule ServerCrashProcedure for {}",
        this.master.getClientIdAuditPrefix(), serverName);
    if (shouldSubmitSCP(serverName)) {
      pids.add(this.master.getServerManager().expireServer(serverName, true));
    } else {
      pids.add(Procedure.NO_PROC_ID);
    }
  }
  return MasterProtos.ScheduleServerCrashProcedureResponse.newBuilder().addAllPid(pids).build();
}
 
Example 10  Project: hbase   File: MasterRpcServices.java
@Override
public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request)
    throws ServiceException {
  Set<Address> hostPorts = Sets.newHashSet();
  MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
  for (HBaseProtos.ServerName el : request.getServersList()) {
    hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
  }
  LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " +
      request.getTargetGroup());
  try {
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
    }
    master.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup());
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
    }
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return builder.build();
}
 
Example 11  Project: hbase   File: ProtobufUtil.java
public static HBaseProtos.CacheEvictionStats toCacheEvictionStats(
    CacheEvictionStats cacheEvictionStats) {
  HBaseProtos.CacheEvictionStats.Builder builder
      = HBaseProtos.CacheEvictionStats.newBuilder();
  for (Map.Entry<byte[], Throwable> entry : cacheEvictionStats.getExceptions().entrySet()) {
    builder.addException(
        RegionExceptionMessage.newBuilder()
        .setRegion(RequestConverter.buildRegionSpecifier(
                RegionSpecifierType.REGION_NAME, entry.getKey()))
        .setException(ResponseConverter.buildException(entry.getValue()))
        .build()
    );
  }
  return builder
      .setEvictedBlocks(cacheEvictionStats.getEvictedBlocks())
      .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize())
      .build();
}
 
Example 12  Project: hbase   File: RSGroupAdminServiceImpl.java
@Override
public void removeServers(RpcController controller, RemoveServersRequest request,
    RpcCallback<RemoveServersResponse> done) {
  RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
  Set<Address> servers = Sets.newHashSet();
  for (HBaseProtos.ServerName el : request.getServersList()) {
    servers.add(Address.fromParts(el.getHostName(), el.getPort()));
  }
  LOG.info(
    master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
  try {
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().preRemoveServers(servers);
    }
    rsGroupInfoManager.removeServers(servers);
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().postRemoveServers(servers);
    }
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
  }
  done.run(builder.build());
}
 
Example 13  Project: hbase   File: RSGroupAdminClient.java
/**
 * Move given set of servers and tables to the specified target RegionServer group.
 * @param servers set of servers to move
 * @param tables set of tables to move
 * @param targetGroup the target group name
 * @throws IOException if moving the servers and tables fails
 */
public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
  throws IOException {
  MoveServersAndTablesRequest.Builder builder =
    MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup);
  for (Address el : servers) {
    builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
      .setPort(el.getPort()).build());
  }
  for (TableName tableName : tables) {
    builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
    if (!admin.tableExists(tableName)) {
      throw new TableNotFoundException(tableName);
    }
  }
  try {
    stub.moveServersAndTables(null, builder.build());
  } catch (ServiceException e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
 
Example 14  Project: hbase   File: TestMetaCache.java
/**
 * Throw some exceptions. Mostly throw exceptions which do not clear the meta cache.
 * Periodically throw NotServingRegionException, which clears the meta cache.
 * @throws ServiceException
 */
private void throwSomeExceptions(FakeRSRpcServices rpcServices,
                                 HBaseProtos.RegionSpecifier regionSpec)
    throws ServiceException {
  if (!isTestTable(rpcServices, regionSpec)) {
    return;
  }

  numReqs++;
  // Succeed every 5 requests, throw cache-clearing exceptions twice every 5 requests, and throw
  // meta-cache-preserving exceptions otherwise.
  if (numReqs % 5 == 0) {
    return;
  } else if (numReqs % 5 == 1 || numReqs % 5 == 2) {
    throw new ServiceException(new NotServingRegionException());
  }
  // Round-robin between different special exceptions.
  // This is not ideal since exception types are not tied to the operation performed here,
  // but we don't really care if we throw MultiActionTooLargeException while doing
  // single Gets.
  expCount++;
  Throwable t = metaCachePreservingExceptions.get(
      expCount % metaCachePreservingExceptions.size());
  throw new ServiceException(t);
}
 
Example 15  Project: hbase   File: ServerMetricsBuilder.java
public static ServerMetrics toServerMetrics(ServerName serverName, int versionNumber,
    String version, ClusterStatusProtos.ServerLoad serverLoadPB) {
  return ServerMetricsBuilder.newBuilder(serverName)
    .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests())
    .setRequestCount(serverLoadPB.getTotalNumberOfRequests())
    .setInfoServerPort(serverLoadPB.getInfoServerPort())
    .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE))
    .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE))
    .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream()
      .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList()))
    .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream()
      .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()))
      .setUserMetrics(serverLoadPB.getUserLoadsList().stream()
          .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList()))
    .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream()
        .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList()))
    .setReplicationLoadSink(serverLoadPB.hasReplLoadSink()
      ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink())
      : null)
    .setReportTimestamp(serverLoadPB.getReportEndTime())
    .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber)
    .setVersion(version).build();
}
 
Example 16  Project: hbase   File: ProtobufUtil.java
/**
 * Convert a ServerName to a protocol buffer ServerName
 *
 * @param serverName the ServerName to convert
 * @return the converted protocol buffer ServerName
 * @see #toServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName)
 */
public static HBaseProtos.ServerName toServerName(final ServerName serverName) {
  if (serverName == null) {
    return null;
  }
  HBaseProtos.ServerName.Builder builder =
    HBaseProtos.ServerName.newBuilder();
  builder.setHostName(serverName.getHostname());
  if (serverName.getPort() >= 0) {
    builder.setPort(serverName.getPort());
  }
  if (serverName.getStartcode() >= 0) {
    builder.setStartCode(serverName.getStartcode());
  }
  return builder.build();
}
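
A hedged round-trip sketch for this conversion (the reverse method, ProtobufUtil.toServerName(HBaseProtos.ServerName), is the one referenced in the javadoc above and used in Examples 9 and 24; the host, port, and start code are illustrative values).

// Sketch only: ServerName -> HBaseProtos.ServerName -> ServerName.
ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1234567890L);  // example values
HBaseProtos.ServerName proto = ProtobufUtil.toServerName(sn);
ServerName roundTripped = ProtobufUtil.toServerName(proto);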
 
Example 17  Project: hbase   File: ProtobufUtil.java
public static ScanMetrics toScanMetrics(final byte[] bytes) {
  MapReduceProtos.ScanMetrics pScanMetrics = null;
  try {
    pScanMetrics = MapReduceProtos.ScanMetrics.parseFrom(bytes);
  } catch (InvalidProtocolBufferException e) {
    // Ignored; there are just no key values to add.
  }
  ScanMetrics scanMetrics = new ScanMetrics();
  if (pScanMetrics != null) {
    for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) {
      if (pair.hasName() && pair.hasValue()) {
        scanMetrics.setCounter(pair.getName(), pair.getValue());
      }
    }
  }
  return scanMetrics;
}
 
Example 18  Project: hbase   File: SingleColumnValueFilter.java
FilterProtos.SingleColumnValueFilter convert() {
  FilterProtos.SingleColumnValueFilter.Builder builder =
    FilterProtos.SingleColumnValueFilter.newBuilder();
  if (this.columnFamily != null) {
    builder.setColumnFamily(UnsafeByteOperations.unsafeWrap(this.columnFamily));
  }
  if (this.columnQualifier != null) {
    builder.setColumnQualifier(UnsafeByteOperations.unsafeWrap(this.columnQualifier));
  }
  HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name());
  builder.setCompareOp(compareOp);
  builder.setComparator(ProtobufUtil.toComparator(this.comparator));
  builder.setFilterIfMissing(this.filterIfMissing);
  builder.setLatestVersionOnly(this.latestVersionOnly);

  return builder.build();
}
 
Example 19  Project: hbase   File: RegionInfo.java
/**
 * @param bytes A pb RegionInfo serialized with a pb magic prefix.
 * @param offset starting point in the byte array
 * @param len length to read on the byte array
 * @return A deserialized {@link RegionInfo}
 */
@InterfaceAudience.Private
static RegionInfo parseFrom(final byte [] bytes, int offset, int len)
throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      HBaseProtos.RegionInfo.Builder builder = HBaseProtos.RegionInfo.newBuilder();
      ProtobufUtil.mergeFrom(builder, bytes, pblen + offset, len - pblen);
      HBaseProtos.RegionInfo ri = builder.build();
      return ProtobufUtil.toRegionInfo(ri);
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
  } else {
    throw new DeserializationException("PB encoded RegionInfo expected");
  }
}
 
Example 20  Project: hbase-operator-tools   File: HBCK2.java
List<Long> scheduleRecoveries(Hbck hbck, String[] args) throws IOException {
  List<HBaseProtos.ServerName> serverNames = new ArrayList<>();
  for (String serverName: args) {
    serverNames.add(parseServerName(serverName));
  }
  return hbck.scheduleServerCrashProcedure(serverNames);
}
 
Example 21
private Cell createCellForTableState(TableName tableName) {
  Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    .setRow(tableName.getName())
    .setFamily(Bytes.toBytes("table"))
    .setQualifier(Bytes.toBytes("state"))
    .setType(Cell.Type.Put)
    .setValue(HBaseProtos.TableState.newBuilder()
      .setState(TableState.State.ENABLED.convert()).build().toByteArray())
    .build();
  return cell;
}
 
Example 22  Project: hbase   File: TestRegionInfoBuilder.java
@Test
public void testConvert() {
  final TableName tableName =
    TableName.valueOf("ns1:" + name.getTableName().getQualifierAsString());
  byte[] startKey = Bytes.toBytes("startKey");
  byte[] endKey = Bytes.toBytes("endKey");
  boolean split = false;
  long regionId = System.currentTimeMillis();
  int replicaId = 42;

  RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)
    .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build();

  // convert two times, compare
  RegionInfo convertedRi = ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri));

  assertEquals(ri, convertedRi);

  // test convert RegionInfo without replicaId
  HBaseProtos.RegionInfo info = HBaseProtos.RegionInfo.newBuilder()
    .setTableName(HBaseProtos.TableName.newBuilder()
      .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier()))
      .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build())
    .setStartKey(UnsafeByteOperations.unsafeWrap(startKey))
    .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId)
    .build();

  convertedRi = ProtobufUtil.toRegionInfo(info);
  RegionInfo expectedRi = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey)
    .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build();

  assertEquals(expectedRi, convertedRi);
}
 
Example 23  Project: hbase   File: ProtobufUtil.java
public static CacheEvictionStats toCacheEvictionStats(
    HBaseProtos.CacheEvictionStats stats) throws IOException {
  CacheEvictionStatsBuilder builder = CacheEvictionStats.builder();
  builder.withEvictedBlocks(stats.getEvictedBlocks())
      .withMaxCacheSize(stats.getMaxCacheSize());
  if (stats.getExceptionCount() > 0) {
    for (HBaseProtos.RegionExceptionMessage exception : stats.getExceptionList()) {
      HBaseProtos.RegionSpecifier rs = exception.getRegion();
      byte[] regionName = rs.getValue().toByteArray();
      builder.addException(regionName, ProtobufUtil.toException(exception.getException()));
    }
  }
  return builder.build();
}
 
Example 24  Project: hbase   File: FavoredNodeAssignmentHelper.java
/**
 * @param favoredNodes The PB'ed bytes of favored nodes
 * @return the array of {@link ServerName} for the byte array of favored nodes.
 * @throws IOException
 */
public static ServerName[] getFavoredNodesList(byte[] favoredNodes) throws IOException {
  FavoredNodes f = FavoredNodes.parseFrom(favoredNodes);
  List<HBaseProtos.ServerName> protoNodes = f.getFavoredNodeList();
  ServerName[] servers = new ServerName[protoNodes.size()];
  int i = 0;
  for (HBaseProtos.ServerName node : protoNodes) {
    servers[i++] = ProtobufUtil.toServerName(node);
  }
  return servers;
}
 
Example 25  Project: hbase   File: FavoredNodeAssignmentHelper.java
/**
 * @param serverAddrList the list of favored-node {@link ServerName}s
 * @return PB'ed bytes of {@link FavoredNodes} generated by the server list.
 */
public static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
  FavoredNodes.Builder f = FavoredNodes.newBuilder();
  for (ServerName s : serverAddrList) {
    HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder();
    b.setHostName(s.getHostname());
    b.setPort(s.getPort());
    b.setStartCode(ServerName.NON_STARTCODE);
    f.addFavoredNode(b.build());
  }
  return f.build().toByteArray();
}
 
Example 26  Project: hbase   File: ProtobufUtil.java
public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) {
  if (timeRange == null) {
    timeRange = TimeRange.allTime();
  }
  return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin())
    .setTo(timeRange.getMax())
    .build();
}
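
A hedged usage note for toTimeRange (both calls below rely only on what the snippet above shows): a null TimeRange falls back to TimeRange.allTime() before conversion, so both produce the same proto message.

// Sketch only: null and TimeRange.allTime() convert identically.
HBaseProtos.TimeRange a = ProtobufUtil.toTimeRange(null);
HBaseProtos.TimeRange b = ProtobufUtil.toTimeRange(TimeRange.allTime());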
 
Example 27  Project: hbase   File: ZKConnectionRegistry.java
private Pair<RegionState.State, ServerName> getStateAndServerName(
    ZooKeeperProtos.MetaRegionServer proto) {
  RegionState.State state;
  if (proto.hasState()) {
    state = RegionState.State.convert(proto.getState());
  } else {
    state = RegionState.State.OPEN;
  }
  HBaseProtos.ServerName snProto = proto.getServer();
  return Pair.newPair(state,
    ServerName.valueOf(snProto.getHostName(), snProto.getPort(), snProto.getStartCode()));
}
 
Example 28  Project: hbase   File: RegionMetricsBuilder.java
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
  return ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(HBaseProtos.RegionSpecifier
        .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
        .build())
      .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize()
        .get(Size.Unit.KILOBYTE))
      .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
      .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
      .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
      .setDataLocality(regionMetrics.getDataLocality())
      .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
      .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize()
        .get(Size.Unit.KILOBYTE))
      .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
      .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
      .setReadRequestsCount(regionMetrics.getReadRequestCount())
      .setCpRequestsCount(regionMetrics.getCpRequestCount())
      .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
      .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
        .get(Size.Unit.KILOBYTE))
      .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize()
        .get(Size.Unit.KILOBYTE))
      .setStores(regionMetrics.getStoreCount())
      .setStorefiles(regionMetrics.getStoreFileCount())
      .setStoreRefCount(regionMetrics.getStoreRefCount())
      .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount())
      .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE))
      .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
      .setStoreUncompressedSizeMB(
        (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
      .build();
}
 
Example 29  Project: hbase   File: MasterRpcServices.java
/**
 * Get list of TableDescriptors for requested tables.
 * @param c Unused (set to null).
 * @param req GetTableDescriptorsRequest that contains:
 *     - tableNames: requested tables, or if empty, all are requested.
 * @return GetTableDescriptorsResponse
 * @throws ServiceException
 */
@Override
public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
    GetTableDescriptorsRequest req) throws ServiceException {
  try {
    master.checkInitialized();

    final String regex = req.hasRegex() ? req.getRegex() : null;
    final String namespace = req.hasNamespace() ? req.getNamespace() : null;
    List<TableName> tableNameList = null;
    if (req.getTableNamesCount() > 0) {
      tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
      for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
        tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
      }
    }

    List<TableDescriptor> descriptors = master.listTableDescriptors(namespace, regex,
        tableNameList, req.getIncludeSysTables());

    GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
    if (descriptors != null && descriptors.size() > 0) {
      // Add the table descriptors to the response
      for (TableDescriptor htd: descriptors) {
        builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
      }
    }
    return builder.build();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 
Example 30  Project: hbase   File: MasterRpcServices.java
/**
 * A 'raw' version of assign that does bulk and skirts Master state checks (assigns can be made
 * during Master startup). For use by Hbck2.
 */
@Override
public MasterProtos.AssignsResponse assigns(RpcController controller,
    MasterProtos.AssignsRequest request)
  throws ServiceException {
  if (this.master.getMasterProcedureExecutor() == null) {
    throw new ServiceException("Master's ProcedureExecutor not initialized; retry later");
  }
  MasterProtos.AssignsResponse.Builder responseBuilder =
      MasterProtos.AssignsResponse.newBuilder();
  try {
    boolean override = request.getOverride();
    LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override);
    for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) {
      RegionInfo ri = getRegionInfo(rs);
      if (ri == null) {
        LOG.info("Unknown={}", rs);
        responseBuilder.addPid(Procedure.NO_PROC_ID);
        continue;
      }
      responseBuilder.addPid(this.master.getMasterProcedureExecutor().submitProcedure(this.master
          .getAssignmentManager().createOneAssignProcedure(ri, override)));
    }
    return responseBuilder.build();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 