Source code examples for the class org.apache.hadoop.hbase.HConstants

Listed below are examples showing how to use the org.apache.hadoop.hbase.HConstants API; each entry names its source project and file, or you can follow the links to view the original source on GitHub.
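
Before the per-project examples, here is a minimal self-contained sketch (not taken from any project below; HBase 2.x client API and placeholder names assumed) showing the three HConstants usage patterns that recur throughout this listing: constants as Configuration keys, the empty start/stop row sentinels for a full-table Scan, and LATEST_TIMESTAMP as a cell timestamp.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class HConstantsUsageSketch {
  public static void main(String[] args) {
    // Pattern 1: HConstants as Configuration keys (the quorum value is a placeholder).
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");
    int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);

    // Pattern 2: empty start/stop rows mean "scan the whole table".
    Scan scan = new Scan()
        .withStartRow(HConstants.EMPTY_START_ROW)
        .withStopRow(HConstants.EMPTY_END_ROW);

    // Pattern 3: LATEST_TIMESTAMP as an explicit cell timestamp on a Put.
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
        HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));

    System.out.println("rpcTimeout=" + rpcTimeout);
  }
}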

Example 1  Project: phoenix  File: WhereOptimizerTest.java
@Test
public void testUseOfFunctionOnLHSInMiddleOfRVCForLTE() throws SQLException {
    String tenantId = "000000000000001";
    String parentId = "000000000000002";
    String subStringParentId = parentId.substring(0, 3);
    Date createdDate = new Date(System.currentTimeMillis());
    
    String query = "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) <= (?,?,?)";
    List<Object> binds = Arrays.<Object>asList(tenantId, subStringParentId, createdDate);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();
    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    byte[] expectedStopRow = ByteUtil.concat(
        PVarchar.INSTANCE.toBytes(tenantId), ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(subStringParentId)));
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow());
    assertArrayEquals(expectedStopRow, scan.getStopRow());
}
 
Example 2  Project: phoenix  File: PhoenixConfigurationUtilTest.java
@Test
public void testUpsertStatement() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
    final String tableName = "TEST_TABLE";
    try {
        String ddl = "CREATE TABLE "+ tableName + 
                "  (a_string varchar not null, a_binary varbinary not null, col1 integer" +
                "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n";
        conn.createStatement().execute(ddl);
        final Configuration configuration = new Configuration();
        configuration.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
        PhoenixConfigurationUtil.setOutputTableName(configuration, tableName);
        PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName);
        final String upsertStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration);
        final String expectedUpsertStatement = "UPSERT INTO " + tableName + " VALUES (?, ?, ?)";
        assertEquals(expectedUpsertStatement, upsertStatement);
    } finally {
        conn.close();
    }
}
 
Example 3  Project: phoenix  File: ByteUtil.java
public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
    // Search for the place where the trailing 0xFFs start
    int offset = rowKeyPrefix.length;
    while (offset > 0) {
        if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
            break;
        }
        offset--;
    }
    if (offset == 0) {
        // The prefix is all 0xFF bytes, i.e. the last possible prefix
        // before the end of the table, so set the scan to stop at the
        // 'end of the table' marker.
        return HConstants.EMPTY_END_ROW;
    }
    // Copy the right length of the original
    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
    // And increment the last one
    newStopRow[newStopRow.length - 1]++;
    return newStopRow;
}
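
To make the increment logic above concrete, here is a self-contained sketch (not part of the phoenix sources) that replicates the method and exercises the two interesting cases: a prefix with trailing 0xFF bytes, and an all-0xFF prefix that must fall back to HConstants.EMPTY_END_ROW.

import java.util.Arrays;
import org.apache.hadoop.hbase.HConstants;

public class NextRowKeySketch {
  public static void main(String[] args) {
    // {0x01, 0xFF}: drop the trailing 0xFF, then increment 0x01 -> {0x02}.
    System.out.println(Arrays.toString(nextRowKeyForPrefix(new byte[] {0x01, (byte) 0xFF}))); // [2]

    // All 0xFF: no next key exists, so scan to the end of the table.
    System.out.println(nextRowKeyForPrefix(new byte[] {(byte) 0xFF, (byte) 0xFF})
        == HConstants.EMPTY_END_ROW); // true
  }

  // Condensed copy of calculateTheClosestNextRowKeyForPrefix from the ByteUtil example above.
  static byte[] nextRowKeyForPrefix(byte[] rowKeyPrefix) {
    int offset = rowKeyPrefix.length;
    while (offset > 0 && rowKeyPrefix[offset - 1] == (byte) 0xFF) {
      offset--;
    }
    if (offset == 0) {
      return HConstants.EMPTY_END_ROW;
    }
    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
    newStopRow[newStopRow.length - 1]++;
    return newStopRow;
  }
}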
 
Example 4  Project: hbase  File: TestFavoredNodeAssignmentHelper.java
@Test
public void testGetFavoredNodes() throws IOException {
  Map<String,Integer> rackToServerCount = new HashMap<>();
  Set<String> rackList = Sets.newHashSet("rack1", "rack2", "rack3");
  for (String rack : rackList) {
    rackToServerCount.put(rack, 4);
  }
  List<ServerName> servers = getServersFromRack(rackToServerCount);

  FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
  helper.initialize();
  assertTrue(helper.canPlaceFavoredNodes());

  RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey(HConstants.EMPTY_START_ROW)
      .setEndKey(HConstants.EMPTY_END_ROW)
      .build();

  for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
    List<ServerName> fn = helper.generateFavoredNodes(region);
    checkDuplicateFN(fn);
    checkFNRacks(fn);
  }
}
 
Example 5  Project: hbase  File: TestHFileEncryption.java
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Disable block cache in this test.
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  conf.setInt("hfile.format.version", 3);

  fs = FileSystem.get(conf);

  cryptoContext = Encryption.newContext(conf);
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Cipher aes = Encryption.getCipher(conf, algorithm);
  assertNotNull(aes);
  cryptoContext.setCipher(aes);
  byte[] key = new byte[aes.getKeyLength()];
  RNG.nextBytes(key);
  cryptoContext.setKey(key);
}
 
Example 6  Project: hbase  File: MetricsRegionServerWrapperImpl.java
public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
  this.regionServer = regionServer;
  initBlockCache();
  initMobFileCache();

  this.period = regionServer.getConfiguration().getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
    HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);

  this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
  this.runnable = new RegionServerMetricsWrapperRunnable();
  this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
    TimeUnit.MILLISECONDS);
  this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  this.allocator = regionServer.getRpcServer().getByteBuffAllocator();

  try {
    this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
  } catch (IOException e) {
    LOG.warn("Failed to get hedged metrics", e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
  }
}
 
Example 7  Project: hbase  File: TestRecoverableZooKeeper.java
@Test
public void testSetDataVersionMismatchInLoop() throws Exception {
  String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f";
  Configuration conf = TEST_UTIL.getConfiguration();
  ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop",
      abortable, true);
  String ensemble = ZKConfig.getZKQuorumServersString(conf);
  RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw);
  rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  rzk.setData(znode, Bytes.toBytes("OPENING"), 0);
  Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk");
  zkField.setAccessible(true);
  int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
  ZookeeperStub zkStub = new ZookeeperStub(ensemble, timeout, zkw);
  zkStub.setThrowExceptionInNumOperations(1);
  zkField.set(rzk, zkStub);
  byte[] opened = Bytes.toBytes("OPENED");
  rzk.setData(znode, opened, 1);
  byte[] data = rzk.getData(znode, false, new Stat());
  assertTrue(Bytes.equals(opened, data));
}
 
Example 8  Project: phoenix  File: WhereOptimizerTest.java
@Test
public void testOrDiffColExpression() throws SQLException {
    String tenantId1 = "000000000000001";
    String entityId1 = "002333333333331";
    String query = "select * from atable where organization_id = ? or entity_id  = ?";
    List<Object> binds = Arrays.<Object>asList(tenantId1,entityId1);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();

    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    ScanRanges scanRanges = context.getScanRanges();
    assertEquals(ScanRanges.EVERYTHING,scanRanges);
    assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow());
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow());
}
 
Example 9  Project: hbase  File: TestCompactionScanQueryMatcher.java
private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... expected)
    throws IOException {
  long now = EnvironmentEdgeManager.currentTime();
  // Set time to purge deletes to negative value to avoid it ever happening.
  ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
      HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);

  CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
    ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
    HConstants.OLDEST_TIMESTAMP, now, from, to, null);
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(rows.length);
  byte[] prevRow = null;
  for (byte[] row : rows) {
    if (prevRow == null || !Bytes.equals(prevRow, row)) {
      qm.setToNewRow(KeyValueUtil.createFirstOnRow(row));
      prevRow = row;
    }
    actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
  }

  assertEquals(expected.length, actual.size());
  for (int i = 0; i < expected.length; i++) {
    LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
    assertEquals(expected[i], actual.get(i));
  }
}
 
Example 10  Project: phoenix  File: WhereOptimizerTest.java
/**
 * With only a subset of row key cols present (which includes the leading key), 
 * Phoenix should have optimized the start row for the scan to include the 
 * row key cols that occur contiguously in the RVC.
 * 
 * Table entity_history has the row key defined as (organization_id, parent_id, created_date, entity_history_id). 
 * This test uses (organization_id, parent_id, entity_history_id) in the RVC. So the start row should be comprised of
 * organization_id and parent_id.
 * @throws SQLException
 */
@Test
public void testRVCExpressionWithSubsetOfPKCols() throws SQLException {
    String tenantId = "000000000000001";
    String parentId = "000000000000002";
    String entityHistId = "000000000000003";
    
    String query = "select * from entity_history where (organization_id, parent_id, entity_history_id) >= (?,?,?)";
    List<Object> binds = Arrays.<Object>asList(tenantId, parentId, entityHistId);
    StatementContext context = compileStatement(query, binds);
    Scan scan = context.getScan();
    Filter filter = scan.getFilter();
    assertNotNull(filter);
    assertTrue(filter instanceof RowKeyComparisonFilter);
    byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId));
    assertArrayEquals(expectedStartRow, scan.getStartRow());
    assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow());
}
 
Example 11

@Test
public void testPutRegionInfoFromHdfsInMeta() throws Exception {
  RegionInfo info = this.createRegionInfo("test-tbl");
  Path regionPath = new Path("/hbase/data/default/test-tbl/" + info.getEncodedName());
  FSDataInputStream fis = new FSDataInputStream(new TestInputStreamSeekable(info));
  when(this.mockedFileSystem.open(new Path(regionPath, ".regioninfo")))
    .thenReturn(fis);
  fixer.putRegionInfoFromHdfsInMeta(regionPath);
  Mockito.verify(this.mockedConnection).getTable(TableName.META_TABLE_NAME);
  ArgumentCaptor<Put> captor = ArgumentCaptor.forClass(Put.class);
  Mockito.verify(this.mockedTable).put(captor.capture());
  Put capturedPut = captor.getValue();
  List<Cell> cells = capturedPut.get(HConstants.CATALOG_FAMILY,
    HConstants.STATE_QUALIFIER);
  assertEquals(1, cells.size());
  String state = Bytes.toString(cells.get(0).getValueArray(),
    cells.get(0).getValueOffset(), cells.get(0).getValueLength());
  assertEquals(RegionState.State.valueOf(state), RegionState.State.CLOSED);
  cells = capturedPut.get(HConstants.CATALOG_FAMILY,
    HConstants.REGIONINFO_QUALIFIER);
  byte[] returnedInfo = Bytes.copy(cells.get(0).getValueArray(),
    cells.get(0).getValueOffset(), cells.get(0).getValueLength());
  assertEquals(info, RegionInfo.parseFrom(returnedInfo));
}
 
Example 12  Project: hbase  File: TestMinorCompaction.java
/** constructor */
public TestMinorCompaction() {
  super();

  // Set cache flush size to 1MB
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
  compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

  firstRowBytes = START_KEY_BYTES;
  secondRowBytes = START_KEY_BYTES.clone();
  // Increment the least significant character so we get to next row.
  secondRowBytes[START_KEY_BYTES.length - 1]++;
  thirdRowBytes = START_KEY_BYTES.clone();
  thirdRowBytes[START_KEY_BYTES.length - 1] =
      (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2);
  col1 = Bytes.toBytes("column1");
  col2 = Bytes.toBytes("column2");
}
 
Example 13  Project: phoenix  File: TestUtil.java
public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    Table htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {
            @Override
            public ClearCacheResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<ClearCacheResponse> rpcCallback =
                        new BlockingRpcCallback<ClearCacheResponse>();
                ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
                instance.clearCache(controller, builder.build(), rpcCallback);
                if(controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get(); 
            }
          });
}
 
Example 14  Project: hbase  File: TestThriftConnection.java
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
 
Example 15  Project: hbase  File: TestGetClosestAtOrBefore.java
/**
 * @param answer Pass -1 if we're not to find anything.
 * @return Row found.
 */
private byte[] findRow(final Region mr, final char table, final int rowToFind, final int answer)
  throws IOException {
  TableName tableb = TableName.valueOf("" + table);
  // Find the row.
  byte[] tofindBytes = Bytes.toBytes((short) rowToFind);
  byte[] metaKey = RegionInfo.createRegionName(tableb, tofindBytes, HConstants.NINES, false);
  LOG.info("find=" + new String(metaKey, StandardCharsets.UTF_8));
  Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY);
  if (answer == -1) {
    assertNull(r);
    return null;
  }
  assertTrue(
    Bytes.compareTo(Bytes.toBytes((short) answer), extractRowFromMetaRow(r.getRow())) == 0);
  return r.getRow();
}
 
Example 16  Project: hbase  File: PerformanceEvaluation.java
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, this.totalRows);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new ArrayBackedTag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
 
Example 17  Project: hbase  File: TestVisibilityLabelsWithACL.java
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    List<Put> puts = new ArrayList<>(labelExps.length);
    for (String labelExp : labelExps) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      i++;
    }
    table.put(puts);
  } finally {
    if (table != null) {
      table.close();
    }
  }
  return table;
}
 
Example 18  Project: cloud-bigtable-examples  File: CellCounter.java
private static long[] getTimeRange(String[] args) throws IOException {
  final String startTimeArgKey = "--starttime=";
  final String endTimeArgKey = "--endtime=";
  long startTime = 0L;
  long endTime = 0L;

  for (int i = 1; i < args.length; i++) {
    System.out.println("i:" + i + " arg[i]:" + args[i]);
    if (args[i].startsWith(startTimeArgKey)) {
      startTime = Long.parseLong(args[i].substring(startTimeArgKey.length()));
    }
    if (args[i].startsWith(endTimeArgKey)) {
      endTime = Long.parseLong(args[i].substring(endTimeArgKey.length()));
    }
  }

  if (startTime == 0 && endTime == 0) {
    return null;
  }

  endTime = endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime;
  LOG.warn("Got the timerange : " + startTime + " - " + endTime);
  return new long [] {startTime, endTime};
}
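
For context, here is a self-contained sketch (not part of cloud-bigtable-examples; HBase client API assumed) of how such a parsed range would typically be applied to a Scan. The parsing logic is a condensed copy of the method above; args[0] is skipped because CellCounter treats it as a positional argument.

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;

public class TimeRangeSketch {
  public static void main(String[] args) throws IOException {
    long[] range = getTimeRange(new String[] {"mytable", "--starttime=100", "--endtime=0"});
    Scan scan = new Scan();
    if (range != null) {
      // An endTime of 0 was replaced with HConstants.LATEST_TIMESTAMP, so this
      // scans all cells with timestamp >= 100.
      scan.setTimeRange(range[0], range[1]);
    }
  }

  // Condensed copy of the parsing logic from getTimeRange above.
  static long[] getTimeRange(String[] args) {
    long startTime = 0L;
    long endTime = 0L;
    for (int i = 1; i < args.length; i++) {
      if (args[i].startsWith("--starttime=")) {
        startTime = Long.parseLong(args[i].substring("--starttime=".length()));
      } else if (args[i].startsWith("--endtime=")) {
        endTime = Long.parseLong(args[i].substring("--endtime=".length()));
      }
    }
    if (startTime == 0 && endTime == 0) {
      return null;
    }
    return new long[] {startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime};
  }
}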
 
Example 19  Project: hbase  File: BaseLoadBalancer.java
@Override
public List<RegionPlan>
    balanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
  if (isByTable) {
    List<RegionPlan> result = new ArrayList<>();
    loadOfAllTable.forEach((tableName, loadOfOneTable) -> {
      LOG.info("Start Generate Balance plan for table: " + tableName);
      List<RegionPlan> partialPlans = balanceTable(tableName, loadOfOneTable);
      if (partialPlans != null) {
        result.addAll(partialPlans);
      }
    });
    return result;
  } else {
    LOG.info("Start Generate Balance plan for cluster.");
    return balanceTable(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable));
  }
}
 
Example 20  Project: hbase  File: WALKeyImpl.java
@Override
public long estimatedSerializedSizeOf() {
  long size = encodedRegionName != null ? encodedRegionName.length : 0;
  size += tablename != null ? tablename.toBytes().length : 0;
  if (clusterIds != null) {
    size += 16 * clusterIds.size();
  }
  if (nonceGroup != HConstants.NO_NONCE) {
    size += Bytes.SIZEOF_LONG; // nonce group
  }
  if (nonce != HConstants.NO_NONCE) {
    size += Bytes.SIZEOF_LONG; // nonce
  }
  if (replicationScope != null) {
    for (Map.Entry<byte[], Integer> scope: replicationScope.entrySet()) {
      size += scope.getKey().length;
      size += Bytes.SIZEOF_INT;
    }
  }
  size += Bytes.SIZEOF_LONG; // sequence number
  size += Bytes.SIZEOF_LONG; // write time
  if (origLogSeqNum > 0) {
    size += Bytes.SIZEOF_LONG; // original sequence number
  }
  return size;
}
 
Example 21  Project: pentaho-hadoop-shims  File: HadoopShim.java
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
    HConstants.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, Put.class,
    RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class, FastLongHistogram.class,
    Snapshot.class, ZooKeeper.class, Channel.class, Message.class, UnsafeByteOperations.class, Lists.class,
    Tracer.class, MetricRegistry.class, ArrayUtils.class, ObjectMapper.class, Versioned.class,
    JsonView.class, ZKWatcher.class, CacheLoader.class
  };
}
 
Example 22  Project: kylin-on-parquet-v2  File: CubeHBaseRPC.java
protected long getCoprocessorTimeoutMillis() {
    long coopTimeout;
    if (BackdoorToggles.getQueryTimeout() != -1) {
        coopTimeout = BackdoorToggles.getQueryTimeout();
    } else {
        coopTimeout = cubeSeg.getConfig().getQueryCoprocessorTimeoutSeconds() * 1000L;
    }
    
    int rpcTimeout;
    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    rpcTimeout = hconf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    
    // HBase rpc timeout must be longer than coprocessor timeout
    if ((int) (coopTimeout * 1.1) > rpcTimeout) {
        rpcTimeout = (int) (coopTimeout * 1.1);
        hconf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, rpcTimeout);
    }
    
    // coprocessor timeout is 0 by default
    if (coopTimeout <= 0) {
        coopTimeout = (long) (rpcTimeout * 0.9);
    }

    queryContext.checkMillisBeforeDeadline();
    logger.debug("{} = {} ms, use {} ms as timeout for coprocessor", HConstants.HBASE_RPC_TIMEOUT_KEY, rpcTimeout, coopTimeout);
    return coopTimeout;
}
 
Example 23  Project: hbase  File: BackupUtils.java
/**
 * Write the .regioninfo file on-disk.
 */
public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
    final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
  final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
  Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
  // First check to get the permissions
  FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // Write the RegionInfo file content
  FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
  try {
    out.write(content);
  } finally {
    out.close();
  }
}
 
Example 24

private Table createTableAndWriteDataWithLabels(String... labelExps) throws Exception {
  Table table = createTable(fam);
  int i = 1;
  List<Put> puts = new ArrayList<>(labelExps.length);
  for (String labelExp : labelExps) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    put.setCellVisibility(new CellVisibility(labelExp));
    puts.add(put);
    table.put(put);
    i++;
  }
  // table.put(puts);
  return table;
}
 
Example 25  Project: hbase  File: RegionInfoBuilder.java
/**
 * Returns true if the given inclusive range of rows is fully contained
 * by this region. For example, if the region is foo,a,g and this is
 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
 * ["b","z"] it will return false.
 * @throws IllegalArgumentException if the range passed is invalid (ie. end &lt; start)
 */
@Override
public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
  if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
    throw new IllegalArgumentException(
    "Invalid range: " + Bytes.toStringBinary(rangeStartKey) +
    " > " + Bytes.toStringBinary(rangeEndKey));
  }

  boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0;
  boolean lastKeyInRange =
    Bytes.compareTo(rangeEndKey, endKey) < 0 ||
    Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
  return firstKeyInRange && lastKeyInRange;
}
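
A short usage sketch for the javadoc's examples (standalone, not from the hbase tests; the RegionInfoBuilder usage mirrors Example 4 above): a region covering [a, g) contains ["b","c"] and ["a","c"], but not ["b","z"].

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ContainsRangeSketch {
  public static void main(String[] args) {
    // Mirrors the "foo,a,g" region from the javadoc above.
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("foo"))
        .setStartKey(Bytes.toBytes("a"))
        .setEndKey(Bytes.toBytes("g"))
        .build();

    System.out.println(region.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c"))); // true
    System.out.println(region.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c"))); // true
    System.out.println(region.containsRange(Bytes.toBytes("b"), Bytes.toBytes("z"))); // false
  }
}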
 
Example 26  Project: hbase  File: TestAdmin.java
@Test
public void testCreateTable() throws IOException {
  List<TableDescriptor> tables = ADMIN.listTableDescriptors();
  int numTables = tables.size();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
  tables = ADMIN.listTableDescriptors();
  assertEquals(numTables + 1, tables.size());
  assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
    .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
  assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));
}
 
Example 27  Project: phoenix  File: BaseTest.java
public static Configuration setUpConfigForMiniCluster(Configuration conf, ReadOnlyProps overrideProps) {
    assertNotNull(conf);
    setDefaultTestConfig(conf, overrideProps);
    /*
     * The default mini cluster configuration ends up spawning a lot of threads
     * that phoenix does not really need for test purposes. Limiting these threads
     * lets us run several mini clusters at the same time without hitting
     * the thread limit imposed by the OS.
     */
    conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5);
    conf.setInt("hbase.regionserver.metahandler.count", 2);
    conf.setInt(HConstants.MASTER_HANDLER_COUNT, 2);
    conf.setClass("hbase.coprocessor.regionserver.classes", LocalIndexMerger.class,
        RegionServerObserver.class);
    conf.setInt("dfs.namenode.handler.count", 2);
    conf.setInt("dfs.namenode.service.handler.count", 2);
    conf.setInt("dfs.datanode.handler.count", 2);
    conf.setInt("ipc.server.read.threadpool.size", 2);
    conf.setInt("ipc.server.handler.threadpool.size", 2);
    conf.setInt("hbase.hconnection.threads.max", 2);
    conf.setInt("hbase.hconnection.threads.core", 2);
    conf.setInt("hbase.htable.threads.max", 2);
    conf.setInt("hbase.regionserver.hlog.syncer.count", 2);
    conf.setInt("hbase.hlog.asyncer.number", 2);
    conf.setInt("hbase.assignment.zkevent.workers", 5);
    conf.setInt("hbase.assignment.threads.max", 5);
    conf.setInt("hbase.catalogjanitor.interval", 5000);
    return conf;
}
 
Example 28  Project: hbase  File: RSRpcServices.java
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public SlowLogResponses getSlowLogResponses(final RpcController controller,
  final SlowLogResponseRequest request) {
  final SlowLogRecorder slowLogRecorder =
    this.regionServer.getSlowLogRecorder();
  final List<SlowLogPayload> slowLogPayloads;
  slowLogPayloads = slowLogRecorder != null
    ? slowLogRecorder.getSlowLogPayloads(request)
    : Collections.emptyList();
  SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder()
    .addAllSlowLogPayloads(slowLogPayloads)
    .build();
  return slowLogResponses;
}
 
Example 29  Project: hbase  File: TestCloseAnOpeningRegion.java
@BeforeClass
public static void setUp() throws Exception {
  UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, 60000);
  UTIL.startMiniCluster(
    StartMiniClusterOption.builder().numRegionServers(2).masterClass(MockHMaster.class).build());
  UTIL.createTable(TABLE_NAME, CF);
  UTIL.getAdmin().balancerSwitch(false, true);
}
 
Example 30  Project: hbase  File: TestHFileOutputFormat2.java
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testJobConfiguration() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration")
      .toString());
  Job job = new Job(conf);
  job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
  Table table = Mockito.mock(Table.class);
  RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
  setupMockStartKeys(regionLocator);
  setupMockTableName(regionLocator);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
  assertEquals(job.getNumReduceTasks(), 4);
}
 