org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#of ( )源码实例Demo

下面列出了org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#of ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: hbase   文件: TestBulkLoadHFiles.java
@Test
public void testSplitStoreFile() throws IOException {
  // Write a 1000-row HFile, split it at row "ggg", and verify that the two
  // halves together still contain every row.
  Path workDir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fileSystem = util.getTestFileSystem();
  Path sourceFile = new Path(workDir, "testhfile");

  ColumnFamilyDescriptor familyDesc = ColumnFamilyDescriptorBuilder.of(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fileSystem, sourceFile, FAMILY, QUALIFIER,
    Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomHalf = new Path(workDir, "bottom.out");
  Path topHalf = new Path(workDir, "top.out");

  BulkLoadHFilesTool.splitStoreFile(util.getConfiguration(), sourceFile, familyDesc,
    Bytes.toBytes("ggg"), bottomHalf, topHalf);

  // Bottom half is verified first, matching the original call order.
  int totalRows = verifyHFile(bottomHalf) + verifyHFile(topHalf);
  assertEquals(1000, totalRows);
}
 
源代码2 项目: hbase   文件: AddColumnAction.java
@Override
public void perform() throws Exception {
  TableDescriptor tableDescriptor = admin.getDescriptor(tableName);

  // Keep drawing random 5-letter family names until we hit one the table
  // does not already contain.
  ColumnFamilyDescriptor columnDescriptor;
  do {
    columnDescriptor = ColumnFamilyDescriptorBuilder.of(RandomStringUtils.randomAlphabetic(5));
  } while (tableDescriptor.getColumnFamily(columnDescriptor.getName()) != null);

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }

  getLogger().debug("Performing action: Adding " + columnDescriptor + " to " + tableName);

  admin.modifyTable(TableDescriptorBuilder.newBuilder(tableDescriptor)
      .setColumnFamily(columnDescriptor).build());
}
 
源代码3 项目: hbase   文件: TestRefreshHFilesEndpoint.java
@Override
public List<HStore> getStores() {
  List<HStore> result = new ArrayList<>(stores.size());
  /*
   * Lazily create the faulty HStore implementation so the custom (failing)
   * refresh-HFiles code path is exercised; if construction fails, the store
   * is simply omitted from the returned list.
   */
  try {
    if (this.store == null) {
      store = new HStoreWithFaultyRefreshHFilesAPI(this,
          ColumnFamilyDescriptorBuilder.of(FAMILY), this.conf);
    }
  } catch (IOException ioe) {
    LOG.info("Couldn't instantiate custom store implementation", ioe);
  }
  // store stays null when the constructor above threw.
  if (store != null) {
    result.add(store);
  }
  result.addAll(stores.values());
  return result;
}
 
源代码4 项目: atlas   文件: HBaseStoreManager.java
/**
 * Creates an HBase table with a single column family, pre-splitting it into
 * a number of regions when a usable region count can be determined.
 *
 * @param tableName    name of the table to create
 * @param cfName       name of the lone column family
 * @param ttlInSeconds TTL applied to the column family via setCFOptions
 * @param adm          admin facade used to issue the create call
 * @return the descriptor of the created table
 * @throws IOException if table creation fails
 */
private TableDescriptor createTable(String tableName, String cfName, int ttlInSeconds, AdminMask adm) throws IOException {
    TableDescriptor desc = compat.newTableDescriptor(tableName);

    ColumnFamilyDescriptor cdesc = ColumnFamilyDescriptorBuilder.of(cfName);
    cdesc = setCFOptions(cdesc, ttlInSeconds);

    desc = compat.addColumnFamilyToTableDescriptor(desc, cdesc);

    // Pick a region count: explicit configuration first, then an estimate
    // derived from the number of region servers, otherwise fall back to the
    // HBase default (signalled by -1).
    int count; // total regions to create
    String src;

    if (MIN_REGION_COUNT <= (count = regionCount)) {
        src = "region count configuration";
    } else if (0 < regionsPerServer &&
               MIN_REGION_COUNT <= (count = regionsPerServer * adm.getEstimatedRegionServerCount())) {
        src = "ClusterStatus server count";
    } else {
        count = -1;
        src = "default";
    }

    // Use the same inclusive bound as the selection above. The original
    // strict '<' silently ignored a configured count exactly equal to
    // MIN_REGION_COUNT even though a source had been chosen for it.
    if (MIN_REGION_COUNT <= count) {
        adm.createTable(desc, getStartKey(count), getEndKey(count), count);
        logger.debug("Created table {} with region count {} from {}", tableName, count, src);
    } else {
        adm.createTable(desc);
        logger.debug("Created table {} with default start key, end key, and region count", tableName);
    }

    return desc;
}
 
源代码5 项目: hbase   文件: TestHStoreFile.java
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  // shouldUseScanner() must return false for a scan whose column-family
  // time range is [0, 1) against this mocked, empty store file.
  byte[] family = Bytes.toBytes("ty");
  ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(family);

  StoreFileReader reader = mock(StoreFileReader.class);
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(familyDescriptor);

  try (StoreFileScanner scanner =
    new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) {
    Scan scan = new Scan();
    scan.setColumnFamilyTimeRange(family, 0, 1);
    assertFalse(scanner.shouldUseScanner(scan, store, 0));
  }
}
 
/**
 * Create simple HTD with three families: 'a', 'b', and 'c'
 * @param tableName name of the table descriptor
 * @return a table descriptor containing the three column families
 */
private TableDescriptor createBasic3FamilyHTD(final String tableName) {
  TableDescriptorBuilder tableBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
  // Add the three fixed families in a loop instead of repeating the
  // builder call three times.
  for (String family : new String[] { "a", "b", "c" }) {
    tableBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(family)));
  }
  return tableBuilder.build();
}
 
源代码7 项目: hbase   文件: TestMetaBrowser.java
private ColumnFamilyDescriptor columnFamilyDescriptor() {
  // Minimal descriptor for the single test family "f1".
  final String familyName = "f1";
  return ColumnFamilyDescriptorBuilder.of(familyName);
}
 
源代码8 项目: hbase   文件: TestZooKeeperTableArchiveClient.java
/**
 * End-to-end check that HFiles archived for a single table are NOT deleted
 * by a run of the HFile cleaner while the long-term archiving delegate is
 * watching that table.
 */
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  // Registered for cleanup in the test teardown.
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<HRegion> regions = new ArrayList<>();
  regions.add(region);
  Mockito.doReturn(regions).when(rss).getRegions();
  final CompactedHFilesDischarger compactionCleaner =
      new CompactedHFilesDischarger(100, stop, rss, false);
  // Load data, flush, and compact so compacted files get moved to the
  // archive directory by the discharger chore below.
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    // Dump the FS state to aid debugging before failing the test.
    CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  // Latch that fires once the delegate has examined all archived files.
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());

  runCleaner(cleaner, finished, stop);

  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
 
源代码9 项目: hbase   文件: TestRSGroupsAdmin1.java
/**
 * Verifies that a table whose creation fails (here: a namespace quota
 * violation on max regions) does not show up in any RSGroup's table list,
 * while a successfully created table in the same namespace does.
 */
@Test
public void testRSGroupListDoesNotContainFailedTableCreation() throws Exception {
  toggleQuotaCheckAndRestartMiniCluster(true);
  String nsp = "np1";
  // Namespace capped at 5 regions / 2 tables; the 6-region create below
  // is expected to exceed the region quota.
  NamespaceDescriptor nspDesc =
    NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  assertEquals(3, ADMIN.listNamespaceDescriptors().length);
  ColumnFamilyDescriptor fam1 = ColumnFamilyDescriptorBuilder.of("fam1");
  // table1: single-region create, fits within the quota.
  TableDescriptor tableDescOne = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"))
    .setColumnFamily(fam1).build();
  ADMIN.createTable(tableDescOne);

  TableDescriptor tableDescTwo = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"))
    .setColumnFamily(fam1).build();
  boolean constraintViolated = false;

  try {
    // 6 pre-split regions exceeds the 5-region namespace quota.
    ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 6);
    fail("Creation table should fail because of quota violation.");
  } catch (Exception exp) {
    assertTrue(exp instanceof IOException);
    constraintViolated = true;
  } finally {
    assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(),
      constraintViolated);
  }
  // Scan every RSGroup for the two table names.
  List<RSGroupInfo> rsGroupInfoList = ADMIN.listRSGroups();
  boolean foundTable2 = false;
  boolean foundTable1 = false;
  for (int i = 0; i < rsGroupInfoList.size(); i++) {
    Set<TableName> tables =
      Sets.newHashSet(ADMIN.listTablesInRSGroup(rsGroupInfoList.get(i).getName()));
    if (tables.contains(tableDescTwo.getTableName())) {
      foundTable2 = true;
    }
    if (tables.contains(tableDescOne.getTableName())) {
      foundTable1 = true;
    }
  }
  assertFalse("Found table2 in rsgroup list.", foundTable2);
  assertTrue("Did not find table1 in rsgroup list", foundTable1);

  // Cleanup: drop the surviving table and namespace, restore quota setting.
  TEST_UTIL.deleteTable(tableDescOne.getTableName());
  ADMIN.deleteNamespace(nspDesc.getName());
  toggleQuotaCheckAndRestartMiniCluster(false);

}
 
源代码10 项目: hbase   文件: IntegrationTestDDLMasterFailover.java
private ColumnFamilyDescriptor createFamilyDesc() {
  // Random zero-padded suffix yields fixed-width names like "cf-0000012345".
  final String familyName = "cf-" + String.format("%010d", RandomUtils.nextInt());
  return ColumnFamilyDescriptorBuilder.of(familyName);
}