Source code examples for the org.apache.hadoop.hbase.CellUtil class

The examples below show how to use the org.apache.hadoop.hbase.CellUtil API in real projects; you can also follow the links to each project on GitHub to read the full source.
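Before the project snippets, here is a minimal, self-contained sketch of the two groups of CellUtil methods that recur throughout this page: the clone* accessors, which copy a cell's row, family, qualifier or value into a standalone byte array, and the matching* helpers, which compare a cell against expected coordinates without copying. The connection, table name and column coordinates below are made up for illustration and are not taken from any of the listed projects.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CellUtilDemo {

  // Hypothetical table and column coordinates, for illustration only.
  private static final TableName TABLE = TableName.valueOf("demo_table");
  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TABLE)) {
      Result result = table.get(new Get(Bytes.toBytes("row-1")));
      if (result.isEmpty()) {
        System.out.println("row not found");
        return;
      }
      for (Cell cell : result.rawCells()) {
        // clone* copies the requested component out of the cell's backing array.
        byte[] row = CellUtil.cloneRow(cell);
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        byte[] value = CellUtil.cloneValue(cell);
        System.out.println(Bytes.toString(row) + " " + Bytes.toString(family) + ":"
            + Bytes.toString(qualifier) + " = " + Bytes.toString(value));

        // matching* compares in place, without copying; used by several filters below.
        if (CellUtil.matchingColumn(cell, FAMILY, QUALIFIER)) {
          System.out.println("cell belongs to f:q");
        }
      }
    }
  }
}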

Example 1 - Project: hbase, File: BackupSystemTable.java
public String[] getListOfBackupIdsFromDeleteOperation() throws IOException {
  LOG.trace("Get delete operation for backup ids");

  Get get = createGetForDeleteOperation();
  try (Table table = connection.getTable(tableName)) {
    Result res = table.get(get);
    if (res.isEmpty()) {
      return null;
    }
    Cell cell = res.listCells().get(0);
    byte[] val = CellUtil.cloneValue(cell);
    if (val.length == 0) {
      return null;
    }
    return new String(val).split(",");
  }
}
 
Example 2 - Project: hraven, File: JobHistoryRawService.java
/**
 * returns the raw byte representation of job history from the result value
 * @param value result
 * @return byte array of job history raw
 *
 * @throws MissingColumnInResultException
 */
public byte[] getJobHistoryRawFromResult(Result value)
    throws MissingColumnInResultException {
  if (value == null) {
    throw new IllegalArgumentException("Cannot create InputStream from null");
  }

  Cell cell = value.getColumnLatestCell(Constants.RAW_FAM_BYTES,
      Constants.JOBHISTORY_COL_BYTES);

  // Could be that there is no conf file (only a history file).
  if (cell == null) {
    throw new MissingColumnInResultException(Constants.RAW_FAM_BYTES,
        Constants.JOBHISTORY_COL_BYTES);
  }

  byte[] jobHistoryRaw = CellUtil.cloneValue(cell);
  return jobHistoryRaw;
}
 
Example 3 - Project: hbase, File: MultipleColumnPrefixFilter.java
public ReturnCode filterColumn(Cell cell) {
  byte [] qualifier = CellUtil.cloneQualifier(cell);
  TreeSet<byte []> lesserOrEqualPrefixes =
    (TreeSet<byte []>) sortedPrefixes.headSet(qualifier, true);

  if (lesserOrEqualPrefixes.size() != 0) {
    byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last();
    
    if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) {
      return ReturnCode.INCLUDE;
    }
    
    if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) {
      return ReturnCode.NEXT_ROW;
    } else {
      hint = sortedPrefixes.higher(largestPrefixSmallerThanQualifier);
      return ReturnCode.SEEK_NEXT_USING_HINT;
    }
  } else {
    hint = sortedPrefixes.first();
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }
}
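The method above is the filter's internal per-cell matching logic. In client code the filter is simply attached to a Scan; the following short sketch, with a made-up table name and prefixes, shows how MultipleColumnPrefixFilter is typically wired up.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiPrefixScanDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      // Only columns whose qualifier starts with "attr_" or "meta_" are returned;
      // everything else is skipped via the seek hints computed in filterColumn above.
      byte[][] prefixes = { Bytes.toBytes("attr_"), Bytes.toBytes("meta_") };
      Scan scan = new Scan();
      scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(result);
        }
      }
    }
  }
}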
 
Example 4 - Project: hugegraph, File: HbaseSessions.java
/**
 * Just for debug
 */
@SuppressWarnings("unused")
private void dump(String table, Scan scan) throws IOException {
    System.out.println(String.format(">>>> scan table %s with %s",
                                     table, scan));
    RowIterator iterator = this.scan(table, scan);
    while (iterator.hasNext()) {
        Result row = iterator.next();
        System.out.println(StringEncoding.format(row.getRow()));
        CellScanner cellScanner = row.cellScanner();
        while (cellScanner.advance()) {
            Cell cell = cellScanner.current();
            byte[] key = CellUtil.cloneQualifier(cell);
            byte[] val = CellUtil.cloneValue(cell);
            System.out.println(String.format("  %s=%s",
                               StringEncoding.format(key),
                               StringEncoding.format(val)));
        }
    }
}
 
Example 5 - Project: phoenix, File: IndexToolForNonTxGlobalIndexIT.java
private List<String> verifyRunStatusFromResultTable(Connection conn, Long scn, String indexTable, int totalRows, List<String> expectedStatus) throws SQLException, IOException {
    Table hIndexToolTable = conn.unwrap(PhoenixConnection.class).getQueryServices()
            .getTable(RESULT_TABLE_NAME_BYTES);
    Assert.assertEquals(totalRows, TestUtil.getRowCount(hIndexToolTable, false));
    List<String> output = new ArrayList<>();
    Scan s = new Scan();
    s.setRowPrefixFilter(Bytes.toBytes(String.format("%s%s%s", scn, ROW_KEY_SEPARATOR, indexTable)));
    ResultScanner rs = hIndexToolTable.getScanner(s);
    int count =0;
    for(Result r : rs) {
        Assert.assertTrue(r != null);
        List<Cell> cells = r.getColumnCells(RESULT_TABLE_COLUMN_FAMILY, INDEX_TOOL_RUN_STATUS_BYTES);
        Assert.assertEquals(cells.size(), 1);
        Assert.assertTrue(Bytes.toString(CellUtil.cloneRow(cells.get(0))).startsWith(String.valueOf(scn)));
        output.add(Bytes.toString(CellUtil.cloneValue(cells.get(0))));
        count++;
    }
    //for each region
    Assert.assertEquals(3, count);
    for(int i=0; i< count; i++) {
        Assert.assertEquals(expectedStatus.get(i), output.get(i));
    }
    return output;
}
 
Example 6 - Project: hbase-indexer, File: Indexer.java
/**
 * Delete all values for a single column family from Solr.
 */
private void deleteFamily(KeyValue deleteKeyValue, SolrUpdateCollector updateCollector,
                          UniqueKeyFormatter uniqueKeyFormatter, byte[] tableName) {
    String rowField = conf.getRowField();
    String cfField = conf.getColumnFamilyField();
    String rowValue;
    String familyValue;
    if (uniqueKeyFormatter instanceof UniqueTableKeyFormatter) {
        UniqueTableKeyFormatter uniqueTableKeyFormatter = (UniqueTableKeyFormatter) uniqueKeyFormatter;
        rowValue = uniqueTableKeyFormatter.formatRow(CellUtil.cloneRow(deleteKeyValue), tableName);
        familyValue = uniqueTableKeyFormatter.formatFamily(CellUtil.cloneFamily(deleteKeyValue), tableName);
    } else {
        rowValue = uniqueKeyFormatter.formatRow(CellUtil.cloneRow(deleteKeyValue));
        familyValue = uniqueKeyFormatter.formatFamily(CellUtil.cloneFamily(deleteKeyValue));
    }

    if (rowField != null && cfField != null) {
        updateCollector.deleteByQuery(String.format("(%s:%s)AND(%s:%s)", rowField, rowValue, cfField, familyValue));
    } else {
        log.warn(String.format(
                "Can't delete row %s and family %s from Solr because row and/or family fields not included in the indexer configuration",
                rowValue, familyValue));
    }
}
 
Example 7 - Project: pinpoint, File: ResponseTimeMapper.java
@Override
public ResponseTime mapRow(Result result, int rowNum) throws Exception {
    if (result.isEmpty()) {
        return null;
    }

    final byte[] rowKey = getOriginalKey(result.getRow());

    ResponseTime responseTime = createResponseTime(rowKey);
    for (Cell cell : result.rawCells()) {
        if (CellUtil.matchingFamily(cell, HbaseColumnFamily.MAP_STATISTICS_SELF_VER2_COUNTER.getName())) {
            recordColumn(responseTime, cell);
        }

        if (logger.isDebugEnabled()) {
            String columnFamily = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
            logger.debug("unknown column family:{}", columnFamily);
        }
    }
    return responseTime;
}
 
Example 8 - Project: hbase, File: TestFromClientSide5.java
@Test
public void testAppendWithoutWAL() throws Exception {
  List<Result> resultsWithWal = doAppend(true);
  List<Result> resultsWithoutWal = doAppend(false);
  assertEquals(resultsWithWal.size(), resultsWithoutWal.size());
  for (int i = 0; i != resultsWithWal.size(); ++i) {
    Result resultWithWal = resultsWithWal.get(i);
    Result resultWithoutWal = resultsWithoutWal.get(i);
    assertEquals(resultWithWal.rawCells().length, resultWithoutWal.rawCells().length);
    for (int j = 0; j != resultWithWal.rawCells().length; ++j) {
      Cell cellWithWal = resultWithWal.rawCells()[j];
      Cell cellWithoutWal = resultWithoutWal.rawCells()[j];
      assertArrayEquals(CellUtil.cloneRow(cellWithWal), CellUtil.cloneRow(cellWithoutWal));
      assertArrayEquals(CellUtil.cloneFamily(cellWithWal), CellUtil.cloneFamily(cellWithoutWal));
      assertArrayEquals(CellUtil.cloneQualifier(cellWithWal),
        CellUtil.cloneQualifier(cellWithoutWal));
      assertArrayEquals(CellUtil.cloneValue(cellWithWal), CellUtil.cloneValue(cellWithoutWal));
    }
  }
}
 
@Override
public Cell transformCell(Cell cell) throws IOException {
  // Convert Tephra deletes back into HBase deletes
  if (tx.getVisibilityLevel() == Transaction.VisibilityLevel.SNAPSHOT_ALL) {
    if (DeleteTracker.isFamilyDelete(cell)) {
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), null, cell.getTimestamp(),
                          KeyValue.Type.DeleteFamily);
    } else if (isColumnDelete(cell)) {
      // Note: in some cases KeyValue.Type.Delete is used in Delete object,
      // and in some other cases KeyValue.Type.DeleteColumn is used.
      // Since Tephra cannot distinguish between the two, we return KeyValue.Type.DeleteColumn.
      // KeyValue.Type.DeleteColumn makes both CellUtil.isDelete and CellUtil.isDeleteColumns return true, and will
      // work in both cases.
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
                          cell.getTimestamp(), KeyValue.Type.DeleteColumn);
    }
  }
  return cell;
}
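The comment above relies on how CellUtil classifies delete markers. Below is a tiny standalone check of that behaviour, a sketch assuming the CellUtil API the snippet itself references (isDelete(Cell) and isDeleteColumns(Cell); in newer HBase releases the latter is deprecated in favour of internal utilities).

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteMarkerDemo {
  public static void main(String[] args) {
    // Hypothetical coordinates, for illustration only.
    byte[] row = Bytes.toBytes("r1");
    byte[] family = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    long ts = 1L;

    // The marker type the transform above emits for column deletes.
    KeyValue deleteColumn = new KeyValue(row, family, qualifier, ts, KeyValue.Type.DeleteColumn);
    System.out.println(CellUtil.isDelete(deleteColumn));        // true
    System.out.println(CellUtil.isDeleteColumns(deleteColumn)); // true

    // A plain Delete marker only satisfies the first check, which is why the
    // transform uses DeleteColumn when it cannot tell the two apart.
    KeyValue deleteVersion = new KeyValue(row, family, qualifier, ts, KeyValue.Type.Delete);
    System.out.println(CellUtil.isDelete(deleteVersion));        // true
    System.out.println(CellUtil.isDeleteColumns(deleteVersion)); // false
  }
}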
 
Example 10 - Project: phoenix-tephra, File: DataJanitorState.java
/**
 * Gets a list of {@link RegionPruneInfo} for given regions. Returns all regions if the given regions set is null.
 *
 * @param regions a set of regions
 * @return list of {@link RegionPruneInfo}s.
 * @throws IOException when not able to read the data from HBase
 */
public List<RegionPruneInfo> getPruneInfoForRegions(@Nullable SortedSet<byte[]> regions) throws IOException {
  List<RegionPruneInfo> regionPruneInfos = new ArrayList<>();
  try (Table stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (regions == null || regions.contains(region)) {
          Cell cell = next.getColumnLatestCell(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (cell != null) {
            byte[] pruneUpperBoundBytes = CellUtil.cloneValue(cell);
            long timestamp = cell.getTimestamp();
            regionPruneInfos.add(new RegionPruneInfo(region, Bytes.toStringBinary(region),
                                                     Bytes.toLong(pruneUpperBoundBytes), timestamp));
          }
        }
      }
    }
  }
  return Collections.unmodifiableList(regionPruneInfos);
}
 
Example 11 - Project: phoenix, File: IndexHalfStoreFileReader.java
/**
 * @param fs
 * @param p
 * @param cacheConf
 * @param in
 * @param size
 * @param r
 * @param conf
 * @param indexMaintainers
 * @param viewConstants
 * @param regionInfo
 * @param regionStartKeyInHFile
 * @param splitKey
 * @throws IOException
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final HRegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
    super(fs, p, in, size, cacheConf, conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.regionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
}
 
Example 12 - Project: hbase, File: FromClientSideBase.java
protected void assertSingleResult(Result result, byte [] row, byte [] family,
  byte [] qualifier, byte [] value) {
  assertTrue("Expected row [" + Bytes.toString(row) + "] " +
      "Got row [" + Bytes.toString(result.getRow()) +"]",
    equals(row, result.getRow()));
  assertEquals("Expected a single key but result contains " + result.size(), 1, result.size());
  Cell kv = result.rawCells()[0];
  assertTrue("Expected family [" + Bytes.toString(family) + "] " +
      "Got family [" + Bytes.toString(CellUtil.cloneFamily(kv)) + "]",
    equals(family, CellUtil.cloneFamily(kv)));
  assertTrue("Expected qualifier [" + Bytes.toString(qualifier) + "] " +
      "Got qualifier [" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "]",
    equals(qualifier, CellUtil.cloneQualifier(kv)));
  assertTrue("Expected value [" + Bytes.toString(value) + "] " +
      "Got value [" + Bytes.toString(CellUtil.cloneValue(kv)) + "]",
    equals(value, CellUtil.cloneValue(kv)));
}
 
Example 13 - Project: hbase, File: RSRpcServices.java
private static Get toGet(final Mutation mutation) throws IOException {
  if(!(mutation instanceof Increment) && !(mutation instanceof Append)) {
    throw new AssertionError("mutation must be a instance of Increment or Append");
  }
  Get get = new Get(mutation.getRow());
  CellScanner cellScanner = mutation.cellScanner();
  while (cellScanner.advance()) {
    Cell cell = cellScanner.current();
    get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
  }
  if (mutation instanceof Increment) {
    // Increment
    Increment increment = (Increment) mutation;
    get.setTimeRange(increment.getTimeRange().getMin(), increment.getTimeRange().getMax());
  } else {
    // Append
    Append append = (Append) mutation;
    get.setTimeRange(append.getTimeRange().getMin(), append.getTimeRange().getMax());
  }
  for (Entry<String, byte[]> entry : mutation.getAttributesMap().entrySet()) {
    get.setAttribute(entry.getKey(), entry.getValue());
  }
  return get;
}
 
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
  Result result = table.get(get);
  if (expectedValues == null) {
    assertTrue(result.isEmpty());
  } else {
    assertFalse(result.isEmpty());
    byte[] family = TestBytes.family;
    byte[] col = TestBytes.qualifier;
    if (get.hasFamilies()) {
      family = get.getFamilyMap().keySet().iterator().next();
      col = get.getFamilyMap().get(family).first();
    }
    Iterator<Cell> it = result.getColumnCells(family, col).iterator();
    for (byte[] expectedValue : expectedValues) {
      Assert.assertTrue(it.hasNext());
      assertArrayEquals(expectedValue, CellUtil.cloneValue(it.next()));
    }
  }
}
 
Example 15 - Project: phoenix, File: ApplyAndFilterDeletesFilter.java
/**
 * @param next
 * @return
 */
public boolean matchesPoint(KeyValue next) {
  // point deletes only apply to the exact KV that they reference, so we only need to ensure
  // that the timestamp matches exactly. Because we sort by timestamp first, either the next
  // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
  // one.
  if (pointDelete != null && CellUtil.matchingFamily(pointDelete, next)
      && CellUtil.matchingQualifier(pointDelete, next)) {
    if (pointDelete.getTimestamp() == next.getTimestamp()) {
      return true;
    }
    // clear the point delete since the TS must not be matching
    coveringDelete.pointDelete = null;
  }
  return false;
}
 
Example 16 - Project: hraven, File: JobHistoryRawService.java
/**
 * attempts to approximately set the job submit time based on the last
 * modification time of the job history file
 * @param value result
 * @return approximate job submit time
 * @throws MissingColumnInResultException
 */
public long getApproxSubmitTime(Result value)
    throws MissingColumnInResultException {
  if (value == null) {
    throw new IllegalArgumentException(
        "Cannot get last modification time from " + "a null hbase result");
  }

  Cell cell = value.getColumnLatestCell(Constants.INFO_FAM_BYTES,
      Constants.JOBHISTORY_LAST_MODIFIED_COL_BYTES);

  if (cell == null) {
    throw new MissingColumnInResultException(Constants.INFO_FAM_BYTES,
        Constants.JOBHISTORY_LAST_MODIFIED_COL_BYTES);
  }

  byte[] lastModTimeBytes = CellUtil.cloneValue(cell);
  // we try to approximately set the job submit time based on when the job
  // history file
  // was last modified and an average job duration
  long lastModTime = Bytes.toLong(lastModTimeBytes);
  long jobSubmitTimeMillis = lastModTime - Constants.AVERGAE_JOB_DURATION;
  LOG.debug("Approximate job submit time is " + jobSubmitTimeMillis
      + " based on " + lastModTime);
  return jobSubmitTimeMillis;
}
 
Example 17 - Project: hbase, File: ProtobufLogTestHelper.java
public static void doRead(ProtobufLogReader reader, boolean withTrailer, RegionInfo hri,
    TableName tableName, int columnCount, int recordCount, byte[] row, long timestamp)
    throws IOException {
  if (withTrailer) {
    assertNotNull(reader.trailer);
  } else {
    assertNull(reader.trailer);
  }
  for (int i = 0; i < recordCount; ++i) {
    WAL.Entry entry = reader.next();
    assertNotNull(entry);
    assertEquals(columnCount, entry.getEdit().size());
    assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
    assertEquals(tableName, entry.getKey().getTableName());
    int idx = 0;
    for (Cell val : entry.getEdit().getCells()) {
      assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(),
        val.getRowLength()));
      assertArrayEquals(toValue(i, idx), CellUtil.cloneValue(val));
      idx++;
    }
  }
  assertNull(reader.next());
}
 
@Override
public Cell transformCell(Cell cell) throws IOException {
  // Convert Tephra deletes back into HBase deletes
  if (tx.getVisibilityLevel() == Transaction.VisibilityLevel.SNAPSHOT_ALL) {
    if (DeleteTracker.isFamilyDelete(cell)) {
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), null, cell.getTimestamp(),
                          KeyValue.Type.DeleteFamily);
    } else if (isColumnDelete(cell)) {
      // Note: in some cases KeyValue.Type.Delete is used in Delete object,
      // and in some other cases KeyValue.Type.DeleteColumn is used.
      // Since Tephra cannot distinguish between the two, we return KeyValue.Type.DeleteColumn.
      // KeyValue.Type.DeleteColumn makes both CellUtil.isDelete and CellUtil.isDeleteColumns return true, and will
      // work in both cases.
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
                          cell.getTimestamp(), KeyValue.Type.DeleteColumn);
    }
  }
  return cell;
}
 
Example 19 - Project: hbase, File: TestResult.java
public void testBasicGetColumn() throws Exception {
  KeyValue [] kvs = genKVs(row, family, value, 1, 100);

  Arrays.sort(kvs, CellComparator.getInstance());

  Result r = Result.create(kvs);

  for (int i = 0; i < 100; ++i) {
    final byte[] qf = Bytes.toBytes(i);

    List<Cell> ks = r.getColumnCells(family, qf);
    assertEquals(1, ks.size());
    assertTrue(CellUtil.matchingQualifier(ks.get(0), qf));
    assertEquals(ks.get(0), r.getColumnLatestCell(family, qf));
  }
}
 
Example 20 - Project: phoenix, File: IndexRegionObserver.java
/**
 * IndexMaintainer.getIndexedColumns() returns the data column references for indexed columns. The data columns are
 * grouped into three classes, pk columns (data table pk columns), the indexed columns (the columns for which
 * we want to have indexing; they form the prefix for the primary key for the index table (after salt and tenant id))
 * and covered columns. The purpose of this method is to find out if all the indexed columns are included in the
 * pending data table mutation pointed by multiMutation.
 */
private boolean hasAllIndexedColumns(IndexMaintainer indexMaintainer, MultiMutation multiMutation) {
    Map<byte[], List<Cell>> familyMap = multiMutation.getFamilyCellMap();
    for (ColumnReference columnReference : indexMaintainer.getIndexedColumns()) {
        byte[] family = columnReference.getFamily();
        List<Cell> cellList = familyMap.get(family);
        if (cellList == null) {
            return false;
        }
        boolean has = false;
        for (Cell cell : cellList) {
            if (CellUtil.matchingColumn(cell, family, columnReference.getQualifier())) {
                has = true;
                break;
            }
        }
        if (!has) {
            return false;
        }
    }
    return true;
}
 
Example 21 - Project: hbase, File: SingleColumnValueFilter.java
@Override
public ReturnCode filterCell(final Cell c) {
  // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + Bytes.toString(keyValue.getValue()));
  if (this.matchedColumn) {
    // We already found and matched the single column, all keys now pass
    return ReturnCode.INCLUDE;
  } else if (this.latestVersionOnly && this.foundColumn) {
    // We found but did not match the single column, skip to next row
    return ReturnCode.NEXT_ROW;
  }
  if (!CellUtil.matchingColumn(c, this.columnFamily, this.columnQualifier)) {
    return ReturnCode.INCLUDE;
  }
  foundColumn = true;
  if (filterColumnValue(c)) {
    return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE;
  }
  this.matchedColumn = true;
  return ReturnCode.INCLUDE;
}
 
Example 22 - Project: hbase, File: WriteHeavyIncrementObserver.java
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result)
    throws IOException {
  Scan scan =
      new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
  NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums =
      new TreeMap<>(Bytes.BYTES_COMPARATOR);
  get.getFamilyMap().forEach((cf, cqs) -> {
    NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    sums.put(cf, ss);
    cqs.forEach(cq -> {
      ss.put(cq, new MutableLong(0));
      scan.addColumn(cf, cq);
    });
  });
  List<Cell> cells = new ArrayList<>();
  try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
    boolean moreRows;
    do {
      moreRows = scanner.next(cells);
      for (Cell cell : cells) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
        sums.get(family).get(qualifier).add(value);
      }
      cells.clear();
    } while (moreRows);
  }
  sums.forEach((cf, m) -> m.forEach((cq, s) -> result
      .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
  c.bypass();
}
 
Example 23 - Project: phoenix, File: TestApplyAndFilterDeletesFilter.java
/**
 * Hinting with this filter is a little convoluted as we binary search the list of families to
 * attempt to find the right one to seek.
 */
@Test
public void testHintCorrectlyToNextFamily() {
  // start with doing a family delete, so we will seek to the next column
  KeyValue kv = createKvForType(Type.DeleteFamily);
  ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
  assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
  KeyValue next = createKvForType(Type.Put);
  // make sure the hint is our attempt at the end key, because we have no more families to seek
  assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
    filter.filterKeyValue(next));
  assertEquals("Didn't get END_KEY with no families to match", KeyValue.LOWESTKEY,
    filter.getNextCellHint(next));

  // check for a family that comes before our family, so we always seek to the end as well
  filter = new ApplyAndFilterDeletesFilter(asSet(Bytes.toBytes("afamily")));
  assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
  // make sure the hint is our attempt at the end key, because we have no more families to seek
  assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
    filter.filterKeyValue(next));
  assertEquals("Didn't get END_KEY with no families to match", KeyValue.LOWESTKEY,
    filter.getNextCellHint(next));

  // check that we seek to the correct family that comes after our family
  byte[] laterFamily = Bytes.toBytes("zfamily");
  filter = new ApplyAndFilterDeletesFilter(asSet(laterFamily));
  assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
  KeyValue expected = KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]);
  assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
    filter.filterKeyValue(next));
  assertEquals("Didn't get correct next key with a next family", expected,
    filter.getNextCellHint(next));
}
 
private void copyAcl(String origCubeId, String newCubeId, String projectName) throws Exception {
    String projectResPath = ProjectInstance.concatResourcePath(projectName);
    Serializer<ProjectInstance> projectSerializer = new JsonSerializer<ProjectInstance>(ProjectInstance.class);
    ProjectInstance project = store.getResource(projectResPath, projectSerializer);
    String projUUID = project.getUuid();
    Table aclHtable = null;
    try {
        aclHtable = HBaseConnection.get(kylinConfig.getStorageUrl()).getTable(TableName.valueOf(kylinConfig.getMetadataUrlPrefix() + "_acl"));

        // cube acl
        Result result = aclHtable.get(new Get(Bytes.toBytes(origCubeId)));
        if (result.listCells() != null) {
            for (Cell cell : result.listCells()) {
                byte[] family = CellUtil.cloneFamily(cell);
                byte[] column = CellUtil.cloneQualifier(cell);
                byte[] value = CellUtil.cloneValue(cell);

                // use the target project uuid as the parent
                if (Bytes.toString(family).equals(ACL_INFO_FAMILY) && Bytes.toString(column).equals(ACL_INFO_FAMILY_PARENT_COLUMN)) {
                    String valueString = "{\"id\":\"" + projUUID + "\",\"type\":\"org.apache.kylin.metadata.project.ProjectInstance\"}";
                    value = Bytes.toBytes(valueString);
                }
                Put put = new Put(Bytes.toBytes(newCubeId));
                put.add(family, column, value);
                aclHtable.put(put);
            }
        }
    } finally {
        IOUtils.closeQuietly(aclHtable);
    }
}
 
Example 25 - Project: hbase, File: MobTestUtil.java
/**
 * Compare two Cells only for their row family qualifier value
 */
public static void assertCellEquals(Cell firstKeyValue, Cell secondKeyValue) {
  Assert.assertArrayEquals(CellUtil.cloneRow(firstKeyValue),
      CellUtil.cloneRow(secondKeyValue));
  Assert.assertArrayEquals(CellUtil.cloneFamily(firstKeyValue),
      CellUtil.cloneFamily(secondKeyValue));
  Assert.assertArrayEquals(CellUtil.cloneQualifier(firstKeyValue),
      CellUtil.cloneQualifier(secondKeyValue));
  Assert.assertArrayEquals(CellUtil.cloneValue(firstKeyValue),
      CellUtil.cloneValue(secondKeyValue));
}
 
Example 26 - Project: hugegraph, File: HbaseTable.java
protected void parseRowColumns(Result row, BackendEntry entry, Query query)
                               throws IOException {
    CellScanner cellScanner = row.cellScanner();
    while (cellScanner.advance()) {
        Cell cell = cellScanner.current();
        entry.columns(BackendColumn.of(CellUtil.cloneQualifier(cell),
                                       CellUtil.cloneValue(cell)));
    }
}
 
Example 27 - Project: java-study, File: HBaseUtil.java
/**
 * Query all rows in the given table.
 *
 * @param tableName
 *            the table name
 */
public static void select(String tableName) {
	if(null==tableName||tableName.length()==0){
		return;
	}
	Table t = null;
	List<Map<String,Object>> list=new ArrayList<Map<String,Object>>();
	try {
		t = getConnection().getTable(TableName.valueOf(tableName));
		// Read operation
		Scan scan = new Scan();
		// Get the scan result set
		ResultScanner rs = t.getScanner(scan);
		if (null == rs ) {
			return;
		}
		for (Result result : rs) {
			// Get the list of cells for this row
			List<Cell> cs = result.listCells();
			if (null == cs || cs.size() == 0) {
				continue;
			}
			for (Cell cell : cs) {
				Map<String,Object> map=new HashMap<String, Object>();
				map.put("rowKey", Bytes.toString(CellUtil.cloneRow(cell)));// 取行健
				map.put("timestamp", cell.getTimestamp());// 取到时间戳
				map.put("family", Bytes.toString(CellUtil.cloneFamily(cell)));// 取到列族
				map.put("qualifier", Bytes.toString(CellUtil.cloneQualifier(cell)));// 取到列
				map.put("value", Bytes.toString(CellUtil.cloneValue(cell)));// 取到值
				list.add(map);
			}
		}
		System.out.println("查询的数据:"+list);
	} catch (IOException e) {
		System.out.println("查询失败!");
		e.printStackTrace();
	} finally {
		close();
	}
}
 
Example 28 - Project: phoenix, File: IndexMaintainer.java
private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<? extends Cell> pendingUpdates, long ts) throws IOException {
    if (pendingUpdates.isEmpty()) {
        return false;
    }
    Map<ColumnReference,Cell> newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size()); 
    for (Cell kv : pendingUpdates) {
        newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv);
    }
    for (ColumnReference ref : indexedColumns) {
    	Cell newValue = newState.get(ref);
    	if (newValue != null) { // Indexed column has potentially changed
    	    ImmutableBytesWritable oldValue = oldState.getLatestValue(ref, ts);
            boolean newValueSetAsNull = (newValue.getTypeByte() == Type.DeleteColumn.getCode() || newValue.getTypeByte() == Type.Delete.getCode() || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY));
    		boolean oldValueSetAsNull = oldValue == null || oldValue.getLength() == 0;
    		//If the new column value has to be set as null and the older value is null too,
    		//then just skip to the next indexed column.
    		if (newValueSetAsNull && oldValueSetAsNull) {
    			continue;
    		}
    		if (oldValueSetAsNull || newValueSetAsNull) {
    			return true;
    		}
    		// If the old value is different than the new value, the index row needs to be deleted
    		if (Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), 
    				newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0) {
    			return true;
    		}
    	}
    }
    return false;
}
 
Example 29 - Project: hgraphdb, File: VertexReader.java
@Override
public void load(Vertex vertex, Result result) {
    if (result.isEmpty()) {
        throw new HBaseGraphNotFoundException(vertex, "Vertex does not exist: " + vertex.id());
    }
    String label = null;
    Long createdAt = null;
    Long updatedAt = null;
    Map<String, byte[]> rawProps = new HashMap<>();
    for (Cell cell : result.listCells()) {
        String key = Bytes.toString(CellUtil.cloneQualifier(cell));
        if (!Graph.Hidden.isHidden(key)) {
            rawProps.put(key, CellUtil.cloneValue(cell));
        } else if (key.equals(Constants.LABEL)) {
            label = ValueUtils.deserialize(CellUtil.cloneValue(cell));
        } else if (key.equals(Constants.CREATED_AT)) {
            createdAt = ValueUtils.deserialize(CellUtil.cloneValue(cell));
        } else if (key.equals(Constants.UPDATED_AT)) {
            updatedAt = ValueUtils.deserialize(CellUtil.cloneValue(cell));
        }
    }
    final String labelStr = label;
    Map<String, Object> props = rawProps.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
            e -> ValueUtils.deserializePropertyValue(graph, ElementType.VERTEX, labelStr, e.getKey(), e.getValue())));
    HBaseVertex newVertex = new HBaseVertex(graph, vertex.id(), label, createdAt, updatedAt, props);
    ((HBaseVertex) vertex).copyFrom(newVertex);
}
 
Example 30 - Project: phoenix-omid, File: SnapshotFilterImpl.java
private void healShadowCell(Cell cell, long commitTimestamp) {
    Put put = new Put(CellUtil.cloneRow(cell));
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] shadowCellQualifier = CellUtils.addShadowCellSuffixPrefix(cell.getQualifierArray(),
                                                               cell.getQualifierOffset(),
                                                               cell.getQualifierLength());
    put.addColumn(family, shadowCellQualifier, cell.getTimestamp(), Bytes.toBytes(commitTimestamp));
    try {
        tableAccessWrapper.put(put);
    } catch (IOException e) {
        LOG.warn("Failed healing shadow cell for kv {}", cell, e);
    }
}
 