org.apache.hadoop.hbase.Cell#getFamilyLength() source code examples

The following examples show how org.apache.hadoop.hbase.Cell#getFamilyLength() is used in open-source projects; the full source of each file can be viewed on GitHub.
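All of the examples share one accessor pattern: getFamilyArray(), getFamilyOffset(), and getFamilyLength() together address the column family as a slice of the cell's backing array, without copying. Before the project examples, here is a minimal sketch of that pattern (assuming hbase-common on the classpath; the class and method names are illustrative):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilySliceSketch {

    // Copy the family bytes out of the backing array by hand.
    static byte[] copyFamily(Cell cell) {
        byte[] family = new byte[cell.getFamilyLength()];
        System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(),
                family, 0, cell.getFamilyLength());
        return family;
    }

    // The same copy via the utility that HBase ships.
    static byte[] copyFamilyViaUtil(Cell cell) {
        return CellUtil.cloneFamily(cell);
    }

    // Render the family as a String without an intermediate copy.
    static String familyAsString(Cell cell) {
        return Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(),
                cell.getFamilyLength());
    }
}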

Example 1 (Project: Halyard, File: HalyardBulkDelete.java)
@Override
protected void map(ImmutableBytesWritable key, Result value, Context output) throws IOException, InterruptedException {
    for (Cell c : value.rawCells()) {
        Statement st = HalyardTableUtils.parseStatement(c, SVF);
        if ((subj == null || subj.equals(st.getSubject()))
                && (pred == null || pred.equals(st.getPredicate()))
                && (obj == null || obj.equals(st.getObject()))
                && (ctx == null || ctx.contains(st.getContext()))) {
            KeyValue kv = new KeyValue(c.getRowArray(), c.getRowOffset(), (int) c.getRowLength(),
                c.getFamilyArray(), c.getFamilyOffset(), (int) c.getFamilyLength(),
                c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength(),
                c.getTimestamp(), KeyValue.Type.DeleteColumn, c.getValueArray(), c.getValueOffset(),
                c.getValueLength(), c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
            output.write(new ImmutableBytesWritable(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()), kv);
            deleted++;
        } else {
            output.progress();
        }
        if (total++ % 10000L == 0) {
            String msg = MessageFormat.format("{0} / {1} cells deleted", deleted, total);
            output.setStatus(msg);
            LOG.log(Level.INFO, msg);
        }
    }

}
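A note on the (int) casts above: the Cell interface deliberately declares narrow return types for the key element lengths, so the casts are redundant (the widening conversion is implicit in Java) but harmless. The relevant signatures are:

short getRowLength();     // a row key is limited to Short.MAX_VALUE bytes
byte getFamilyLength();   // a family name is limited to 127 bytes
int getQualifierLength();
int getValueLength();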
 
Example 2 (Project: phoenix, File: IndexMaintainer.java)
public ValueGetter createGetterFromKeyValues(final byte[] rowKey, Collection<? extends Cell> pendingUpdates) {
    final Map<ReferencingColumn, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates
            .size());
    for (Cell kv : pendingUpdates) {
        // create new pointers to each part of the kv
        ImmutableBytesPtr family = new ImmutableBytesPtr(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength());
        ImmutableBytesPtr qual = new ImmutableBytesPtr(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
        ImmutableBytesPtr value = new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
        valueMap.put(new ReferencingColumn(family, qual), value);
    }
    return new ValueGetter() {
        @Override
        public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
            if (ref.equals(dataEmptyKeyValueRef)) return null;
            return valueMap.get(ReferencingColumn.wrap(ref));
        }
        @Override
        public byte[] getRowKey() {
            return rowKey;
        }
    };
}
 
Example 3 (Project: hbase, File: StoreFileReader.java)
/**
 * A method for checking Bloom filters. Called directly from
 * StoreFileScanner in case of a multi-column query.
 *
 * @param cell
 *          the cell to check if present in BloomFilter
 * @return True if passes
 */
public boolean passesGeneralRowColBloomFilter(Cell cell) {
  BloomFilter bloomFilter = this.generalBloomFilter;
  if (bloomFilter == null) {
    return true;
  }
  // Used in ROW_COL bloom
  Cell kvKey = null;
  // If the incoming key is already a fake rowcol key, use it as is
  if (cell.getTypeByte() == KeyValue.Type.Maximum.getCode() && cell.getFamilyLength() == 0) {
    kvKey = cell;
  } else {
    kvKey = PrivateCellUtil.createFirstOnRowCol(cell);
  }
  return checkGeneralBloomFilter(null, kvKey, bloomFilter);
}
 
Example 4 (Project: hbase, File: HashTable.java)
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);

    if (!ignoreTimestamps) {
      long ts = cell.getTimestamp();
      // Digest the 8 timestamp bytes, least-significant byte first.
      for (int i = 8; i > 0; i--) {
        digest.update((byte) ts);
        ts >>>= 8;
      }
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);

    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength; // 8 = timestamp bytes hashed above
  }
}
 
Example 5 (Project: phoenix, File: MetaDataUtil.java)
public static void mutatePutValue(Put somePut, byte[] family, byte[] qualifier, byte[] newValue) {
    NavigableMap<byte[], List<Cell>> familyCellMap = somePut.getFamilyCellMap();
    List<Cell> cells = familyCellMap.get(family);
    List<Cell> newCells = Lists.newArrayList();
    if (cells != null) {
        for (Cell cell : cells) {
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
                qualifier, 0, qualifier.length) == 0) {
                Cell replacementCell = new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
                    cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(),
                    cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(),
                    KeyValue.Type.codeToType(cell.getType().getCode()), newValue, 0, newValue.length);
                newCells.add(replacementCell);
            } else {
                newCells.add(cell);
            }
        }
        familyCellMap.put(family, newCells);
    }
}
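A hypothetical call site for the helper above (the row, family, and qualifier names are illustrative, not from Phoenix):

Put put = new Put(Bytes.toBytes("row-1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("old"));
// Replaces the cell's value in the Put's family map while keeping its row,
// family, qualifier, timestamp, and type.
MetaDataUtil.mutatePutValue(put, Bytes.toBytes("cf"), Bytes.toBytes("col"),
        Bytes.toBytes("new"));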
 
Example 6 (Project: phoenix-omid, File: CellUtils.java)
/**
 * Returns a new shadow cell created from a particular cell.
 * @param cell
 *            the cell to reconstruct the shadow cell from.
 * @param shadowCellValue
 *            the value for the newly created shadow cell
 * @return the brand-new shadow cell
 */
public static Cell buildShadowCellFromCell(Cell cell, byte[] shadowCellValue) {
    byte[] shadowCellQualifier = addShadowCellSuffixPrefix(cell.getQualifierArray(),
            cell.getQualifierOffset(),
            cell.getQualifierLength());
    return new KeyValue(
            cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
            cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
            shadowCellQualifier, 0, shadowCellQualifier.length,
            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
            shadowCellValue, 0, shadowCellValue.length);
}
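A hedged sketch of a call site (dataCell, commitTimestamp, and the Put are assumptions for illustration; Omid's real call sites differ):

// Hypothetical usage: persist a transaction's commit timestamp in a shadow
// cell that sits beside the data cell, under the suffixed qualifier.
byte[] shadowValue = Bytes.toBytes(commitTimestamp);
Cell shadowCell = CellUtils.buildShadowCellFromCell(dataCell, shadowValue);
put.add(shadowCell);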
 
Example 7 (Project: hbase, File: CodecPerformance.java)
static int getRoughSize(final Cell[] cells) {
  int size = 0;
  for (Cell c : cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE; // timestamp + type byte
  }
  return size;
}
 
Example 8 (Project: hbase, File: BufferedDataBlockEncoder.java)
private int compareTypeBytes(Cell key, Cell right) {
  if (key.getFamilyLength() + key.getQualifierLength() == 0
      && key.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (right.getFamilyLength() + right.getQualifierLength() == 0
      && right.getTypeByte() == Type.Minimum.getCode()) {
    // right is "bigger", i.e. it appears later in the sorted order
    return -1;
  }
  return 0;
}
 
Example 9 (Project: phoenix, File: LocalIndexStoreFileScanner.java)
private Cell getChangedKey(Cell next, boolean changeBottomKeys) {
    // If this is a top store file, replace the region start key in the row key
    // with the split key, and produce the new cell corresponding to the changed key.
    byte[] changedKey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys);
    KeyValue changedKv =
            new KeyValue(changedKey, 0, changedKey.length, next.getFamilyArray(),
                next.getFamilyOffset(), next.getFamilyLength(), next.getQualifierArray(),
                next.getQualifierOffset(), next.getQualifierLength(),
                next.getTimestamp(), Type.codeToType(next.getTypeByte()),
                next.getValueArray(), next.getValueOffset(), next.getValueLength(),
                next.getTagsArray(), next.getTagsOffset(), next.getTagsLength());
    return changedKv;
}
 
Example 10 (Project: hbase-operator-tools, File: TableReporter.java)
/**
 * @return Sum of all elements that make up a key; does not include infrastructure, tags or
 *         values.
 */
private static int getSumOfCellKeyElementLengths(final Cell cell) {
  return cell.getRowLength() + cell.getFamilyLength() + cell.getQualifierLength()
      + KeyValue.TIMESTAMP_TYPE_SIZE; // the 8-byte timestamp plus the 1-byte type
}
 
Example 11
private ImmutableBytesWritable createImmutableBytesWritable(Cell v) {
    return new ImmutableBytesWritable(v.getFamilyArray(),
            v.getFamilyOffset(), v.getFamilyLength());
}
 
Example 12 (Project: hbase, File: SecureWALCellCodec.java)
@Override
public void write(Cell cell) throws IOException {
  if (encryptor == null) {
    super.write(cell);
    return;
  }

  byte[] iv = nextIv();
  encryptor.setIv(iv);
  encryptor.reset();

  // TODO: Check if this is a cell for an encrypted CF. If not, we can
  // write a 0 here to signal an unwrapped cell and just dump the KV bytes
  // afterward

  StreamUtils.writeRawVInt32(out, iv.length);
  out.write(iv);

  // TODO: Add support for WAL compression

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  OutputStream cout = encryptor.createEncryptionStream(baos);
  ByteBufferWriterOutputStream bos = new ByteBufferWriterOutputStream(cout);
  int tlen = cell.getTagsLength();
  // Write the KeyValue infrastructure as VInts.
  StreamUtils.writeRawVInt32(bos, KeyValueUtil.keyLength(cell));
  StreamUtils.writeRawVInt32(bos, cell.getValueLength());
  // To support tags
  StreamUtils.writeRawVInt32(bos, tlen);

  // Write row, qualifier, and family
  short rowLength = cell.getRowLength();
  StreamUtils.writeRawVInt32(bos, rowLength);
  PrivateCellUtil.writeRow(bos, cell, rowLength);
  byte familyLength = cell.getFamilyLength();
  StreamUtils.writeRawVInt32(bos, familyLength);
  PrivateCellUtil.writeFamily(bos, cell, familyLength);
  int qualifierLength = cell.getQualifierLength();
  StreamUtils.writeRawVInt32(bos, qualifierLength);
  PrivateCellUtil.writeQualifier(bos, cell, qualifierLength);
  // Write the rest ie. ts, type, value and tags parts
  StreamUtils.writeLong(bos, cell.getTimestamp());
  bos.write(cell.getTypeByte());
  PrivateCellUtil.writeValue(bos, cell, cell.getValueLength());
  if (tlen > 0) {
    PrivateCellUtil.writeTags(bos, cell, tlen);
  }
  bos.close();

  StreamUtils.writeRawVInt32(out, baos.size());
  baos.writeTo(out);

  // Increment IV given the final payload length
  incrementIv(baos.size());
}
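For orientation, the plaintext that gets encrypted above has the following layout, reconstructed purely from the writes in this method:

// vint  key length         (KeyValueUtil.keyLength(cell))
// vint  value length
// vint  tags length
// vint  row length,        then the row bytes
// vint  family length,     then the family bytes
// vint  qualifier length,  then the qualifier bytes
// long  timestamp          (StreamUtils.writeLong)
// byte  type
//       value bytes
//       tag bytes          (only when tags length > 0)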
 
Example 13 (Project: hbase, File: StoreFileScanner.java)
/**
 * Pretend we have done a seek but don't do it yet, if possible. The hope is
 * that we find requested columns in more recent files and won't have to seek
 * in older files. Creates a fake key/value with the given row/column and the
 * highest (most recent) possible timestamp we might get from this file. When
 * users of such "lazy scanner" need to know the next KV precisely (e.g. when
 * this scanner is at the top of the heap), they run {@link #enforceSeek()}.
 * <p>
 * Note that this function does guarantee that the current KV of this scanner
 * will be advanced to at least the given KV. Because of this, it does have
 * to do a real seek in cases when the seek timestamp is older than the
 * highest timestamp of the file, e.g. when we are trying to seek to the next
 * row/column and use OLDEST_TIMESTAMP in the seek key.
 */
@Override
public boolean requestSeek(Cell kv, boolean forward, boolean useBloom)
    throws IOException {
  if (kv.getFamilyLength() == 0) {
    useBloom = false;
  }

  boolean haveToSeek = true;
  if (useBloom) {
    // check ROWCOL Bloom filter first.
    if (reader.getBloomFilterType() == BloomType.ROWCOL) {
      haveToSeek = reader.passesGeneralRowColBloomFilter(kv);
    } else if (canOptimizeForNonNullColumn
        && ((PrivateCellUtil.isDeleteFamily(kv)
            || PrivateCellUtil.isDeleteFamilyVersion(kv)))) {
      // if there is no such delete family kv in the store file,
      // then no need to seek.
      haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(),
        kv.getRowLength());
    }
  }

  delayedReseek = forward;
  delayedSeekKV = kv;

  if (haveToSeek) {
    // This row/column might be in this store file (or we did not use the
    // Bloom filter), so we still need to seek.
    realSeekDone = false;
    long maxTimestampInFile = reader.getMaxTimestamp();
    long seekTimestamp = kv.getTimestamp();
    if (seekTimestamp > maxTimestampInFile) {
      // Create a fake key that is not greater than the real next key.
      // (Lower timestamps correspond to higher KVs.)
      // To understand this better, consider that we are asked to seek to
      // a higher timestamp than the max timestamp in this file. We know that
      // the next point when we have to consider this file again is when we
      // pass the max timestamp of this file (with the same row/column).
      setCurrentCell(PrivateCellUtil.createFirstOnRowColTS(kv, maxTimestampInFile));
    } else {
      // This will be the case e.g. when we need to seek to the next
      // row/column, and we don't know exactly what they are, so we set the
      // seek key's timestamp to OLDEST_TIMESTAMP to skip the rest of this
      // row/column.
      enforceSeek();
    }
    return cur != null;
  }

  // Multi-column Bloom filter optimization.
  // Create a fake key/value, so that this scanner only bubbles up to the top
  // of the KeyValueHeap in StoreScanner after we scanned this row/column in
  // all other store files. The query matcher will then just skip this fake
  // key/value and the store scanner will progress to the next column. This
  // is obviously not a "real real" seek, but unlike the fake KV earlier in
  // this method, we want this to be propagated to ScanQueryMatcher.
  setCurrentCell(PrivateCellUtil.createLastOnRowCol(kv));

  realSeekDone = true;
  return true;
}
 
Example 14 (Project: spliceengine, File: CellByteBufferArrayUtils.java)
public static boolean matchingFamily(Cell keyValue, byte[] family) {
    // True only when both arguments are non-null, the lengths match, and the
    // family bytes in the cell's backing buffer equal the given family.
    return !(family == null || keyValue == null || family.length != keyValue.getFamilyLength()) &&
            ArrayUtil.equals(CellUtils.getBuffer(keyValue), keyValue.getFamilyOffset(), family, 0,
                             keyValue.getFamilyLength());
}
 
Example 15 (Project: hbase, File: DiffKeyDeltaEncoder.java)
private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell)
    throws IOException {
  int flag = 0; // Do not use more bits than can fit into a byte
  int kLength = KeyValueUtil.keyLength(cell);
  int vLength = cell.getValueLength();

  long timestamp;
  long diffTimestamp = 0;
  int diffTimestampFitsInBytes = 0;
  int timestampFitsInBytes;
  int commonPrefix = 0;

  if (prevCell == null) {
    timestamp = cell.getTimestamp();
    if (timestamp < 0) {
      flag |= FLAG_TIMESTAMP_SIGN;
      timestamp = -timestamp;
    }
    timestampFitsInBytes = ByteBufferUtils.longFitsIn(timestamp);
    flag |= (timestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH;
    // put column family
    byte familyLength = cell.getFamilyLength();
    out.write(familyLength);
    PrivateCellUtil.writeFamily(out, cell, familyLength);
  } else {
    // Finding common prefix
    int preKeyLength = KeyValueUtil.keyLength(prevCell);
    commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(cell, prevCell, true, false);
    if (kLength == preKeyLength) {
      flag |= FLAG_SAME_KEY_LENGTH;
    }
    if (vLength == prevCell.getValueLength()) {
      flag |= FLAG_SAME_VALUE_LENGTH;
    }
    if (cell.getTypeByte() == prevCell.getTypeByte()) {
      flag |= FLAG_SAME_TYPE;
    }
    // timestamp and type are not prefix-compressed; store the timestamp
    // (or its diff from the previous cell) separately
    timestamp = cell.getTimestamp();
    diffTimestamp = prevCell.getTimestamp() - timestamp;
    boolean negativeTimestamp = timestamp < 0;
    if (negativeTimestamp) {
      timestamp = -timestamp;
    }
    timestampFitsInBytes = ByteBufferUtils.longFitsIn(timestamp);
    boolean minusDiffTimestamp = diffTimestamp < 0;
    if (minusDiffTimestamp) {
      diffTimestamp = -diffTimestamp;
    }
    diffTimestampFitsInBytes = ByteBufferUtils.longFitsIn(diffTimestamp);
    if (diffTimestampFitsInBytes < timestampFitsInBytes) {
      flag |= (diffTimestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH;
      flag |= FLAG_TIMESTAMP_IS_DIFF;
      if (minusDiffTimestamp) {
        flag |= FLAG_TIMESTAMP_SIGN;
      }
    } else {
      flag |= (timestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH;
      if (negativeTimestamp) {
        flag |= FLAG_TIMESTAMP_SIGN;
      }
    }
  }
  out.write(flag);
  if ((flag & FLAG_SAME_KEY_LENGTH) == 0) {
    ByteBufferUtils.putCompressedInt(out, kLength);
  }
  if ((flag & FLAG_SAME_VALUE_LENGTH) == 0) {
    ByteBufferUtils.putCompressedInt(out, vLength);
  }
  ByteBufferUtils.putCompressedInt(out, commonPrefix);
  short rLen = cell.getRowLength();
  if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
    // Previous and current rows are different. Copy the differing part of
    // the row, skip the column family, and copy the qualifier.
    PrivateCellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
    PrivateCellUtil.writeQualifier(out, cell, cell.getQualifierLength());
  } else {
    // The common part includes the whole row. As the column family is the
    // same across the whole file, it will automatically be included in the
    // common prefix, so we need not special-case it here.
    // What we write here is the non common part of the qualifier
    int commonQualPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
        - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
    PrivateCellUtil.writeQualifierSkippingBytes(out, cell, cell.getQualifierLength(),
      commonQualPrefix);
  }
  if ((flag & FLAG_TIMESTAMP_IS_DIFF) == 0) {
    ByteBufferUtils.putLong(out, timestamp, timestampFitsInBytes);
  } else {
    ByteBufferUtils.putLong(out, diffTimestamp, diffTimestampFitsInBytes);
  }

  if ((flag & FLAG_SAME_TYPE) == 0) {
    out.write(cell.getTypeByte());
  }
  PrivateCellUtil.writeValue(out, cell, vLength);
  return kLength + vLength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
}
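As a reading aid, the flag byte assembled above carries the following fields (inferred from the flags this method sets; the exact bit positions live in the encoder's constants):

// FLAG_SAME_KEY_LENGTH    key length matches the previous cell's
// FLAG_SAME_VALUE_LENGTH  value length matches the previous cell's
// FLAG_SAME_TYPE          type byte matches the previous cell's
// FLAG_TIMESTAMP_IS_DIFF  timestamp is stored as a diff from the previous cell
// FLAG_TIMESTAMP_SIGN     the stored timestamp (or diff) was negative
// (n - 1) << SHIFT_TIMESTAMP_LENGTH
//                         n = number of bytes the stored timestamp (or diff) needs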
 
Example 16 (Project: hbase, File: FastDiffDeltaEncoder.java)
private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell)
    throws IOException {
  int flag = 0; // Do not use more bits than will fit into a byte
  int kLength = KeyValueUtil.keyLength(cell);
  int vLength = cell.getValueLength();

  if (prevCell == null) {
    // copy the whole key; with no previous cell there is no common prefix
    out.write(flag);
    ByteBufferUtils.putCompressedInt(out, kLength);
    ByteBufferUtils.putCompressedInt(out, vLength);
    ByteBufferUtils.putCompressedInt(out, 0);
    PrivateCellUtil.writeFlatKey(cell, (DataOutput)out);
    // Write the value part
    PrivateCellUtil.writeValue(out, cell, cell.getValueLength());
  } else {
    int preKeyLength = KeyValueUtil.keyLength(prevCell);
    int preValLength = prevCell.getValueLength();
    // find a common prefix and skip it
    int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(cell, prevCell, true, false);

    if (kLength == preKeyLength) {
      flag |= FLAG_SAME_KEY_LENGTH;
    }
    if (vLength == prevCell.getValueLength()) {
      flag |= FLAG_SAME_VALUE_LENGTH;
    }
    if (cell.getTypeByte() == prevCell.getTypeByte()) {
      flag |= FLAG_SAME_TYPE;
    }

    byte[] curTsBuf = Bytes.toBytes(cell.getTimestamp());
    int commonTimestampPrefix = findCommonTimestampPrefix(curTsBuf,
        Bytes.toBytes(prevCell.getTimestamp()));

    flag |= commonTimestampPrefix << SHIFT_TIMESTAMP_LENGTH;

    // Check if current and previous values are the same. Compare value
    // length first as an optimization.
    if (vLength == preValLength
        && PrivateCellUtil.matchingValue(cell, prevCell, vLength, preValLength)) {
      flag |= FLAG_SAME_VALUE;
    }

    out.write(flag);
    if ((flag & FLAG_SAME_KEY_LENGTH) == 0) {
      ByteBufferUtils.putCompressedInt(out, kLength);
    }
    if ((flag & FLAG_SAME_VALUE_LENGTH) == 0) {
      ByteBufferUtils.putCompressedInt(out, vLength);
    }
    ByteBufferUtils.putCompressedInt(out, commonPrefix);
    short rLen = cell.getRowLength();
    if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
      // Previous and current rows are different. Copy the differing part of
      // the row, skip the column family, and copy the qualifier.
      PrivateCellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
      PrivateCellUtil.writeQualifier(out, cell, cell.getQualifierLength());
    } else {
      // The common part includes the whole row. As the column family is the
      // same across the whole file, it will automatically be included in the
      // common prefix, so we need not special-case it here.
      // What we write here is the non common part of the qualifier
      int commonQualPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
          - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
      PrivateCellUtil.writeQualifierSkippingBytes(out, cell, cell.getQualifierLength(),
        commonQualPrefix);
    }
    // Write non common ts part
    out.write(curTsBuf, commonTimestampPrefix, KeyValue.TIMESTAMP_SIZE - commonTimestampPrefix);

    // Write the type if it is not the same as before.
    if ((flag & FLAG_SAME_TYPE) == 0) {
      out.write(cell.getTypeByte());
    }

    // Write the value if it is not the same as before.
    if ((flag & FLAG_SAME_VALUE) == 0) {
      PrivateCellUtil.writeValue(out, cell, vLength);
    }
  }
  return kLength + vLength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
}
 
Example 17 (Project: hbase, File: PrefixKeyDeltaEncoder.java)
private void writeKeyExcludingCommon(Cell cell, int commonPrefix, DataOutputStream out)
    throws IOException {
  short rLen = cell.getRowLength();
  if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) {
    // Previous and current rows are different. Need to write the differing part followed by
    // cf,q,ts and type
    PrivateCellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out);
    byte fLen = cell.getFamilyLength();
    out.writeByte(fLen);
    PrivateCellUtil.writeFamily(out, cell, fLen);
    PrivateCellUtil.writeQualifier(out, cell, cell.getQualifierLength());
    out.writeLong(cell.getTimestamp());
    out.writeByte(cell.getTypeByte());
  } else {
    // The full row key part is common. The CF part will be common for sure as we deal
    // with cells in the same family. Just need to write the differing parts of q, ts and type.
    commonPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE)
        - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE);
    int qLen = cell.getQualifierLength();
    int commonQualPrefix = Math.min(commonPrefix, qLen);
    int qualPartLenToWrite = qLen - commonQualPrefix;
    if (qualPartLenToWrite > 0) {
      PrivateCellUtil.writeQualifierSkippingBytes(out, cell, qLen, commonQualPrefix);
    }
    commonPrefix -= commonQualPrefix;
    // Common part in TS also?
    if (commonPrefix > 0) {
      int commonTimestampPrefix = Math.min(commonPrefix, KeyValue.TIMESTAMP_SIZE);
      if (commonTimestampPrefix < KeyValue.TIMESTAMP_SIZE) {
        byte[] curTsBuf = Bytes.toBytes(cell.getTimestamp());
        out.write(curTsBuf, commonTimestampPrefix, KeyValue.TIMESTAMP_SIZE
            - commonTimestampPrefix);
      }
      commonPrefix -= commonTimestampPrefix;
      if (commonPrefix == 0) {
        out.writeByte(cell.getTypeByte());
      }
    } else {
      out.writeLong(cell.getTimestamp());
      out.writeByte(cell.getTypeByte());
    }
  }
}
 
Example 18 (Project: spliceengine, File: CellUtils.java)
public static Cell newKeyValue(Cell keyValue, byte[] value) {
    // getBuffer returns the KeyValue's single backing array; the row, family,
    // and qualifier are all slices of that one array.
    return new KeyValue(getBuffer(keyValue), keyValue.getRowOffset(), keyValue.getRowLength(),
                        getBuffer(keyValue), keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
                        getBuffer(keyValue), keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
                        keyValue.getTimestamp(), KeyValue.Type.Put, value, 0, value == null ? 0 : value.length);
}
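A hypothetical call (existingCell is illustrative): the helper produces a copy of the cell with only the value replaced. Note that the result is always a Put-type KeyValue, whatever the original cell's type byte was.

Cell rewritten = CellUtils.newKeyValue(existingCell, Bytes.toBytes(42L));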
 
Example 19 (Project: phoenix, File: DefaultStatisticsCollector.java)
/**
 * Update the current statistics based on the latest batch of key-values from the underlying scanner
 * 
 * @param results
 *            next batch of {@link KeyValue}s
 * @throws IOException 
 */
@Override
public void collectStatistics(final List<Cell> results) {
    // A guide posts depth of zero disables the collection of stats
    if (guidePostDepth == 0 || results.size() == 0) {
        return;
    }
    Map<ImmutableBytesPtr, Boolean> famMap = Maps.newHashMap();
    boolean incrementRow = false;
    Cell c = results.get(0);
    ImmutableBytesWritable row = new ImmutableBytesWritable(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    /*
     * During compaction, it is possible that HBase will not return all the key values when
     * internalScanner.next() is called. So we need the below check to avoid counting a row more
     * than once.
     */
    if (currentRow == null || !row.equals(currentRow)) {
        currentRow = row;
        incrementRow = true;
    }
    for (Cell cell : results) {
        maxTimeStamp = Math.max(maxTimeStamp, cell.getTimestamp());
        Pair<Long, GuidePostsInfoBuilder> gps;
        if (cachedGuidePosts == null) {
            ImmutableBytesPtr cfKey = new ImmutableBytesPtr(cell.getFamilyArray(), cell.getFamilyOffset(),
                    cell.getFamilyLength());
            gps = guidePostsInfoWriterMap.get(cfKey);
            if (gps == null) {
                gps = new Pair<Long, GuidePostsInfoBuilder>(0L, new GuidePostsInfoBuilder());
                guidePostsInfoWriterMap.put(cfKey, gps);
            }
            if (famMap.get(cfKey) == null) {
                famMap.put(cfKey, true);
                gps.getSecond().incrementRowCount();
            }
        } else {
            gps = cachedGuidePosts;
            if (incrementRow) {
                cachedGuidePosts.getSecond().incrementRowCount();
                incrementRow = false;
            }
        }
        int kvLength = KeyValueUtil.getSerializedSize(cell, true);
        long byteCount = gps.getFirst() + kvLength;
        gps.setFirst(byteCount);
        if (byteCount >= guidePostDepth) {
            if (gps.getSecond().addGuidePostOnCollection(row, byteCount, gps.getSecond().getRowCount())) {
                gps.setFirst(0L);
                gps.getSecond().resetRowCount();
            }
        }
    }
}
 
Example 20 (Project: geowave, File: MergingServerOp.java)
protected Cell mergeList(final List<Cell> cells) {
  synchronized (MUTEX) {
    Mergeable currentMergeable = null;
    final Cell firstCell = cells.get(0);
    for (final Cell cell : cells) {
      final Mergeable mergeable =
          getMergeable(
              cell,
              // TODO consider avoiding extra byte array
              // allocations (which would require
              // persistence utils to be able to use
              // bytebuffer instead of byte[])
              CellUtil.cloneValue(cell));
      if (mergeable != null) {
        if (currentMergeable == null) {
          currentMergeable = mergeable;
        } else {
          currentMergeable.merge(mergeable);
        }
      }
    }
    final byte[] valueBinary = getBinary(currentMergeable);
    // this is basically a lengthy verbose form of cloning
    // in-place (without allocating new byte arrays) and
    // simply replacing the value with the new mergeable
    // value
    return new KeyValue(
        firstCell.getRowArray(),
        firstCell.getRowOffset(),
        firstCell.getRowLength(),
        firstCell.getFamilyArray(),
        firstCell.getFamilyOffset(),
        firstCell.getFamilyLength(),
        firstCell.getQualifierArray(),
        firstCell.getQualifierOffset(),
        firstCell.getQualifierLength(),
        firstCell.getTimestamp(),
        Type.codeToType(firstCell.getTypeByte()),
        valueBinary,
        0,
        valueBinary.length,
        firstCell.getTagsArray(),
        firstCell.getTagsOffset(),
        firstCell.getTagsLength());
  }
}