下面列出了org.apache.hadoop.hbase.client.Delete#getRow ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
    WALEdit edit, Durability durability) throws IOException {
  // HBase deletes are irreversible, so client deletes are rewritten as special
  // tombstone puts that a failed transaction can later undo with a real delete.
  // Rollback-originated deletes are final by definition and pass through untouched.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Build the tombstone put for this client-initiated delete.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put tombstones = new Put(delete.getRow(), delete.getTimeStamp());
  for (Map.Entry<byte[], List<Cell>> familyEntry : delete.getFamilyCellMap().entrySet()) {
    byte[] family = familyEntry.getKey();
    List<Cell> cells = familyEntry.getValue();
    if (isFamilyDelete(cells)) {
      // Family-wide delete: a single marker under the reserved qualifier suffices.
      tombstones.addColumn(family, TxConstants.FAMILY_DELETE_QUALIFIER,
          cells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Per-column delete: emit one empty marker cell per targeted column.
      for (Cell cell : cells) {
        tombstones.addColumn(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Propagate the delete's attributes (e.g. transaction metadata) onto the put.
  for (Map.Entry<String, byte[]> attr : delete.getAttributesMap().entrySet()) {
    tombstones.setAttribute(attr.getKey(), attr.getValue());
  }
  e.getEnvironment().getRegion().put(tombstones);
  // Suppress the normal delete path entirely.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  // Translate client deletes into transactional delete tombstones (special puts).
  // Native HBase deletes cannot be undone, so writing marker puts instead lets the
  // transaction system roll the change back (with a real delete) if the tx fails.
  // Deletes issued as part of a transaction rollback need no special handling: they
  // will never themselves be rolled back, so they proceed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }
  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);
  // Client-initiated delete: rewrite it as tombstone markers.
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  // Iterate entries directly (the original did keySet() + get(), a redundant second
  // lookup per family) and use addColumn(), the non-deprecated replacement for add().
  for (Map.Entry<byte[], List<Cell>> family : delete.getFamilyCellMap().entrySet()) {
    List<Cell> familyCells = family.getValue();
    if (isFamilyDelete(familyCells)) {
      // Whole-family delete: one marker cell under the reserved family-delete qualifier.
      deleteMarkers.addColumn(family.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER,
          familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      // Column-level delete: one empty marker cell per deleted column.
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family.getKey(), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
            HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  // Carry over the delete's attributes (e.g. the serialized transaction) to the marker put.
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // Skip normal delete handling.
  e.bypass();
}
/**
 * Chains a {@code Delete} onto the pending check-and-mutate operation: the delete is
 * executed atomically only if the previously configured condition
 * (row/family/qualifier/op/value — instance fields of the enclosing builder, not
 * visible in this block) holds on the server.
 *
 * @param delete the delete to apply when the check passes
 * @return true if the condition held and the delete was applied
 * @throws IOException on RPC or server-side failure
 */
@Override
public boolean thenDelete(Delete delete) throws IOException {
// Validate the builder state (condition fully specified) before issuing the RPC.
preCheck();
// Wrap the single delete in a RowMutations so it can ride the checkAndMutate path.
RowMutations rowMutations = new RowMutations(delete.getRow());
rowMutations.add(delete);
return checkAndMutate(row, family, qualifier, op, value, rowMutations);
}
/**
 * Translates a client {@code Delete} into a transactional tombstone {@code Put} written
 * at the transaction's write timestamp, recording every affected cell in the
 * transaction's write set. Returns the tombstone put for the caller to apply.
 *
 * Handling depends on the delete-marker type carried by each cell:
 *  - DeleteColumn: tombstone that column directly.
 *  - DeleteFamily: defer — collect the family into a Get so existing qualifiers can be
 *    discovered and tombstoned individually afterwards.
 *  - Delete (single version): only allowed at LATEST_TIMESTAMP; deleting a specific
 *    version is incompatible with snapshot isolation and throws.
 *
 * @param tx     must be an HBaseTransaction (enforced below)
 * @param delete the client delete; must not set a timerange
 * @throws IOException on validation failure or read-back errors
 */
private Put deleteInternal(Transaction tx, Delete delete) throws IOException {
// Explicit timeranges conflict with transaction-managed timestamps.
throwExceptionIfOpSetsTimerange(delete);
HBaseTransaction transaction = enforceHBaseTransactionAsParam(tx);
final long writeTimestamp = transaction.getWriteTimestamp();
boolean deleteFamily = false;
// deleteP accumulates tombstone cells; deleteG accumulates families whose qualifiers
// must be read back before they can be tombstoned one by one.
final Put deleteP = new Put(delete.getRow(), writeTimestamp);
final Get deleteG = new Get(delete.getRow());
propagateAttributes(delete, deleteP);
propagateAttributes(delete, deleteG);
Map<byte[], List<Cell>> fmap = delete.getFamilyCellMap();
if (fmap.isEmpty()) {
// Empty family map means a whole-row delete: tombstone everything found by a read.
familyQualifierBasedDeletion(transaction, deleteP, deleteG);
}
for (List<Cell> cells : fmap.values()) {
for (Cell cell : cells) {
CellUtils.validateCell(cell, writeTimestamp);
switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
case DeleteColumn:
// Delete all versions of one column: one tombstone at the tx write timestamp.
deleteP.addColumn(CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell),
writeTimestamp,
CellUtils.DELETE_TOMBSTONE);
// Register the cell in the write set for conflict detection / rollback.
addWriteSetElement(transaction,
new HBaseCellId(this,
delete.getRow(),
CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell),
writeTimestamp));
break;
case DeleteFamily:
// Defer: the family's existing qualifiers must be read before tombstoning.
deleteG.addFamily(CellUtil.cloneFamily(cell));
deleteFamily = true;
break;
case Delete:
if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
// Latest-version delete behaves like a column delete under snapshot isolation.
deleteP.addColumn(CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell),
writeTimestamp,
CellUtils.DELETE_TOMBSTONE);
addWriteSetElement(transaction,
new HBaseCellId(this,
delete.getRow(),
CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell),
writeTimestamp));
break;
} else {
throw new UnsupportedOperationException(
"Cannot delete specific versions on Snapshot Isolation.");
}
default:
// Other marker types (e.g. Put) are ignored here by design.
break;
}
}
}
if (deleteFamily) {
// Row-level conflict detection needs no read-back; cell-level detection does.
if (enforceHBaseTransactionManagerAsParam(transaction.getTransactionManager()).
getConflictDetectionLevel() == ConflictDetectionLevel.ROW) {
familyQualifierBasedDeletionWithOutRead(transaction, deleteP, deleteG);
} else {
familyQualifierBasedDeletion(transaction, deleteP, deleteG);
}
}
return deleteP;
}
/**
 * Mirrors an HBase delete into the Solr index. A whole-row delete removes the Solr
 * document; a column-level delete that touches Solr-indexed qualifiers issues an atomic
 * update clearing those fields. System tables and non-indexed qualifiers are ignored.
 */
@Override
public void postDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
    Durability durability) throws IOException {
  String tableName = e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
  if (tableName.startsWith("hbase:")) { // system/meta table — never indexed, skip
    return;
  }
  // NOTE(review): platform-default charset; presumably row keys are ASCII — confirm,
  // otherwise this should specify UTF-8 to match SolrTools.UTF_8 used below.
  String rowKey = new String(delete.getRow());
  NavigableMap<byte[], List<Cell>> map = delete.getFamilyCellMap();
  JsonObject jsonSet = new JsonObject();
  // Qualifier suffixes mapped to Solr dynamic-field types (_s string, _t text_general,
  // _dt date, _i int, _l long, _f float, _d double, _b boolean). Every branch of the
  // original if/else-if chain had an identical body, so one suffix scan replaces it.
  String[] solrSuffixes = {"_s", "_t", "_dt", "_i", "_l", "_f", "_d", "_b"};
  for (List<Cell> cells : map.values()) {
    for (Cell cell : cells) {
      String cFamily = new String(CellUtil.cloneFamily(cell));
      String cQualifier = new String(CellUtil.cloneQualifier(cell));
      for (String suffix : solrSuffixes) {
        if (cQualifier.endsWith(suffix)) {
          // In Solr atomic-update syntax, "set": null removes the field value.
          jsonSet.putObject(cFamily + F_SEPARATOR + cQualifier, (new JsonObject()).putString("set", null));
          break;
        }
      }
      // Qualifiers without a recognized suffix are not indexed in Solr — skip them.
    }
  }
  if (jsonSet.size() == 0) { // no Solr-indexed fields were touched
    if (delete.numFamilies() == e.getEnvironment().getRegion().getTableDesc().getFamilies().size()) {
      // Every family targeted: treat as a whole-row delete and drop the Solr document.
      JsonObject jsonDel = new JsonObject();
      jsonDel.putObject("delete", (new JsonObject()).putString("query", F_ID + ":\"" + tableName + F_SEPARATOR + rowKey + "\""));
      log.debug("postDelete!!! Row:" + jsonDel.encode());
      _bqDelete.enqueue(jsonDel.encode().getBytes(SolrTools.UTF_8));
    }
    // Partial delete with no Solr fields: nothing to do.
  } else {
    // Column-level delete touching Solr fields: atomic update clearing those fields
    // and refreshing the document's update-time stamp.
    jsonSet.putString(F_ID, tableName + F_SEPARATOR + rowKey);
    jsonSet.putObject(F_UPDATETIME, (new JsonObject()).putString("set", SolrTools.solrDateFormat.format(new java.util.Date())));
    log.debug("postDelete!!! Column:" + jsonSet.encode());
    _bqUpdate.enqueue(jsonSet.encode().getBytes(SolrTools.UTF_8));
  }
}
/**
 * Computes the index updates required by a {@code Delete}.
 *
 * Two shapes of delete are distinguished:
 *  - Empty family map: a row-delete marker. Delete the most recent indexed state as of
 *    the delete's timestamp, then fake per-family DeleteFamily markers (mirroring
 *    HRegion#prepareDelete) because batch mutations may reach this hook before the
 *    delete has been prepared.
 *  - Non-empty family map: a set of single-column/family deletes with possibly
 *    different timestamps; delegate to the timestamp-batching path.
 *
 * @param d the delete to index
 * @return mutation/index-table pairs to apply
 * @throws IOException on table-state read failure
 */
@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Delete d) throws IOException {
  // Stores all the return values.
  IndexUpdateManager updateMap = new IndexUpdateManager();
  Map<byte[], List<Cell>> families = d.getFamilyCellMap();
  // Option 1: row delete marker (isEmpty() is the idiomatic form of size() == 0).
  if (families.isEmpty()) {
    LocalTableState state = new LocalTableState(env, localTable, d);
    // Get a consistent view of 'now' shared by the index and the delete itself.
    long now = d.getTimeStamp();
    if (now == HConstants.LATEST_TIMESTAMP) {
      now = EnvironmentEdgeManager.currentTimeMillis();
      // Update the delete's idea of 'now' to be consistent with the index.
      d.setTimestamp(now);
    }
    // Only deletes are needed — this delete covers all columns, so no puts to add.
    addDeleteUpdatesToMap(updateMap, state, now);
    // The family map is empty here, so fake one DeleteFamily marker per table family
    // (just like HRegion#prepareDelete does) to bring the pending state up to date;
    // the batch-update path does not prepare deletes before calling this hook.
    byte[] deleteRow = d.getRow();
    for (byte[] family : this.env.getRegion().getTableDesc().getFamiliesKeys()) {
      state.addPendingUpdates(new KeyValue(deleteRow, family, null, now,
          KeyValue.Type.DeleteFamily));
    }
  } else {
    // Option 2: a bunch of single updates with potentially different timestamps —
    // batch by timestamp, similar to the put case.
    batchMutationAndAddUpdates(updateMap, d);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Found index updates for Delete: " + d + "\n" + updateMap);
  }
  return updateMap.toMap();
}
/**
 * Appends a delete-marker cell with the given coordinates and marker type to {@code del}.
 * The marker carries no value payload — only row/family/column/timestamp plus the
 * KeyValue type code.
 *
 * @param del    the delete mutation to extend
 * @param family column family of the marker
 * @param column qualifier of the marker
 * @param ts     marker timestamp
 * @param type   delete-marker type (Delete / DeleteColumn / DeleteFamily, ...)
 * @throws IOException if the marker cell cannot be added to the mutation
 */
void addCellToDelMutation(Delete del, byte[] family, byte[] column, long ts, KeyValue.Type type) throws IOException {
  byte[] rowKey = del.getRow();
  // Null value: delete markers only encode coordinates and the type code.
  Cell cell = CellUtil.createCell(rowKey, family, column, ts, type.getCode(), null);
  // Narrowed from 'throws Exception' to the IOException addDeleteMarker declares;
  // callers catching Exception remain compatible.
  del.addDeleteMarker(cell);
}
/**
 * Computes the index updates required by a {@code Delete} (pre-Cell, KeyValue-based
 * variant of the API).
 *
 * Two shapes of delete are distinguished:
 *  - Empty family map: a row-delete marker. Delete the most recent indexed state as of
 *    the delete's timestamp, then fake per-family DeleteFamily markers (mirroring
 *    HRegion#prepareDelete) because batch mutations may reach this hook before the
 *    delete has been prepared.
 *  - Non-empty family map: a set of single-column/family deletes with possibly
 *    different timestamps; delegate to the timestamp-batching path.
 *
 * @param d the delete to index
 * @return mutation/index-table pairs to apply
 * @throws IOException on table-state read failure
 */
@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Delete d) throws IOException {
  // Stores all the return values.
  IndexUpdateManager updateMap = new IndexUpdateManager();
  Map<byte[], List<KeyValue>> families = d.getFamilyMap();
  // Option 1: row delete marker (isEmpty() is the idiomatic form of size() == 0).
  if (families.isEmpty()) {
    LocalTableState state = new LocalTableState(env, localTable, d);
    // Get a consistent view of 'now' shared by the index and the delete itself.
    long now = d.getTimeStamp();
    if (now == HConstants.LATEST_TIMESTAMP) {
      now = EnvironmentEdgeManager.currentTimeMillis();
      // Update the delete's idea of 'now' to be consistent with the index.
      d.setTimestamp(now);
    }
    // Only deletes are needed — this delete covers all columns, so no puts to add.
    addDeleteUpdatesToMap(updateMap, state, now);
    // The family map is empty here, so fake one DeleteFamily marker per table family
    // (just like HRegion#prepareDelete does) to bring the pending state up to date;
    // the batch-update path does not prepare deletes before calling this hook.
    byte[] deleteRow = d.getRow();
    for (byte[] family : this.env.getRegion().getTableDesc().getFamiliesKeys()) {
      state.addPendingUpdates(new KeyValue(deleteRow, family, null, now,
          KeyValue.Type.DeleteFamily));
    }
  } else {
    // Option 2: a bunch of single updates with potentially different timestamps —
    // batch by timestamp, similar to the put case.
    batchMutationAndAddUpdates(updateMap, d);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Found index updates for Delete: " + d + "\n" + updateMap);
  }
  return updateMap.toMap();
}