org.apache.hadoop.hbase.client.Delete#deleteColumn ( )源码实例Demo

下面列出了org.apache.hadoop.hbase.client.Delete#deleteColumn ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: hadoop-arch-book   文件: BasicFraudHBaseService.java
/**
 * Records a user log-in in the profile table in a single batch: clears the
 * per-log-in purchase/sell counters, bumps the log-in counter, and stores the
 * log-in time and source IP.
 *
 * @param userId    id of the user logging in
 * @param ipAddress IP address the log-in came from
 * @throws Exception if the batch of mutations fails
 */
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  try {
    byte[] profileRowKey = generateProfileRowKey(userId);

    ArrayList<Row> actions = new ArrayList<Row>();

    // Reset the "current log-in" counters for the new session.
    Delete delete = new Delete(profileRowKey);
    delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
    delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
    actions.add(delete);

    // Count this log-in.
    Increment increment = new Increment(profileRowKey);
    increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
    actions.add(increment);

    // Record when and from where the user logged in.
    Put put = new Put(profileRowKey);
    put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
    put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
    actions.add(put);

    profileTable.batch(actions);
  } finally {
    // BUG FIX: return the table to the pool; it previously leaked on every call.
    profileTable.close();
  }
}
 
源代码2 项目: phoenix   文件: ReadWriteKeyValuesWithCodecIT.java
/**
 * @return a bunch of {@link WALEdit}s that test a range of serialization possibilities.
 */
private List<WALEdit> getEdits() {
  List<WALEdit> edits = new ArrayList<WALEdit>();

  // A single put of "v1" on the shared test row.
  Put put = new Put(ROW);
  put.add(FAMILY, null, Bytes.toBytes("v1"));
  WALEdit putOnly = new WALEdit();
  addMutation(putOnly, put, FAMILY);
  edits.add(putOnly);

  // A delete of the same cell.
  Delete delete = new Delete(ROW);
  delete.deleteColumn(FAMILY, null);
  WALEdit deleteOnly = new WALEdit();
  addMutation(deleteOnly, delete, FAMILY);
  edits.add(deleteOnly);

  // Delete followed by put inside one edit.
  WALEdit deleteThenPut = new WALEdit();
  addMutation(deleteThenPut, delete, FAMILY);
  addMutation(deleteThenPut, put, FAMILY);
  edits.add(deleteThenPut);

  // An edit carrying only an index update, no client mutation.
  WALEdit indexOnly = new WALEdit();
  byte[] targetTable = Bytes.toBytes("targetTable");
  IndexedKeyValue indexKv = new IndexedKeyValue(targetTable, put);
  indexOnly.add(indexKv);
  edits.add(indexOnly);

  // Delete, index update, and put all mixed into a single edit.
  WALEdit mixed = new WALEdit();
  addMutation(mixed, delete, FAMILY);
  mixed.add(indexKv);
  addMutation(mixed, put, FAMILY);
  edits.add(mixed);

  return edits;
}
 
源代码3 项目: phoenix   文件: TestReadWriteKeyValuesWithCodec.java
/**
 * @return a bunch of {@link WALEdit}s that test a range of serialization possibilities.
 */
private List<WALEdit> getEdits() {
  List<WALEdit> edits = new ArrayList<WALEdit>();

  // One put of "v1" on the test row.
  Put rowPut = new Put(ROW);
  rowPut.add(FAMILY, null, Bytes.toBytes("v1"));
  WALEdit putEdit = new WALEdit();
  addMutation(putEdit, rowPut, FAMILY);
  edits.add(putEdit);

  // A matching delete for the same cell.
  Delete rowDelete = new Delete(ROW);
  rowDelete.deleteColumn(FAMILY, null);
  WALEdit deleteEdit = new WALEdit();
  addMutation(deleteEdit, rowDelete, FAMILY);
  edits.add(deleteEdit);

  // Both mutations (delete first, then put) in one edit.
  WALEdit combinedEdit = new WALEdit();
  addMutation(combinedEdit, rowDelete, FAMILY);
  addMutation(combinedEdit, rowPut, FAMILY);
  edits.add(combinedEdit);

  // An edit that contains nothing but an index update.
  WALEdit indexUpdateOnly = new WALEdit();
  byte[] indexTableName = Bytes.toBytes("targetTable");
  IndexedKeyValue indexUpdate = new IndexedKeyValue(indexTableName, rowPut);
  indexUpdateOnly.add(indexUpdate);
  edits.add(indexUpdateOnly);

  // A mix: delete, index update, then put, all in one edit.
  WALEdit mixedEdit = new WALEdit();
  addMutation(mixedEdit, rowDelete, FAMILY);
  mixedEdit.add(indexUpdate);
  addMutation(mixedEdit, rowPut, FAMILY);
  edits.add(mixedEdit);

  return edits;
}
 
源代码4 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    tx = null;
    changeSets.clear();
  }
}
 
源代码5 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer, then flushes
 * the table's client-side write buffer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    // Flush best-effort; a flush failure must not mask the rollback outcome.
    try {
      hTable.flushCommits();
    } catch (Exception e) {
      LOG.error("Could not flush HTable commits", e);
    }
    tx = null;
    changeSets.clear();
  }
}
 
源代码6 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    tx = null;
    changeSets.clear();
  }
}
 
源代码7 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer, then flushes
 * the table's client-side write buffer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    // Flush best-effort; a flush failure must not mask the rollback outcome.
    try {
      hTable.flushCommits();
    } catch (Exception e) {
      LOG.error("Could not flush HTable commits", e);
    }
    tx = null;
    changeSets.clear();
  }
}
 
源代码8 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer, then flushes
 * the table's client-side write buffer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    // Flush best-effort; a flush failure must not mask the rollback outcome.
    try {
      hTable.flushCommits();
    } catch (Exception e) {
      LOG.error("Could not flush HTable commits", e);
    }
    tx = null;
    changeSets.clear();
  }
}
 
源代码9 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    tx = null;
    changeSets.clear();
  }
}
 
源代码10 项目: phoenix-tephra   文件: TransactionAwareHTable.java
/**
 * Undoes every change recorded for the current transaction by issuing
 * version-specific deletes at the transaction's write pointer, then flushes
 * the table's client-side write buffer.
 *
 * @return true once the rollback deletes have been submitted
 */
@Override
protected boolean doRollback() throws Exception {
  try {
    // Count pending changes up-front so the delete list is allocated once.
    int totalChanges = 0;
    for (Set<ActionChange> changes : changeSets.values()) {
      totalChanges += changes.size();
    }
    List<Delete> rollbackDeletes = new ArrayList<>(totalChanges);

    for (Map.Entry<Long, Set<ActionChange>> changeSet : changeSets.entrySet()) {
      long writePointer = changeSet.getKey();
      for (ActionChange change : changeSet.getValue()) {
        byte[] row = change.getRow();
        byte[] family = change.getFamily();
        byte[] qualifier = change.getQualifier();
        Delete rollback = new Delete(row);
        makeRollbackOperation(rollback);
        switch (conflictLevel) {
          case ROW:
          case NONE:
            // Row-level (or no) conflict detection: drop everything the
            // transaction wrote in this family at its write pointer.
            rollback.deleteFamilyVersion(family, writePointer);
            break;
          case COLUMN:
            // Column-level detection tracks the exact cell that changed.
            if (family != null && qualifier == null) {
              rollback.deleteFamilyVersion(family, writePointer);
            } else if (family != null && qualifier != null) {
              rollback.deleteColumn(family, qualifier, writePointer);
            }
            break;
          default:
            throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
        }
        rollbackDeletes.add(rollback);
      }
    }
    hTable.delete(rollbackDeletes);
    return true;
  } finally {
    // Flush best-effort; a flush failure must not mask the rollback outcome.
    try {
      hTable.flushCommits();
    } catch (Exception e) {
      LOG.error("Could not flush HTable commits", e);
    }
    tx = null;
    changeSets.clear();
  }
}
 
源代码11 项目: phoenix   文件: EndToEndCoveredIndexingIT.java
/**
 * Similar to {@link #testMultipleTimestampsInSinglePut()}, but checks the index
 * state when a single {@link Delete} carries cells at several timestamps.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSingleDelete() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table, with cells at two different timestamps
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // check to make sure everything we expect is there
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // at ts1, we just have v1 indexed (col2 is still empty at that point)
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // at ts2, don't have the above anymore
  pairs.clear();
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
  // but we do have the new entry at ts2, covering both columns
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
  d.deleteColumn(FAM, indexed_qualifer, ts1);
  // this targets ts3 but the cell was written at ts2, so since this doesn't match
  // exactly, we actually shouldn't see a change in table state
  d.deleteColumn(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // at ts1, we should have the put covered exactly by the delete and into the entire future
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
    value1);

  // at ts2, we should just see value3 (value1 was removed by the matching delete)
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // the later delete is a point delete, so we shouldn't see any change at ts3
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
    HConstants.EMPTY_END_ROW);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
 
源代码12 项目: phoenix   文件: TestEndToEndCoveredIndexing.java
/**
 * Similar to {@link #testMultipleTimestampsInSinglePut()}, but checks the index
 * state when a single {@link Delete} carries cells at several timestamps.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSingleDelete() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table, with cells at two different timestamps
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // check to make sure everything we expect is there
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // at ts1, we just have v1 indexed (col2 is still empty at that point)
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // at ts2, don't have the above anymore
  pairs.clear();
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
  // but we do have the new entry at ts2, covering both columns
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
  d.deleteColumn(FAM, indexed_qualifer, ts1);
  // this targets ts3 but the cell was written at ts2, so since this doesn't match
  // exactly, we actually shouldn't see a change in table state
  d.deleteColumn(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // at ts1, we should have the put covered exactly by the delete and into the entire future
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
    value1);

  // at ts2, we should just see value3 (value1 was removed by the matching delete)
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // the later delete is a point delete, so we shouldn't see any change at ts3
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
    HConstants.EMPTY_END_ROW);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
 
源代码13 文件: TransactionalRegion.java(推测;原始项目信息在此处缺失)
/**
 * Replays committed transactions from the old transactional WAL into this region.
 * <p>
 * The transaction-log table itself is skipped (it is not transactional). Each
 * recovered {@link KeyValue} is converted back into a {@link Put} or {@link Delete}
 * and applied, and the cache is flushed afterwards so the log is not replayed
 * again after a subsequent failure.
 *
 * @param oldCoreLogFile old core log file; the trx log is located next to it
 * @param minSeqId minimum sequence id to recover commits from
 * @param maxSeqId maximum sequence id (not used by this implementation)
 * @param reporter progress callback for long-running recovery
 */
protected void doReconstructionLog(final Path oldCoreLogFile, final long minSeqId, final long maxSeqId,
        final Progressable reporter) throws UnsupportedEncodingException, IOException {

    Path trxPath = new Path(oldCoreLogFile.getParent(), THLog.HREGION_OLD_THLOGFILE_NAME);

    // We can ignore doing anything with the Trx Log table, it is
    // not-transactional.
    if (super.getTableDesc().getNameAsString().equals(HBaseBackedTransactionLogger.TABLE_NAME)) {
        return;
    }

    THLogRecoveryManager recoveryManager = new THLogRecoveryManager(this);
    Map<Long, WALEdit> commitedTransactionsById = recoveryManager.getCommitsFromLog(trxPath, minSeqId, reporter);

    if (commitedTransactionsById != null && commitedTransactionsById.size() > 0) {
        LOG.debug("found " + commitedTransactionsById.size() + " COMMITED transactions to recover.");

        for (Entry<Long, WALEdit> entry : commitedTransactionsById.entrySet()) {
            LOG.debug("Writing " + entry.getValue().size() + " updates for transaction " + entry.getKey());
            WALEdit b = entry.getValue();

            for (KeyValue kv : b.getKeyValues()) {
                // FIXME need to convert these into puts and deletes. Not sure this is
                // the right way.
                // Could probably combine multiple KV's into single put/delete.
                // Also timestamps?
                if (kv.getType() == KeyValue.Type.Put.getCode()) {
                    Put put = new Put();
                    put.add(kv);
                    super.put(put);
                } else if (kv.isDelete()) {
                    Delete del = new Delete(kv.getRow());
                    if (kv.isDeleteFamily()) {
                        del.deleteFamily(kv.getFamily());
                    } else if (kv.isDeleteType()) {
                        del.deleteColumn(kv.getFamily(), kv.getQualifier());
                    }
                    // BUG FIX: the delete was previously built but never applied,
                    // silently dropping every recovered delete.
                    // NOTE(review): overload chosen to mirror super.put(put);
                    // confirm against the HRegion.delete signature in this HBase version.
                    super.delete(del, null, true);
                }

            }

        }

        LOG.debug("Flushing cache"); // We must trigger a cache flush,
        // otherwise we would ignore the log on subsequent failure
        if (!super.flushcache()) {
            LOG.warn("Did not flush cache");
        }
    }
}
 
源代码14 项目: bigdata-tutorial   文件: HBaseUtils.java
/**
 * Deletes the most recent version of the {@code family:qualifier} cell of a row.
 * (Delete#deleteColumn without a timestamp only removes the latest version.)
 *
 * @param table     table to delete from; owned (and later closed) by the caller
 * @param rowKey    key of the row whose cell is removed
 * @param family    column family name
 * @param qualifier column qualifier name
 * @throws Exception if the delete fails
 */
public static void deleteQualifier(HTableInterface table, String rowKey, String family, String qualifier) throws Exception {
	Delete delete = new Delete(Bytes.toBytes(rowKey));
	delete.deleteColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
	table.delete(delete);
	// BUG FIX: new String(byte[]) decoded with the platform default charset;
	// Bytes.toString always decodes table names as UTF-8.
	LOGGER.info(">>>> HBase Delete {} data with key = {}, columnFamily = {}, qualifier = {}.", Bytes.toString(table.getTableName()), rowKey, family, qualifier);
}
 
源代码15 项目: bigdata-tutorial   文件: HBaseDMLHandler.java
/**
 * Deletes the most recent version of the {@code family:qualifier} cell of a row.
 * (Delete#deleteColumn without a timestamp only removes the latest version.)
 *
 * @param tableName name of the table
 * @param rowKey    key of the row whose cell is removed
 * @param family    column family name
 * @param qualifier column qualifier name
 * @throws Exception if the table cannot be obtained or the delete fails
 */
public void deleteQualifier(String tableName, String rowKey, String family, String qualifier) throws Exception {
	HTableInterface htable = getTable(tableName);
	try {
		Delete delete = new Delete(Bytes.toBytes(rowKey));
		delete.deleteColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
		htable.delete(delete);
	} finally {
		// BUG FIX: release the (possibly pooled) table; it previously leaked on every call.
		htable.close();
	}
}