下面列出了 org.apache.hadoop.hbase.client.Delete#deleteColumns() 的实例代码;也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier, deleteTs);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
@Test
public void testDeleteFiltering() throws Exception {
  // Region backed by a single family "f"; all writes target column "c".
  String tableName = "TestDeleteFiltering";
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, family, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));
    byte[] row = Bytes.toBytes(1);
    // Write one cell version per transaction timestamp V[4]..V[V.length-1].
    for (int idx = 4; idx < V.length; idx++) {
      Put put = new Put(row);
      put.add(family, qualifier, V[idx], Bytes.toBytes(V[idx]));
      region.put(put);
    }
    // Delete from the third entry back: that cell's timestamp + 1 simulates
    // a delete issued in a new transaction.
    long deleteTs = V[5] + 1;
    Delete delete = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // Row deletes are not yet supported (TransactionAwareHTable normally handles this).
    delete.deleteColumns(family, qualifier);
    region.delete(delete);
    // Force a flush; the flush should drop only the deleted versions,
    // leaving the rest intact.
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();
    // A plain multi-version scan should now see V[8] and V[6] plus the delete
    // marker: V[7] is invalid and V[5] and earlier are covered by the delete.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> results = Lists.newArrayList();
    // next() returning false means this is the only row in the region.
    assertFalse(scanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
/**
 * Covering deletes (via {@link Delete#deleteColumns}) remove everything at or before the given
 * timestamp. When a delete touches the latest state we only need to add delete markers; when it
 * touches back-in-time state we only fix up the immediately surrounding index entries, since
 * anything newer is corrected by later updates anyway.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
  HTable primary = createSetupTables(fam1);
  // Seed the primary table: indexed column at ts1, regular column at ts2.
  long ts1 = 10;
  long ts2 = 11;
  long ts3 = 12;
  Put put = new Put(row1);
  put.add(FAM, indexed_qualifer, ts1, value1);
  put.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(put);
  primary.flushCommits();
  // Build a single delete carrying two different timestamps. Covering deletes
  // need not match an exact version timestamp — they cover everything earlier.
  Delete delete = new Delete(row1);
  delete.deleteColumns(FAM, indexed_qualifer, ts2);
  delete.deleteColumns(FAM2, regular_qualifer, ts3);
  primary.delete(delete);
  // Open the index table so we can verify its state at each timestamp.
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
  // At ts1 the original put is visible: value1 in col1, nothing yet in col2.
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
  // At ts2 the delete changes what the put would have inserted: col1 is
  // emptied, only the regular column's value remains.
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
  // At ts3 the final delete has cleared out everything.
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
  // cleanup
  closeAndCleanupTables(primary, index1);
}
/**
 * Covering deletes (via {@link Delete#deleteColumns}) remove everything at or before the given
 * timestamp. When a delete touches the latest state we only need to add delete markers; when it
 * touches back-in-time state we only fix up the immediately surrounding index entries, since
 * anything newer is corrected by later updates anyway.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
  HTable primary = createSetupTables(fam1);
  // Seed the primary table: indexed column at ts1, regular column at ts2.
  long ts1 = 10;
  long ts2 = 11;
  long ts3 = 12;
  Put put = new Put(row1);
  put.add(FAM, indexed_qualifer, ts1, value1);
  put.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(put);
  primary.flushCommits();
  // Build a single delete carrying two different timestamps. Covering deletes
  // need not match an exact version timestamp — they cover everything earlier.
  Delete delete = new Delete(row1);
  delete.deleteColumns(FAM, indexed_qualifer, ts2);
  delete.deleteColumns(FAM2, regular_qualifer, ts3);
  primary.delete(delete);
  // Open the index table so we can verify its state at each timestamp.
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
  // At ts1 the original put is visible: value1 in col1, nothing yet in col2.
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
  // At ts2 the delete changes what the put would have inserted: col1 is
  // emptied, only the regular column's value remains.
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
  // At ts3 the final delete has cleared out everything.
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
  // cleanup
  closeAndCleanupTables(primary, index1);
}