Class org.apache.hadoop.hbase.filter.RowFilter: Source Code Examples

The following examples show how the org.apache.hadoop.hbase.filter.RowFilter API is used in real projects; you can also follow the links to view the full source code on GitHub. Note that examples written against older HBase versions use the deprecated CompareFilter.CompareOp enum, while examples targeting HBase 2.x use CompareOperator instead.
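
Before the project examples, here is a minimal self-contained sketch of the pattern they all share: build a RowFilter from a compare operator and a comparator, set it on a Scan, and iterate over the results. It targets the HBase 2.x API; the table name and row key are hypothetical placeholders.

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowFilterSketch {
    public static void main(String[] args) throws IOException {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("example_table"))) { // hypothetical table
            // Keep only rows whose key is exactly "row-0001" (hypothetical key).
            RowFilter filter = new RowFilter(CompareOperator.EQUAL,
                    new BinaryComparator(Bytes.toBytes("row-0001")));
            Scan scan = new Scan();
            scan.setFilter(filter);
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println(result);
                }
            }
        }
    }
}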

Example 1  Project: xxhadoop   File: HBaseTest.java
@Test
public void testScan() throws IOException {
	Connection connection = admin.getConnection();
	Table table = connection.getTable(TableName.valueOf("tbl_girls"));
	
	Scan scan = new Scan(Bytes.toBytes("0001"), Bytes.toBytes("0004"));
	// RowKeyFilter
	Filter filter = new PrefixFilter(Bytes.toBytes("000"));
	scan.setFilter(filter);
	
	Filter filter2 = new RowFilter(CompareOp.EQUAL, new SubstringComparator("000"));
	scan.setFilter(filter2);
	
	//BinaryComparator binaryComparator = new BinaryComparator(Bytes.toBytes(29));		
	Filter filter3 = new SingleColumnValueFilter(Bytes.toBytes("base_info"), Bytes.toBytes("age"), CompareOp.GREATER, Bytes.toBytes(29));
	scan.setFilter(filter3);
	
	ResultScanner resultScanner = table.getScanner(scan);
	for (Result result : resultScanner) {
		LOGGER.info(result.toString());
		int value = Bytes.toInt(result.getValue(Bytes.toBytes("base_info"), Bytes.toBytes("age")));
		LOGGER.info(String.valueOf(value));
	}
}
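
Note that each call to scan.setFilter() above replaces the filter set by the previous call, so only filter3 is actually applied when the scan runs. To apply all three conditions together they would need to be combined in a FilterList; a sketch reusing the variables from the test:

// Sketch: combine the three filters so a row must pass all of them (MUST_PASS_ALL).
FilterList allFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
allFilters.addFilter(filter);   // PrefixFilter: row keys starting with "000"
allFilters.addFilter(filter2);  // RowFilter: "000" as a substring of the row key
allFilters.addFilter(filter3);  // SingleColumnValueFilter: base_info:age > 29
scan.setFilter(allFilters);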
 
Example 2  Project: hbase-operator-tools   File: RegionsMerger.java
private List<RegionInfo> getOpenRegions(Connection connection, TableName table) throws Exception {
  List<RegionInfo> regions = new ArrayList<>();
  Table metaTbl = connection.getTable(META_TABLE_NAME);
  String tblName = table.getNameAsString();
  RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL,
    new SubstringComparator(tblName+","));
  SingleColumnValueFilter colFilter = new SingleColumnValueFilter(CATALOG_FAMILY,
    STATE_QUALIFIER, CompareOperator.EQUAL, Bytes.toBytes("OPEN"));
  Scan scan = new Scan();
  FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  filter.addFilter(rowFilter);
  filter.addFilter(colFilter);
  scan.setFilter(filter);
  try(ResultScanner rs = metaTbl.getScanner(scan)){
    Result r;
    while ((r = rs.next()) != null) {
      RegionInfo region = RegionInfo.parseFrom(r.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER));
      regions.add(region);
    }
  }
  return regions;
}
 
Example 3  Project: hbase   File: TestTableInputFormat.java
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(job);
    Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable"));
    // mandatory
    initializeTable(connection, exampleTable.getName());
    byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
    // mandatory
    setInputColumns(inputColumns);
    Filter exampleFilter =
      new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    // optional
    setRowFilter(exampleFilter);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
 
Example 4  Project: hbase   File: TestTableInputFormat.java
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(job);
    Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable")));
    // mandatory
    initializeTable(connection, exampleTable.getName());
    byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter =
      new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
 
Example 5  Project: hbase   File: TestTableInputFormat.java
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
    //optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter =
      new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
 
Example 6  Project: hbase   File: TestTableInputFormat.java
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
    Bytes.toBytes("columnB") };
  //optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter =
    new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
 
Example 7  Project: pxf   File: HBaseFilterBuilder.java
/**
 * Handles simple column-operator-constant expressions.
 * Creates a special filter when the column is the row key column.
 *
 * @param hBaseColumn  the HBase column
 * @param operator the simple column operator
 * @param data         the optional operand
 * @return the {@link Filter} for the given simple column operator
 */
private Filter processSimpleColumnOperator(HBaseColumnDescriptor hBaseColumn,
                                           Operator operator,
                                           OperandNode data) {
    // The value of lastOperand has to be stored after visiting
    // the operand child of this node.
    ByteArrayComparable comparator = getComparator(
            hBaseColumn.columnTypeCode(),
            data);

    /*
     * If row key is of type TEXT, allow filter in start/stop row
     * key API in HBaseAccessor/Scan object.
     */
    if (data != null && isTextualRowKey(hBaseColumn)) {
        storeStartEndKeys(operator, data.toString());
    }

    if (hBaseColumn.isKeyColumn()) {
        // Special filter for row key column
        return new RowFilter(
                OPERATORS_MAP.get(operator),
                comparator);
    } else {
        return new SingleColumnValueFilter(
                hBaseColumn.columnFamilyBytes(),
                hBaseColumn.qualifierBytes(),
                OPERATORS_MAP.get(operator),
                comparator);
    }
}
 
Example 8  Project: hbase-operator-tools   File: HBCK2.java
int setRegionState(ClusterConnection connection, String region,
      RegionState.State newState)
    throws IOException {
  if (newState == null) {
    throw new IllegalArgumentException("State can't be null.");
  }
  RegionState.State currentState = null;
  Table table = connection.getTable(TableName.valueOf("hbase:meta"));
  RowFilter filter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(region));
  Scan scan = new Scan();
  scan.setFilter(filter);
  Result result = table.getScanner(scan).next();
  if (result != null) {
    byte[] currentStateValue = result.getValue(HConstants.CATALOG_FAMILY,
      HConstants.STATE_QUALIFIER);
    if (currentStateValue == null) {
      System.out.println("WARN: Region state info on meta was NULL");
    } else {
      currentState = RegionState.State.valueOf(
          org.apache.hadoop.hbase.util.Bytes.toString(currentStateValue));
    }
    Put put = new Put(result.getRow());
    put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
      org.apache.hadoop.hbase.util.Bytes.toBytes(newState.name()));
    table.put(put);
    System.out.println("Changed region " + region + " STATE from "
      + currentState + " to " + newState);
    return EXIT_SUCCESS;
  } else {
    System.out.println("ERROR: Could not find region " + region + " in meta.");
  }
  return EXIT_FAILURE;
}
 
Example 9  Project: beam   File: HBaseIOTest.java
/** Tests reading all rows using a filter. */
@Test
public void testReadingWithFilter() throws Exception {
  final String table = tmpTable.getName();
  final int numRows = 1001;
  createAndWriteData(table, numRows);

  String regex = ".*17.*";
  Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
  runReadTestLength(
      HBaseIO.read().withConfiguration(conf).withTableId(table).withFilter(filter), false, 20);
}
 
Example 10  Project: beam   File: HBaseIOTest.java
@Test
public void testReadingWithFilterSDF() throws Exception {
  final String table = tmpTable.getName();
  final int numRows = 1001;
  createAndWriteData(table, numRows);

  String regex = ".*17.*";
  Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
  runReadTestLength(
      HBaseIO.read().withConfiguration(conf).withTableId(table).withFilter(filter), true, 20);
}
 
Example 11  Project: Eagle   File: HBaseLogReader.java
/**
 * TODO: if the required field is null for a row, that row will not be fetched. That could be a problem
 * for counting. We need another version of read that strictly gets the number of rows and returns all
 * the columns for a column family.
 */
public void open() throws IOException {
	if (isOpen)
		return; // silently return
	try {
		tbl = EagleConfigFactory.load().getHTable(schema.getTable());
	} catch (RuntimeException ex) {
		throw new IOException(ex);
	}

	String rowkeyRegex = buildRegex2(searchTags);
	RegexStringComparator regexStringComparator = new RegexStringComparator(
			rowkeyRegex);
	regexStringComparator.setCharset(Charset.forName("ISO-8859-1"));
	RowFilter filter = new RowFilter(CompareOp.EQUAL, regexStringComparator);
	FilterList filterList = new FilterList();
	filterList.addFilter(filter);
	Scan s1 = new Scan();
	// reverse timestamp, startRow is stopKey, and stopRow is startKey
	s1.setStartRow(stopKey);
	s1.setStopRow(startKey);
	s1.setFilter(filterList);
	// TODO the # of cached rows should be minimum of (pagesize and 100)
	s1.setCaching(100);
	// TODO not optimized for all applications
	s1.setCacheBlocks(true);
	// scan specified columnfamily and qualifiers
	for(byte[] qualifier : qualifiers){
		s1.addColumn(schema.getColumnFamily().getBytes(), qualifier);
	}
	rs = tbl.getScanner(s1);
	isOpen = true;
}
 
Example 12  Project: eagle   File: HBaseLogReader.java
/**
 * TODO: if the required field is null for a row, that row will not be fetched. That could be a
 * problem for counting. We need another version of read that strictly gets the number of rows and
 * returns all the columns for a column family.
 */
@Override
public void open() throws IOException {
    if (isOpen) {
        return; // silently return
    }
    try {
        tbl = EagleConfigFactory.load().getHTable(schema.getTable());
    } catch (RuntimeException ex) {
        throw new IOException(ex);
    }

    String rowkeyRegex = buildRegex2(searchTags);
    RegexStringComparator regexStringComparator = new RegexStringComparator(rowkeyRegex);
    regexStringComparator.setCharset(Charset.forName("ISO-8859-1"));
    RowFilter filter = new RowFilter(CompareOp.EQUAL, regexStringComparator);
    FilterList filterList = new FilterList();
    filterList.addFilter(filter);
    Scan s1 = new Scan();
    // reverse timestamp, startRow is stopKey, and stopRow is startKey
    s1.setStartRow(stopKey);
    s1.setStopRow(startKey);
    s1.setFilter(filterList);
    // TODO the # of cached rows should be minimum of (pagesize and 100)
    s1.setCaching(100);
    // TODO not optimized for all applications
    s1.setCacheBlocks(true);
    // scan specified columnfamily and qualifiers
    for (byte[] qualifier : qualifiers) {
        s1.addColumn(schema.getColumnFamily().getBytes(), qualifier);
    }
    rs = tbl.getScanner(s1);
    isOpen = true;
}
 
Example 13  Project: hbase   File: MetaTableAccessor.java
/**
 * Scans META table for a row whose key contains the specified <B>regionEncodedName</B>, returning
 * a single related <code>Result</code> instance if any row is found, null otherwise.
 * @param connection the connection to query META table.
 * @param regionEncodedName the region encoded name to look for at META.
 * @return <code>Result</code> instance with the row related info in META, null otherwise.
 * @throws IOException if any errors occur while querying META.
 */
public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName)
  throws IOException {
  RowFilter rowFilter =
    new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName));
  Scan scan = getMetaScan(connection, 1);
  scan.setFilter(rowFilter);
  ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan);
  return resultScanner.next();
}
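
A possible usage sketch for the method above, assuming an open Connection named connection; the encoded region name is a hypothetical value:

// Sketch: look up a region's meta row by its (hypothetical) encoded name.
Result result = MetaTableAccessor.scanByRegionEncodedName(connection, "1588230740");
if (result != null) {
  System.out.println("Meta row: " + Bytes.toString(result.getRow()));
}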
 
Example 14  Project: hbase   File: TestFromClientSide5.java
@Test
public void testJira6912() throws Exception {
  final TableName tableName = name.getTableName();
  try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] {FAMILY}, 10)) {

    List<Put> puts = new ArrayList<>();
    for (int i = 0; i != 100; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i));
      puts.add(put);
    }
    foo.put(puts);
    // If I comment this out it works
    TEST_UTIL.flush();

    Scan scan = new Scan();
    scan.withStartRow(Bytes.toBytes(1));
    scan.withStopRow(Bytes.toBytes(3));
    scan.addColumn(FAMILY, FAMILY);
    scan.setFilter(new RowFilter(CompareOperator.NOT_EQUAL,
            new BinaryComparator(Bytes.toBytes(1))));

    try (ResultScanner scanner = foo.getScanner(scan)) {
      Result[] bar = scanner.next(100);
      assertEquals(1, bar.length);
    }
  }
}
 
Example 15  Project: hbase   File: TestSerialization.java
@Test
public void testCompareFilter() throws Exception {
  Filter f =
    new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2")));
  byte[] bytes = f.toByteArray();
  Filter ff = RowFilter.parseFrom(bytes);
  assertNotNull(ff);
}
 
Example 16  Project: hbase   File: TestTableInputFormat.java
protected void initialize(JobConf job, String table) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
  TableName tableName = TableName.valueOf(table);
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
    Bytes.toBytes("columnB") };
  // mandatory
  setInputColumns(inputColumns);
  Filter exampleFilter =
    new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  // optional
  setRowFilter(exampleFilter);
}
 
Example 17  Project: spork   File: HBaseStorage.java
private void addRowFilter(CompareOp op, byte[] val) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Adding filter " + op.toString() +
                " with value " + Bytes.toStringBinary(val));
    }
    addFilter(new RowFilter(op, new BinaryComparator(val)));
}
 
Example 18  Project: spork   File: HBaseTableInputFormat.java
private boolean skipRegion(CompareOp op, byte[] key, byte[] option ) {

        if (key.length == 0 || option == null) 
            return false;

        BinaryComparator comp = new BinaryComparator(option);
        RowFilter rowFilter = new RowFilter(op, comp);
        return rowFilter.filterRowKey(key, 0, key.length);
    }
 
Example 19  Project: phoenix   File: ViewTTLIT.java
private void assertViewHeaderRowsHaveViewTTLRelatedCells(String schemaName, long minTimestamp,
        boolean rawScan, int expectedRows) throws IOException, SQLException {

    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    RowFilter schemaNameFilter = new RowFilter(
            CompareFilter.CompareOp.EQUAL,
            new SubstringComparator(schemaName)
    );
    QualifierFilter viewTTLQualifierFilter = new QualifierFilter(CompareFilter.CompareOp.EQUAL,
            new BinaryComparator(PhoenixDatabaseMetaData.VIEW_TTL_BYTES));
    filterList.addFilter(schemaNameFilter);
    filterList.addFilter(viewTTLQualifierFilter);
    try (Table tbl = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES)
            .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {

        Scan allRows = new Scan();
        allRows.setRaw(rawScan);
        allRows.setTimeRange(minTimestamp, HConstants.LATEST_TIMESTAMP);
        allRows.setFilter(filterList);
        ResultScanner scanner = tbl.getScanner(allRows);
        int numMatchingRows = 0;
        for (Result result = scanner.next(); result != null; result = scanner.next()) {
            numMatchingRows +=
                    result.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                            PhoenixDatabaseMetaData.VIEW_TTL_BYTES) ? 1 : 0;
        }
        assertEquals(String.format("Expected rows do not match for table = %s at timestamp %d",
                Bytes.toString(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES), minTimestamp), expectedRows, numMatchingRows);
    }

}
 
Example 20  Project: phoenix   File: MetaDataEndpointImpl.java
/**
 * @param tableName parent table's name
 * @return true if there exists a table that uses this table as its base table.
 * TODO: should we pass a timestamp here?
 */
private boolean hasViews(HRegion region, byte[] tenantId, PTable table) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    Scan scan = new Scan();
    // If the table is multi-tenant, we need to check across all tenant_ids,
    // so we can't constrain the row key. Otherwise, any views would have
    // the same tenantId.
    if (!table.isMultiTenant()) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter filter1 = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, BASE_SCHEMA_NAME_BYTES, EQUAL, schemaName);
    filter1.setFilterIfMissing(schemaName.length > 0);
    SingleColumnValueFilter filter2 = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, BASE_TABLE_NAME_BYTES, EQUAL, tableName);
    filter2.setFilterIfMissing(true);
    BinaryComparator comparator = new BinaryComparator(ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName));
    RowFilter filter3 = new RowFilter(CompareOp.NOT_EQUAL,comparator);
    Filter filter = new FilterList(filter1,filter2,filter3);
    scan.setFilter(filter);
    RegionScanner scanner = region.getScanner(scan);
    try {
        List<KeyValue> results = newArrayList();
        scanner.next(results);
        return results.size() > 0;
    }
    finally {
        scanner.close();
    }
}
 
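Example 21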
void addFilterByMapping( FilterList fl, CompareFilter.CompareOp comp, Class<?> comparatorClass, Object comparator,
                         Mapping.TupleMapping tupleMapping )
  throws NoSuchMethodException, InstantiationException, IllegalAccessException,
  java.lang.reflect.InvocationTargetException {
  switch ( tupleMapping ) {
    case KEY: {
      addFilter( RowFilter.class, fl, comp, comparatorClass, comparator );
      return;
    }
    case FAMILY: {
      addFilter( FamilyFilter.class, fl, comp, comparatorClass, comparator );
      return;
    }
    case COLUMN: {
      // TODO: check whether ColumnPrefixFilter works faster and is a better fit

      addFilter( QualifierFilter.class, fl, comp, comparatorClass, comparator );
      return;
    }
    case VALUE: {
      addFilter( ValueFilter.class, fl, comp, comparatorClass, comparator );
      return;
    }
    case TIMESTAMP: {
      addFilter( TimestampsFilter.class, fl, comp, comparatorClass, comparator );
      //        Constructor<TimestampsFilter> columnFilterConstructor =
      //          TimestampsFilter.class.getConstructor( CompareFilter.CompareOp.class, comparatorClass );
      //        TimestampsFilter scf = columnFilterConstructor.newInstance( comp, comparator );
      //        fl.addFilter( scf );
      return;
    }
  }
}
 
Example 22  Project: phoenix-omid   File: TestUpdateScan.java
@Test(timeOut = 10_000)
public void testGet(ITestContext context) throws Exception {
    try {
        TransactionManager tm = newTransactionManager(context);
        TTable table = new TTable(connection, TEST_TABLE);
        Transaction t = tm.begin();
        int[] lInts = new int[]{100, 243, 2342, 22, 1, 5, 43, 56};
        for (int i = 0; i < lInts.length; i++) {
            byte[] data = Bytes.toBytes(lInts[i]);
            Put put = new Put(data);
            put.addColumn(Bytes.toBytes(TEST_FAMILY), Bytes.toBytes(TEST_COL), data);
            table.put(t, put);
        }
        int startKeyValue = lInts[3];
        int stopKeyValue = lInts[3];
        byte[] startKey = Bytes.toBytes(startKeyValue);
        byte[] stopKey = Bytes.toBytes(stopKeyValue);
        Get g = new Get(startKey);
        Result r = table.get(t, g);
        if (!r.isEmpty()) {
            int tmp = Bytes.toInt(r.getValue(Bytes.toBytes(TEST_FAMILY), Bytes.toBytes(TEST_COL)));
            LOG.info("Result:" + tmp);
            assertTrue(tmp == startKeyValue, "Bad value, should be " + startKeyValue + " but is " + tmp);
        } else {
            Assert.fail("Bad result");
        }
        tm.commit(t);

        Scan s = new Scan(startKey);
        CompareFilter.CompareOp op = CompareFilter.CompareOp.LESS_OR_EQUAL;
        RowFilter toFilter = new RowFilter(op, new BinaryPrefixComparator(stopKey));
        boolean startInclusive = true;
        if (!startInclusive) {
            FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            filters.addFilter(new RowFilter(CompareFilter.CompareOp.GREATER, new BinaryPrefixComparator(startKey)));
            filters.addFilter(new WhileMatchFilter(toFilter));
            s.setFilter(filters);
        } else {
            s.setFilter(new WhileMatchFilter(toFilter));
        }
        t = tm.begin();
        ResultScanner res = table.getScanner(t, s);
        Result rr;
        int count = 0;
        while ((rr = res.next()) != null) {
            int iTmp = Bytes.toInt(rr.getValue(Bytes.toBytes(TEST_FAMILY), Bytes.toBytes(TEST_COL)));
            LOG.info("Result: " + iTmp);
            count++;
        }
        assertEquals(count, 1, "Count is wrong");
        LOG.info("Rows found " + count);
        tm.commit(t);
        table.close();
    } catch (Exception e) {
        LOG.error("Exception in test", e);
    }
}
 
Example 23  Project: java-docs-samples   File: Filters.java
public static void filterLimitRowRegex(String projectId, String instanceId, String tableId) {
  // A filter that matches cells from rows whose keys satisfy the given regex
  Filter filter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".*#20190501$"));
  Scan scan = new Scan().setFilter(filter).setMaxVersions();
  readWithFilter(projectId, instanceId, tableId, scan);
}
 
Example 24  Project: hbase   File: QuotaTableUtil.java
/**
 * Converts a QuotaFilter into a serializable FilterList.
 */
public static Filter makeFilter(final QuotaFilter filter) {
  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  if (StringUtils.isNotEmpty(filter.getUserFilter())) {
    FilterList userFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    boolean hasFilter = false;

    if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) {
      FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
      nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
      nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL,
          new RegexStringComparator(
            getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0)));
      userFilters.addFilter(nsFilters);
      hasFilter = true;
    }
    if (StringUtils.isNotEmpty(filter.getTableFilter())) {
      FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
      tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
      tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL,
          new RegexStringComparator(
            getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0)));
      userFilters.addFilter(tableFilters);
      hasFilter = true;
    }
    if (!hasFilter) {
      userFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
          new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
    }

    filterList.addFilter(userFilters);
  } else if (StringUtils.isNotEmpty(filter.getTableFilter())) {
    filterList.addFilter(new RowFilter(CompareOperator.EQUAL,
        new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0)));
  } else if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) {
    filterList.addFilter(new RowFilter(CompareOperator.EQUAL,
        new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0)));
  } else if (StringUtils.isNotEmpty(filter.getRegionServerFilter())) {
    filterList.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(
        getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0)));
  }
  return filterList;
}
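
A possible usage sketch; the QuotaFilter setter name below mirrors the getter used above and is an assumption, not verified against a specific HBase version:

// Sketch: build a serializable filter for quota rows of users matching "app_.*".
// QuotaFilter#setUserFilter is assumed to be the setter paired with getUserFilter().
QuotaFilter quotaFilter = new QuotaFilter().setUserFilter("app_.*");
Scan quotaScan = new Scan().setFilter(QuotaTableUtil.makeFilter(quotaFilter));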
 
Example 25  Project: spork   File: HBaseStorage.java
private void initScan() throws IOException{
    scan = new Scan();

    scan.setCacheBlocks(cacheBlocks_);
    scan.setCaching(caching_);

    // Set filters, if any.
    if (configuredOptions_.hasOption("gt")) {
        gt_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("gt")));
        addRowFilter(CompareOp.GREATER, gt_);
        scan.setStartRow(gt_);
    }
    if (configuredOptions_.hasOption("lt")) {
        lt_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("lt")));
        addRowFilter(CompareOp.LESS, lt_);
        scan.setStopRow(lt_);
    }
    if (configuredOptions_.hasOption("gte")) {
        gte_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("gte")));
        scan.setStartRow(gte_);
    }
    if (configuredOptions_.hasOption("lte")) {
        lte_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("lte")));
        byte[] lt = increment(lte_);
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Incrementing lte value of %s from bytes %s to %s to set stop row",
                      Bytes.toString(lte_), toString(lte_), toString(lt)));
        }

        if (lt != null) {
            scan.setStopRow(increment(lte_));
        }

        // The WhileMatchFilter will short-circuit the scan after we no longer match. The
        // setStopRow call will limit the number of regions we need to scan
        addFilter(new WhileMatchFilter(new RowFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(lte_))));
    }
    if (configuredOptions_.hasOption("regex")) {
        regex_ = Utils.slashisize(configuredOptions_.getOptionValue("regex"));
        addFilter(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(regex_)));
    }
    if (configuredOptions_.hasOption("minTimestamp") || configuredOptions_.hasOption("maxTimestamp")){
        scan.setTimeRange(minTimestamp_, maxTimestamp_);
    }
    if (configuredOptions_.hasOption("timestamp")){
        scan.setTimeStamp(timestamp_);
    }

    // if the group of columnInfos for this family doesn't contain a prefix, we don't need
    // to set any filters, we can just call addColumn or addFamily. See javadocs below.
    boolean columnPrefixExists = false;
    for (ColumnInfo columnInfo : columnInfo_) {
        if (columnInfo.getColumnPrefix() != null) {
            columnPrefixExists = true;
            break;
        }
    }

    if (!columnPrefixExists) {
        addFiltersWithoutColumnPrefix(columnInfo_);
    }
    else {
        addFiltersWithColumnPrefix(columnInfo_);
    }
}
 