Source Code Examples for the org.apache.hadoop.hbase.client.Append Class

The examples below show how the org.apache.hadoop.hbase.client.Append API is used in real projects; you can also follow each example's link to view the full source code on GitHub.
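
Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern. The table name "t1", column family "cf", qualifier "q", row key, and appended value are illustrative assumptions, not taken from any project below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("t1"))) {
            // Append atomically concatenates the given value to the current
            // cell value on the server side, avoiding a client-side
            // read-modify-write round trip.
            Append append = new Append(Bytes.toBytes("row1"));
            append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
            append.setReturnResults(true); // ask the server to return the post-append cells
            Result result = table.append(append);
            System.out.println(result);
        }
    }
}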

Example 1  Project: phoenix  File: ConnectionQueryServicesImpl.java
@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
 
Example 2  Project: phoenix  File: Sequence.java
public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.CREATE_SEQUENCE.ordinal()});
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith);
    familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)
            ));
    return append;
}
 
Example 3  Project: hbase  File: ThriftUtilities.java
public static Append appendFromThrift(TAppend append) throws IOException {
  Append out = new Append(append.getRow());
  for (TColumnValue column : append.getColumns()) {
    out.addColumn(column.getFamily(), column.getQualifier(), column.getValue());
  }

  if (append.isSetAttributes()) {
    addAttributes(out, append.getAttributes());
  }

  if (append.isSetDurability()) {
    out.setDurability(durabilityFromThrift(append.getDurability()));
  }

  if (append.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(append.getCellVisibility().getExpression()));
  }

  if (append.isSetReturnResults()) {
    out.setReturnResults(append.isReturnResults());
  }

  return out;
}
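
For context, the sketch below builds the thrift2 TAppend that the method above consumes and converts it. The row, family, qualifier, and value literals are illustrative assumptions, and only setters already exercised on this page (setRow, setFamily, setQualifier, setValue, addToColumns) are relied on.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TAppend;
import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.util.Bytes;

public class TAppendConversionExample {
    public static Append buildAndConvert() throws IOException {
        // One cell to append; the thrift-generated setters are chainable.
        TColumnValue columnValue = new TColumnValue();
        columnValue.setFamily(Bytes.toBytes("cf"))
            .setQualifier(Bytes.toBytes("q"))
            .setValue(Bytes.toBytes("v"));

        TAppend tAppend = new TAppend();
        tAppend.setRow(Bytes.toBytes("row1"));
        tAppend.addToColumns(columnValue);

        // Convert to a client-side Append, as in Example 3 above.
        return ThriftUtilities.appendFromThrift(tAppend);
    }
}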
 
Example 4  Project: hbase  File: ThriftUtilities.java
/**
 * From a {@link TAppend} create an {@link Append}.
 * @param tappend the Thrift version of an append.
 * @return an Append that the {@link TAppend} represented.
 */
public static Append appendFromThrift(TAppend tappend) {
  Append append = new Append(tappend.getRow());
  List<ByteBuffer> columns = tappend.getColumns();
  List<ByteBuffer> values = tappend.getValues();

  if (columns.size() != values.size()) {
    throw new IllegalArgumentException(
        "Sizes of columns and values in tappend object are not matching");
  }

  int length = columns.size();

  for (int i = 0; i < length; i++) {
    byte[][] famAndQf = CellUtil.parseColumn(getBytes(columns.get(i)));
    append.addColumn(famAndQf[0], famAndQf[1], getBytes(values.get(i)));
  }
  return append;
}
 
Example 5  Project: hbase  File: AccessController.java
@Override
public Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Append append) throws IOException {
  if (append.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    AuthResult authResult = null;
    User user = getActiveUser(c);
    if (checkCoveringPermission(user, OpType.APPEND, c.getEnvironment(), append.getRow(),
        append.getFamilyCellMap(), append.getTimeRange().getMax(), Action.WRITE)) {
      authResult = AuthResult.allow(OpType.APPEND.toString(),
          "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap());
    } else {
      authResult = AuthResult.deny(OpType.APPEND.toString(),
          "Covering cell set", user, Action.WRITE, table, append.getFamilyCellMap());
    }
    AccessChecker.logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }
  return null;
}
 
Example 6  Project: hbase  File: RSRpcServices.java
private static Get toGet(final Mutation mutation) throws IOException {
  if (!(mutation instanceof Increment) && !(mutation instanceof Append)) {
    throw new AssertionError("mutation must be an instance of Increment or Append");
  }
  Get get = new Get(mutation.getRow());
  CellScanner cellScanner = mutation.cellScanner();
  while (cellScanner.advance()) {
    Cell cell = cellScanner.current();
    get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
  }
  if (mutation instanceof Increment) {
    // Increment
    Increment increment = (Increment) mutation;
    get.setTimeRange(increment.getTimeRange().getMin(), increment.getTimeRange().getMax());
  } else {
    // Append
    Append append = (Append) mutation;
    get.setTimeRange(append.getTimeRange().getMin(), append.getTimeRange().getMax());
  }
  for (Entry<String, byte[]> entry : mutation.getAttributesMap().entrySet()) {
    get.setAttribute(entry.getKey(), entry.getValue());
  }
  return get;
}
 
Example 7  Project: hbase  File: RegionCoprocessorHost.java
/**
 * Supports Coprocessor 'bypass'.
 * @param append append object
 * @return result to return to client if default operation should be bypassed, null otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public Result preAppend(final Append append) throws IOException {
  boolean bypassable = true;
  Result defaultResult = null;
  if (this.coprocEnvironments.isEmpty()) {
    return defaultResult;
  }
  return execOperationWithResult(
    new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter, defaultResult,
          bypassable) {
        @Override
        public Result call(RegionObserver observer) throws IOException {
          return observer.preAppend(this, append);
        }
      });
}
 
Example 8  Project: hbase  File: RegionCoprocessorHost.java
/**
 * Supports Coprocessor 'bypass'.
 * @param append append object
 * @return result to return to client if default operation should be bypassed, null otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public Result preAppendAfterRowLock(final Append append) throws IOException {
  boolean bypassable = true;
  Result defaultResult = null;
  if (this.coprocEnvironments.isEmpty()) {
    return defaultResult;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter,
          defaultResult, bypassable) {
        @Override
        public Result call(RegionObserver observer) throws IOException {
          return observer.preAppendAfterRowLock(this, append);
        }
      });
}
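
Both methods above drive the RegionObserver preAppend hooks and honor coprocessor 'bypass': a non-null Result produced by a bypassing observer is returned to the client instead of running the default append. A minimal illustrative observer is sketched below; the class name, the "deny-append" attribute convention, and the empty Result are assumptions for demonstration only.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class DenyFlaggedAppendObserver implements RegionCoprocessor, RegionObserver {
    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
    }

    @Override
    public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
            throws IOException {
        if (append.getAttribute("deny-append") != null) {
            // Skip the default append processing; the returned Result is
            // what the client sees.
            c.bypass();
            return Result.EMPTY_RESULT;
        }
        return null; // proceed with the normal append
    }
}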
 
Example 9  Project: hbase  File: TestAccessController.java
@Test
public void testAppend() throws Exception {

  AccessTestAction appendAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      byte[] row = TEST_ROW;
      byte[] qualifier = TEST_QUALIFIER;
      Put put = new Put(row);
      put.addColumn(TEST_FAMILY, qualifier, Bytes.toBytes(1));
      Append append = new Append(row);
      append.addColumn(TEST_FAMILY, qualifier, Bytes.toBytes(2));
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.put(put);
        t.append(append);
      }
      return null;
    }
  };

  verifyAllowed(appendAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW,
    USER_GROUP_WRITE);
  verifyDenied(appendAction, USER_RO, USER_NONE, USER_GROUP_CREATE, USER_GROUP_READ,
    USER_GROUP_ADMIN);
}
 
Example 10  Project: hbase  File: TestRegionObserverInterface.java
@Test
public void testAppendHook() throws IOException {
  final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
  Table table = util.createTable(tableName, new byte[][] { A, B, C });
  try {
    Append app = new Append(Bytes.toBytes(0));
    app.addColumn(A, A, A);

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
      new Boolean[] { false, false, false });

    table.append(app);

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
      new Boolean[] { true, true, true });
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Example 11  Project: hbase
@Test
public void testChangeCellWithDifferntColumnFamily() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  createTableWithCoprocessor(tableName,
    ChangeCellWithDifferntColumnFamilyObserver.class.getName());

  try (Table table = connection.getTable(tableName)) {
    Increment increment = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1);
    table.increment(increment);
    Get get = new Get(ROW).addColumn(CF2_BYTES, CQ1);
    Result result = table.get(get);
    assertEquals(1, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(CF2_BYTES, CQ1)));

    Append append = new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE);
    table.append(append);
    get = new Get(ROW).addColumn(CF2_BYTES, CQ2);
    result = table.get(get);
    assertEquals(1, result.size());
    assertTrue(Bytes.equals(VALUE, result.getValue(CF2_BYTES, CQ2)));
  }
}
 
Example 12  Project: hbase  File: TestAppendTimeRange.java
@Override
public Result preAppend(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Append append) throws IOException {
  NavigableMap<byte [], List<Cell>> map = append.getFamilyCellMap();
  for (Map.Entry<byte [], List<Cell>> entry : map.entrySet()) {
    for (Cell cell : entry.getValue()) {
      String appendStr = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength());
      if (appendStr.equals("b")) {
        tr10 = append.getTimeRange();
      } else if (appendStr.equals("c") && !append.getTimeRange().isAllTime()) {
        tr2 = append.getTimeRange();
      }
    }
  }
  return null;
}
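
The hook above inspects the TimeRange carried by the client's Append. For reference, a hedged sketch of how a client sets it is shown below, assuming an open Table named table; the row, column, and bounds are illustrative.

Append append = new Append(Bytes.toBytes("row1"));
append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("b"));
// Constrain which existing cells the server reads when computing the
// appended value; the range is [minStamp, maxStamp).
append.setTimeRange(1L, 1000L);
table.append(append);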
 
Example 13  Project: hbase  File: MultiThreadedUpdaterWithACL.java
@Override
public Object run() throws Exception {
  try {
    if (table == null) {
      table = connection.getTable(tableName);
    }
    if (m instanceof Increment) {
      table.increment((Increment) m);
    } else if (m instanceof Append) {
      table.append((Append) m);
    } else if (m instanceof Put) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m);
    } else if (m instanceof Delete) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m);
    } else {
      throw new IllegalArgumentException("unsupported mutation "
          + m.getClass().getSimpleName());
    }
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    recordFailure(m, keyBase, start, e);
  }
  return null;
}
 
Example 14  Project: phoenix  File: Sequence.java
public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.CREATE_SEQUENCE.ordinal()});
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith);
    familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)
            ));
    return append;
}
 
Example 15  Project: phoenix  File: ConnectionQueryServicesImpl.java
@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, int cacheSize, long timestamp) 
        throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp);
        HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME_BYTES);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
 
Example 16  Project: phoenix  File: ConnectionQueryServicesImpl.java
@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SEQUENCE_TABLE_NAME_BYTES);
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
 
Example 17  Project: phoenix  File: Sequence.java
public Append createSequence(long startWith, long incrementBy, int cacheSize, long timestamp) {
    byte[] key = SchemaUtil.getSequenceKey(this.key.getTenantId(), this.key.getSchemaName(), this.key.getSequenceName());
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)SequenceRegionObserver.Op.CREATE_SEQUENCE.ordinal()});
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
    byte[] startWithBuf = PDataType.LONG.toBytes(startWith);
    familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PDataType.LONG.toBytes(incrementBy)),
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PDataType.INTEGER.toBytes(cacheSize))
            ));
    return append;
}
 
Example 18  Project: DDMQ  File: HBaseAction.java
@Override
public Status act(UpstreamJob job, byte[] bytes) {
    HBaseConnection connection = connectionMap.get(job.getTopic());
    if (connection == null) {
        LogUtils.logErrorInfo("HBASE_error", "no hbase connection for topic=" + job.getTopic());
        return FAIL;
    }

    if (CollectionUtils.isNotEmpty(job.getHbaseCommands())) {
        try {
            for (HbaseCommand hbaseCommand : job.getHbaseCommands()) {
                HTableInterface table = connection.getTable(hbaseCommand.getTableName());
                Mutation mutation = hbaseCommand.getMutation();

                if (mutation instanceof Put) {
                    table.put((Put) mutation);
                } else if (mutation instanceof Delete) {
                    table.delete((Delete) mutation);
                } else if (mutation instanceof Append) {
                    table.append((Append) mutation);
                } else if (mutation instanceof Increment) {
                    table.increment((Increment) mutation);
                }
            }
            MetricUtils.qpsAndFilterMetric(job, MetricUtils.ConsumeResult.SUCCESS);
            return FINISH;
        } catch (IOException e) {
            LogUtils.logErrorInfo("HBASE_error", "job=" + job, e);
            return FAIL;
        }
    } else {
        LogUtils.logErrorInfo("HBASE_error", "no hbase command found, group:{}, topic:{}", group, job.getTopic());
        return FAIL;
    }
}
 
Example 20
@Override
public void processTuple(T tuple, HTable table)
{
  try {
    Append append = operationAppend(tuple);
    table.append(append);
  } catch (IOException e) {
    logger.error("Could not output tuple", e);
    DTThrowable.rethrow(e);
  }
}
 
Example 21
@Override
public void processTuple(T tuple, HTable table)
{
  Append append = operationAppend(tuple);
  try {
    table.append(append);
  } catch (IOException e) {
    logger.error("Could not append tuple", e);
    DTThrowable.rethrow(e);
  }

}
 
Example 22
@Override
public Append operationAppend(HBaseTuple t)
{
  Append append = new Append(t.getRow().getBytes());
  append.add(t.getColFamily().getBytes(), t.getColName().getBytes(), t.getColValue().getBytes());
  return append;
}
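
Note that Append.add(byte[], byte[], byte[]) used above is the older client API; HBase 2.x deprecates it in favor of addColumn, which the other examples on this page use. A sketch of the same method against the newer API, keeping the original tuple accessors:

@Override
public Append operationAppend(HBaseTuple t)
{
  Append append = new Append(t.getRow().getBytes());
  // addColumn replaces the deprecated add(family, qualifier, value)
  append.addColumn(t.getColFamily().getBytes(), t.getColName().getBytes(), t.getColValue().getBytes());
  return append;
}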
 
Example 23  Project: phoenix  File: ConnectionQueryServicesImpl.java
@Override
public long createSequence(String tenantId, String schemaName, String sequenceName,
        long startWith, long incrementBy, long cacheSize, long minValue, long maxValue,
        boolean cycle, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
        HTableInterface htable =
                this.getTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
        htable.setAutoFlush(true);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result, minValue, maxValue, cycle);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
 
Example 24  Project: phoenix  File: Sequence.java
public List<Append> newReturns() {
    if (values == null) {
        return Collections.emptyList();
    }
    List<Append> appends = Lists.newArrayListWithExpectedSize(values.size());
    for (SequenceValue value : values) {
        if (value.isInitialized() && value.currentValue != value.nextValue) {
            appends.add(newReturn(value));
        }
    }
    return appends;
}
 
Example 25  Project: phoenix  File: Sequence.java
public Append newReturn(long timestamp) throws EmptySequenceCacheException {
    SequenceValue value = findSequenceValue(timestamp);
    if (value == null) {
        throw EMPTY_SEQUENCE_CACHE_EXCEPTION;
    }
    if (value.currentValue == value.nextValue) {
        throw EMPTY_SEQUENCE_CACHE_EXCEPTION;
    }
    return newReturn(value);
}
 
Example 26  Project: phoenix  File: Sequence.java
private Append newReturn(SequenceValue value) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    byte[] opBuf = new byte[] {(byte)MetaOp.RETURN_SEQUENCE.ordinal()};
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, opBuf);
    append.setAttribute(SequenceRegionObserver.CURRENT_VALUE_ATTRIB, PLong.INSTANCE.toBytes(value.nextValue));
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
            (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PLong.INSTANCE.toBytes(value.currentValue)),
            // set LIMIT_REACHED flag to false since we are returning unused sequence values
            (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, PDataType.FALSE_BYTES)
            ));
    return append;
}
 
Example 27  Project: phoenix  File: Sequence.java
public Append dropSequence(long timestamp) {
    byte[] key =  this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.DROP_SEQUENCE.ordinal()});
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
            (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
    return append;
}
 
Example 28  Project: hbase  File: ThriftUtilities.java
public static TAppend appendFromHBase(Append in) throws IOException {
  TAppend out = new TAppend();
  out.setRow(in.getRow());

  if (in.getDurability() != Durability.USE_DEFAULT) {
    out.setDurability(durabilityFromHBase(in.getDurability()));
  }
  for (Map.Entry<byte [], List<Cell>> entry : in.getFamilyCellMap().entrySet()) {
    byte[] family = entry.getKey();
    for (Cell cell : entry.getValue()) {
      TColumnValue columnValue = new TColumnValue();
      columnValue.setFamily(family)
          .setQualifier(CellUtil.cloneQualifier(cell))
          .setType(cell.getType().getCode())
          .setTimestamp(cell.getTimestamp())
          .setValue(CellUtil.cloneValue(cell));
      if (cell.getTagsLength() != 0) {
        columnValue.setTags(PrivateCellUtil.cloneTags(cell));
      }
      out.addToColumns(columnValue);
    }
  }
  for (Map.Entry<String, byte[]> attribute : in.getAttributesMap().entrySet()) {
    out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
        ByteBuffer.wrap(attribute.getValue()));
  }
  try {
    CellVisibility cellVisibility = in.getCellVisibility();
    if (cellVisibility != null) {
      TCellVisibility tCellVisibility = new TCellVisibility();
      tCellVisibility.setExpression(cellVisibility.getExpression());
      out.setCellVisibility(tCellVisibility);
    }
  } catch (DeserializationException e) {
    throw new RuntimeException(e);
  }
  out.setReturnResults(in.isReturnResults());
  return out;
}
 
Example 29  Project: hbase  File: VisibilityController.java
@Override
public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> e, Append append)
    throws IOException {
  // If authorization is not enabled, we don't care about reserved tags
  if (!authorizationEnabled) {
    return null;
  }
  for (CellScanner cellScanner = append.cellScanner(); cellScanner.advance();) {
    if (!checkForReservedVisibilityTagPresence(cellScanner.current())) {
      throw new FailedSanityCheckException("Append contains cell with reserved type tag");
    }
  }
  return null;
}
 
Example 30  Project: hbase  File: AccessController.java
@Override
public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
    throws IOException {
  User user = getActiveUser(c);
  checkForReservedTagPresence(user, append);

  // Require WRITE permission to the table, CF, and the KV to be appended
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[],? extends Collection<Cell>> families = append.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.APPEND, user,
      env, families, Action.WRITE);
  AccessChecker.logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      append.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled)  {
      throw new AccessDeniedException("Insufficient permissions " +
        authResult.toContextString());
    }
  }

  byte[] bytes = append.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, append.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }

  return null;
}
 