org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP source code examples

The examples below show how org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP is used in real open-source projects, each excerpted from the project and file named above it.
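
Before the examples, a quick orientation: HConstants.LATEST_TIMESTAMP is simply Long.MAX_VALUE, and it doubles as HBase's "timestamp not set" marker. A mutation created without an explicit timestamp carries LATEST_TIMESTAMP until the region server stamps it with its current time at write. A minimal sketch of both facts, assuming an HBase 2.x client on the classpath:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class LatestTimestampDemo {
    public static void main(String[] args) {
        // LATEST_TIMESTAMP is just Long.MAX_VALUE.
        System.out.println(HConstants.LATEST_TIMESTAMP == Long.MAX_VALUE); // true

        // A Put built without an explicit timestamp carries LATEST_TIMESTAMP;
        // the server substitutes its current time when the cell is written.
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        System.out.println(put.getTimestamp() == HConstants.LATEST_TIMESTAMP); // true
    }
}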

Example 1 - Project: phoenix, File: ValueGetterTuple.java
@Override
public KeyValue getValue(byte[] family, byte[] qualifier) {
    ImmutableBytesWritable value = null;
    try {
        value = valueGetter.getLatestValue(new ColumnReference(family, qualifier), ts);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    byte[] rowKey = valueGetter.getRowKey();
    int valueOffset = 0;
    int valueLength = 0;
    byte[] valueBytes = HConstants.EMPTY_BYTE_ARRAY;
    if (value != null) {
        valueBytes = value.get();
        valueOffset = value.getOffset();
        valueLength = value.getLength();
    }
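    // Fabricate a Put-typed KeyValue stamped LATEST_TIMESTAMP, i.e. the newest
    // possible version of this cell, so callers always see it as current.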
    return new KeyValue(rowKey, 0, rowKey.length, family, 0, family.length,
            qualifier, 0, qualifier.length, HConstants.LATEST_TIMESTAMP, Type.Put,
            valueBytes, valueOffset, valueLength);
}
 
Example 2 - Project: phoenix, File: MetaDataEndpointImpl.java
private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    
    Scan scan = newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    RegionScanner scanner = region.getScanner(scan);
    List<KeyValue> results = Lists.<KeyValue>newArrayList();
    scanner.next(results);
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        KeyValue kv = results.get(0);
        if (kv.isDelete()) {
            Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
            PTable table = newDeletedTableMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, table);
            return table;
        }
    }
    return null;
}
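
Here LATEST_TIMESTAMP is the open upper bound of the scan's time range: the method looks for a delete marker written after clientTimeStamp. Because raw scans do not honor the time range (HBASE-7362, as the inline comment notes), the code re-checks the first returned cell's timestamp by hand before trusting it.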
 
Example 3 - Project: phoenix, File: Sequence.java
public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.CREATE_SEQUENCE.ordinal()});
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith);
    familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
            PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)
            ));
    return append;
}
 
Example 4 - Project: phoenix, File: MetaDataClient.java
public MutationState dropSequence(DropSequenceStatement statement) throws SQLException {
    Long scn = connection.getSCN();
    long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
    String schemaName = statement.getSequenceName().getSchemaName();
    String sequenceName = statement.getSequenceName().getTableName();
    String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString();
    try {
        connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName, timestamp);
    } catch (SequenceNotFoundException e) {
        if (statement.ifExists()) {
            return new MutationState(0, connection);
        }
        throw e;
    }
    return new MutationState(1, connection);
}
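
The scn == null ? HConstants.LATEST_TIMESTAMP : scn pattern recurs throughout Phoenix (see also Examples 7, 11, 14, and 15): when the connection carries no SCN, LATEST_TIMESTAMP stands in for "operate at the current time". A sketch of supplying an explicit SCN, assuming a local Phoenix JDBC URL and using java.sql.DriverManager and java.util.Properties; "CurrentSCN" is Phoenix's SCN connection property:

Properties props = new Properties();
// With CurrentSCN unset, connection.getSCN() returns null and callers such as
// dropSequence() above fall back to HConstants.LATEST_TIMESTAMP.
props.setProperty("CurrentSCN", Long.toString(1600000000000L)); // hypothetical fixed time
Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);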
 
Example 5 - Project: HBase.MCC, File: HTableMultiCluster.java
private Put setTimeStampOfUnsetValues(final Put put, long ts)
        throws IOException {
  final Put newPut = new Put(put.getRow());
  for (Entry<byte[], List<Cell>> entity : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : entity.getValue()) {
      // If no timestamp was given, use the provided ts (now). This keeps a
      // multi-cluster submission from receiving different server-assigned timestamps.
      if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
        newPut.add(cell.getFamily(), cell.getQualifier(), ts, cell.getValue());
      } else {
        newPut.add(cell);
      }
    }
  }
  return newPut;
}
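
The cell.getFamily()/getQualifier()/getValue() accessors above are the old pre-1.0 Cell API. Below is a sketch of the same "stamp unset cells" check ported to the HBase 2.x CellBuilder API; this is an assumed port for illustration, not code from the project:

static Cell withTimestamp(Cell cell, long ts) {
    if (cell.getTimestamp() != HConstants.LATEST_TIMESTAMP) {
        return cell; // timestamp already set by the caller
    }
    return CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
            .setRow(CellUtil.cloneRow(cell))
            .setFamily(CellUtil.cloneFamily(cell))
            .setQualifier(CellUtil.cloneQualifier(cell))
            .setTimestamp(ts)
            .setType(Cell.Type.Put)
            .setValue(CellUtil.cloneValue(cell))
            .build();
}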
 
Example 6 - Project: terrapin, File: HFileReader.java
static KeyValue buildKeyValueForLookup(byte[] key) {
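  // LATEST_TIMESTAMP sorts ahead of any concrete timestamp for the same
  // row/family/qualifier (timestamps are ordered descending), so this synthetic
  // cell positions an HFile scanner at the newest stored version of the key.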
  return new KeyValue(key,
                      Constants.HFILE_COLUMN_FAMILY,
                      EMPTY_COLUMN,
                      HConstants.LATEST_TIMESTAMP,
                      KeyValue.Type.Put);
}
 
Example 7 - Project: phoenix, File: SequenceManager.java
public void validateSequences(Sequence.ValueOp action) throws SQLException {
    if (sequenceMap.isEmpty()) {
        return;
    }
    int maxSize = sequenceMap.size();
    long[] dstSequenceValues = new long[maxSize];
    sequencePosition = new int[maxSize];
    nextSequences = Lists.newArrayListWithExpectedSize(maxSize);
    currentSequences = Lists.newArrayListWithExpectedSize(maxSize);
    for (Map.Entry<SequenceKey, SequenceValueExpression> entry : sequenceMap.entrySet()) {
        if (isNextSequence.get(entry.getValue().getIndex())) {
            nextSequences.add(new SequenceAllocation(entry.getKey(), entry.getValue().getNumToAllocate()));
        } else {
            currentSequences.add(entry.getKey());
        }
    }
    long[] srcSequenceValues = new long[nextSequences.size()];
    SQLException[] sqlExceptions = new SQLException[nextSequences.size()];
    
    // Sort the next sequences to prevent deadlocks
    Collections.sort(nextSequences);

    // Create reverse indexes
    for (int i = 0; i < nextSequences.size(); i++) {
        sequencePosition[i] = sequenceMap.get(nextSequences.get(i).getSequenceKey()).getIndex();
    }
    int offset = nextSequences.size();
    for (int i = 0; i < currentSequences.size(); i++) {
        sequencePosition[i+offset] = sequenceMap.get(currentSequences.get(i)).getIndex();
    }
    ConnectionQueryServices services = this.statement.getConnection().getQueryServices();
    Long scn = statement.getConnection().getSCN();
    long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
    services.validateSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions, action);
    setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions);
}
 
Example 8 - Project: hbase, File: TestHFileOutputFormat2.java
/**
 * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into
 * hfile.
 */
@Test
public void test_WritingTagData()
    throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
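  // Cell tags are only persisted in HFile format v3 or later
  // (HFile.MIN_FORMAT_VERSION_WITH_TAGS), so the test pins the format version.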
  final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version";
  conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("WritingTagData");
  try {
    conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString());
    // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, false);
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    final byte [] b = Bytes.toBytes("b");

    List<Tag> tags = new ArrayList<>();
    tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670)));
    KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags);
    writer.write(new ImmutableBytesWritable(), kv);
    writer.close(context);
    writer = null;
    FileSystem fs = dir.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(dir, true);
    while(iterator.hasNext()) {
      LocatedFileStatus keyFileStatus = iterator.next();
      HFile.Reader reader =
          HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf);
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      Cell cell = scanner.getCell();
      List<Tag> tagsFromCell = PrivateCellUtil.getTags(cell);
      assertTrue(tagsFromCell.size() > 0);
      for (Tag tag : tagsFromCell) {
        assertTrue(tag.getType() == TagType.TTL_TAG_TYPE);
      }
    }
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
 
Example 9 - Project: phoenix, File: ValueGetterTuple.java
public ValueGetterTuple() {
    this.valueGetter = null;
    this.ts = HConstants.LATEST_TIMESTAMP;
}
 
Example 10 - Project: phoenix, File: PSchema.java
public PSchema(String schemaName) {
    this(schemaName, HConstants.LATEST_TIMESTAMP);
}
 
Example 11 - Project: phoenix, File: MetaDataClient.java
private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType,
        boolean ifExists, boolean cascade) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete tableDelete = new Delete(key, clientTimeStamp);
        tableMetaData.add(tableDelete);
        boolean hasViewIndexTable = false;
        boolean hasLocalIndexTable = false;
        if (parentTableName != null) {
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
            tableMetaData.add(linkDelete);
        } else {
            hasViewIndexTable = MetaDataUtil.hasViewIndexTable(connection, schemaName, tableName);
            hasLocalIndexTable = MetaDataUtil.hasLocalIndexTable(connection, schemaName, tableName);
        }

        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
        MutationCode code = result.getMutationCode();
        switch (code) {
        case TABLE_NOT_FOUND:
            if (!ifExists) { throw new TableNotFoundException(schemaName, tableName); }
            break;
        case NEWER_TABLE_FOUND:
            throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
        case UNALLOWED_TABLE_MUTATION:
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)

            .setSchemaName(schemaName).setTableName(tableName).build().buildException();
        default:
            connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName,
                    result.getMutationTime());
            if (result.getTable() != null && tableType != PTableType.VIEW) {
                connection.setAutoCommit(true);
                PTable table = result.getTable();
                boolean dropMetaData = result.getTable().getViewIndexId() == null &&
                        connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                long ts = (scn == null ? result.getMutationTime() : scn);
                // Create empty table and schema - they're only used to get the name from
                // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
                // All multi-tenant tables have a view index table, so no need to check in that case
                if (tableType == PTableType.TABLE
                        && (table.isMultiTenant() || hasViewIndexTable || hasLocalIndexTable)) {
                    MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
                    if (hasViewIndexTable) {
                        String viewIndexSchemaName = null;
                        String viewIndexTableName = null;
                        if (schemaName != null) {
                            viewIndexSchemaName = MetaDataUtil.getViewIndexTableName(schemaName);
                            viewIndexTableName = tableName;
                        } else {
                            viewIndexTableName = MetaDataUtil.getViewIndexTableName(tableName);
                        }
                        PTable viewIndexTable = new PTableImpl(null, viewIndexSchemaName, viewIndexTableName, ts,
                                table.getColumnFamilies());
                        tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                    }
                    if (hasLocalIndexTable) {
                        String localIndexSchemaName = null;
                        String localIndexTableName = null;
                        if (schemaName != null) {
                            localIndexSchemaName = MetaDataUtil.getLocalIndexTableName(schemaName);
                            localIndexTableName = tableName;
                        } else {
                            localIndexTableName = MetaDataUtil.getLocalIndexTableName(tableName);
                        }
                        PTable localIndexTable = new PTableImpl(null, localIndexSchemaName, localIndexTableName,
                                ts, Collections.<PColumnFamily> emptyList());
                        tableRefs.add(new TableRef(null, localIndexTable, ts, false));
                    }
                }
                tableRefs.add(new TableRef(null, table, ts, false));
                // TODO: Let the standard mutable secondary index maintenance handle this?
                for (PTable index : table.getIndexes()) {
                    tableRefs.add(new TableRef(null, index, ts, false));
                }
                deleteFromStatsTable(tableRefs, ts);
                if (!dropMetaData) {
                    MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null,
                            Collections.<PColumn> emptyList(), ts);
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    return connection.getQueryServices().updateData(plan);
                }
            }
            break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
 
Example 12 - Project: phoenix, File: TableNotFoundException.java
public TableNotFoundException(String schemaName, String tableName) {
    this(schemaName, tableName, HConstants.LATEST_TIMESTAMP);
}
 
Example 13 - Project: phoenix, File: ScanRanges.java
private static long safelyIncrement(long value) {
    return value < HConstants.LATEST_TIMESTAMP ? (value + 1) : HConstants.LATEST_TIMESTAMP;
}
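
The guard matters because LATEST_TIMESTAMP is Long.MAX_VALUE, so a naive value + 1 would overflow. For illustration:

System.out.println(HConstants.LATEST_TIMESTAMP + 1);              // -9223372036854775808 (wrapped)
System.out.println(safelyIncrement(HConstants.LATEST_TIMESTAMP)); // 9223372036854775807 (clamped)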
 
Example 14 - Project: phoenix, File: TephraTransactionContext.java
@Override
public long getTransactionId() {
    Transaction tx = getCurrentTransaction();
    return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getTransactionId(); // First write pointer - won't change with checkpointing
}
 
Example 15 - Project: phoenix, File: TephraTransactionContext.java
@Override
public long getWritePointer() {
    Transaction tx = getCurrentTransaction();
    return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getWritePointer();
}
 
Example 16 - Project: phoenix, File: FunctionNotFoundException.java
public FunctionNotFoundException(String functionName) {
    this(functionName, HConstants.LATEST_TIMESTAMP);
}
 
Example 17 - Project: phoenix, File: IndexNotFoundException.java
public IndexNotFoundException(String schemaName, String tableName) {
    this(schemaName, tableName, HConstants.LATEST_TIMESTAMP);
}
 
Example 18 - Project: hbase, File: Delete.java
/**
 * Create a Delete operation for the specified row and timestamp.<p>
 *
 * If no further operations are done, this will delete all columns in all
 * families of the specified row with a timestamp less than or equal to the
 * specified timestamp.<p>
 *
 * This timestamp is ONLY used for a delete row operation.  If specifying
 * families or columns, you must specify each timestamp individually.
 * @param row We make a local copy of this passed in row.
 * @param rowOffset offset of the row key within the array
 * @param rowLength length of the row key within the array
 */
public Delete(final byte[] row, final int rowOffset, final int rowLength) {
  this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
}
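
Because the implicit bound is LATEST_TIMESTAMP, a row Delete built this way covers every currently visible version of the row. A minimal usage sketch, assuming an open Table handle named table:

byte[] row = Bytes.toBytes("row1");
Delete delete = new Delete(row, 0, row.length); // timestamp defaults to LATEST_TIMESTAMP
table.delete(delete); // every existing version of "row1" is covered by the marker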
 
Example 19 - Project: hbase, File: CellModel.java
/**
 * Constructor
 * @param column column family name
 * @param qualifier column qualifier
 * @param value cell value
 */
public CellModel(byte[] column, byte[] qualifier, byte[] value) {
  this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
}
 
Example 20 - Project: hbase, File: Put.java
/**
 * We make a copy of the passed in row key to keep local.
 * @param rowArray byte array containing the row key
 * @param rowOffset offset of the row key within the array
 * @param rowLength length of the row key within the array
 */
public Put(byte [] rowArray, int rowOffset, int rowLength) {
  this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
}