Class org.apache.lucene.index.IndexOptions: source code examples (Demo)

The examples below show how to use the org.apache.lucene.index.IndexOptions API and its typical usage patterns; you can also follow the links to view the full source code on GitHub.
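
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; class and variable names are illustrative) of the usual pattern: pick an IndexOptions value on a FieldType, freeze it, and index a field with it. It assumes a recent Lucene release where ByteBuffersDirectory is available.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class IndexOptionsDemo {
  public static void main(String[] args) throws Exception {
    // Postings for this field will record documents and term frequencies,
    // but no positions or offsets.
    FieldType ft = new FieldType();
    ft.setTokenized(true);
    ft.setStored(true);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
    ft.freeze();

    try (Directory dir = new ByteBuffersDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new Field("body", "hello index options", ft));
      writer.addDocument(doc);
    }
  }
}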

@Override
public StandardnumberMapper build(BuilderContext context) {
    if (fieldType.indexOptions() != IndexOptions.NONE && !fieldType.tokenized()) {
        defaultFieldType.setOmitNorms(true);
        defaultFieldType.setIndexOptions(IndexOptions.DOCS);
        if (!omitNormsSet && Float.compare(fieldType.boost(), 1.0f) == 0) {
            fieldType.setOmitNorms(true);
        }
        if (!indexOptionsSet) {
            fieldType.setIndexOptions(IndexOptions.DOCS);
        }
    }
    setupFieldType(context);
    return new StandardnumberMapper(settingsBuilder.build(),
            name,
            fieldType,
            defaultFieldType,
            context.indexSettings(),
            multiFieldsBuilder.build(this, context),
            copyTo,
            service);
}
 
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    MemoryIndex memoryIndex = cache.get();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
            continue;
        }
        try {
            Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
            // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
            // like the indexer does
            try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            }
        } catch (Exception e) {
            throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e);
        }
    }
    context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument);
}
 
Example 3  Project: Elasticsearch   File: GeoPointFieldMapper.java
@Override
protected void parse(ParseContext context, GeoPoint point, String geoHash) throws IOException {
    if (ignoreMalformed.value() == false) {
        if (point.lat() > 90.0 || point.lat() < -90.0) {
            throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
        }
        if (point.lon() > 180.0 || point.lon() < -180) {
            throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
        }
    } else {
        // LUCENE WATCH: This will be folded back into Lucene's GeoPointField
        GeoUtils.normalizePoint(point);
    }
    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        context.doc().add(new GeoPointField(fieldType().names().indexName(), point.lon(), point.lat(), fieldType()));
    }
    super.parse(context, point, geoHash);
}
 
Example 4  Project: lucene-solr   File: LegacyFieldType.java
/** Prints a Field for human consumption. */
@Override
public String toString() {
  StringBuilder result = new StringBuilder();
  result.append(super.toString());
  if (indexOptions() != IndexOptions.NONE) {
    if (result.length() > 0) {
      result.append(",");
    }
    if (numericType != null) {
      result.append(",numericType=");
      result.append(numericType);
      result.append(",numericPrecisionStep=");
      result.append(numericPrecisionStep);
    }
  }
  return result.toString();
}
 
Example 5  Project: Elasticsearch   File: TypeFieldMapper.java
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    if (indexCreatedBefore2x == false) {
        return builder;
    }
    boolean includeDefaults = params.paramAsBoolean("include_defaults", false);

    // if all are defaults, no sense to write it at all
    boolean indexed = fieldType().indexOptions() != IndexOptions.NONE;
    boolean defaultIndexed = Defaults.FIELD_TYPE.indexOptions() != IndexOptions.NONE;
    if (!includeDefaults && fieldType().stored() == Defaults.FIELD_TYPE.stored() && indexed == defaultIndexed) {
        return builder;
    }
    builder.startObject(CONTENT_TYPE);
    if (includeDefaults || fieldType().stored() != Defaults.FIELD_TYPE.stored()) {
        builder.field("store", fieldType().stored());
    }
    if (includeDefaults || indexed != defaultIndexed) {
        builder.field("index", indexTokenizeOptionToString(indexed, fieldType().tokenized()));
    }
    builder.endObject();
    return builder;
}
 
Example 6  Project: lucene-solr   File: PreAnalyzedField.java
/**
 * Utility method to create a {@link org.apache.lucene.document.FieldType}
 * based on the {@link SchemaField}
 */
public static org.apache.lucene.document.FieldType createFieldType(SchemaField field) {
  if (!field.indexed() && !field.stored()) {
    log.trace("Ignoring unindexed/unstored field: {}", field);
    return null;
  }
  org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
  newType.setTokenized(field.isTokenized());
  newType.setStored(field.stored());
  newType.setOmitNorms(field.omitNorms());
  IndexOptions options = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
  if (field.omitTermFreqAndPositions()) {
    options = IndexOptions.DOCS;
  } else if (field.omitPositions()) {
    options = IndexOptions.DOCS_AND_FREQS;
  } else if (field.storeOffsetsWithPositions()) {
    options = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
  }
  newType.setIndexOptions(options);
  newType.setStoreTermVectors(field.storeTermVector());
  newType.setStoreTermVectorOffsets(field.storeTermOffsets());
  newType.setStoreTermVectorPositions(field.storeTermPositions());
  newType.setStoreTermVectorPayloads(field.storeTermPayloads());
  return newType;
}
 
Example 7  Project: lucene-solr   File: TestBooleanSimilarity.java
public void testSameNormsAsBM25() {
  BooleanSimilarity sim1 = new BooleanSimilarity();
  BM25Similarity sim2 = new BM25Similarity();
  sim2.setDiscountOverlaps(true);
  for (int iter = 0; iter < 100; ++iter) {
    final int length = TestUtil.nextInt(random(), 1, 100);
    final int position = random().nextInt(length);
    final int numOverlaps = random().nextInt(length);
    final int maxTermFrequency = 1;
    final int uniqueTermCount = 1;
    FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo", IndexOptions.DOCS_AND_FREQS, position, length, numOverlaps, 100, maxTermFrequency, uniqueTermCount);
    assertEquals(
        sim2.computeNorm(state),
        sim1.computeNorm(state),
        0f);
  }
}
 
Example 8  Project: Elasticsearch   File: StringFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    ValueAndBoost valueAndBoost = parseCreateFieldForString(context, fieldType().nullValueAsString(), fieldType().boost());
    if (valueAndBoost.value() == null) {
        return;
    }
    if (ignoreAbove > 0 && valueAndBoost.value().length() > ignoreAbove) {
        return;
    }
    if (context.includeInAll(includeInAll, this)) {
        context.allEntries().addText(fieldType().names().fullName(), valueAndBoost.value(), valueAndBoost.boost());
    }

    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        Field field = new Field(fieldType().names().indexName(), valueAndBoost.value(), fieldType());
        field.setBoost(valueAndBoost.boost());
        fields.add(field);
    }
    if (fieldType().hasDocValues()) {
        fields.add(new SortedSetDocValuesField(fieldType().names().indexName(), new BytesRef(valueAndBoost.value())));
    }
}
 
Example 9  Project: lucene-solr   File: Lucene60FieldInfosFormat.java
private static IndexOptions getIndexOptions(IndexInput input, byte b) throws IOException {
  switch (b) {
  case 0:
    return IndexOptions.NONE;
  case 1:
    return IndexOptions.DOCS;
  case 2:
    return IndexOptions.DOCS_AND_FREQS;
  case 3:
    return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
  case 4:
    return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
  default:
    // BUG
    throw new CorruptIndexException("invalid IndexOptions byte: " + b, input);
  }
}
 
Example 10  Project: lucene-solr   File: TestUnifiedHighlighter.java
private IndexReader indexSomeFields() throws IOException {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);
  FieldType ft = new FieldType();
  ft.setIndexOptions(IndexOptions.NONE);
  ft.setTokenized(false);
  ft.setStored(true);
  ft.freeze();

  Field title = new Field("title", "", fieldType);
  Field text = new Field("text", "", fieldType);
  Field category = new Field("category", "", fieldType);

  Document doc = new Document();
  doc.add(title);
  doc.add(text);
  doc.add(category);
  title.setStringValue("This is the title field.");
  text.setStringValue("This is the text field. You can put some text if you want.");
  category.setStringValue("This is the category field.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();
  return ir;
}
 
Example 11  Project: lucene-solr   File: ReadTokensTask.java
@Override
public int doLogic() throws Exception {
  List<IndexableField> fields = doc.getFields();
  Analyzer analyzer = getRunData().getAnalyzer();
  int tokenCount = 0;
  for(final IndexableField field : fields) {
    if (field.fieldType().indexOptions() == IndexOptions.NONE ||
        field.fieldType().tokenized() == false) {
      continue;
    }
    
    final TokenStream stream = field.tokenStream(analyzer, null);
    // reset the TokenStream to the first token
    stream.reset();

    TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
    while(stream.incrementToken()) {
      termAtt.getBytesRef();
      tokenCount++;
    }
    stream.end();
    stream.close();
  }
  totalTokenCount += tokenCount;
  return tokenCount;
}
 
Example 12  Project: lucene-solr   File: Field.java
/**
 * Create field with Reader value.
 * @param name field name
 * @param reader reader value
 * @param type field type
 * @throws IllegalArgumentException if either the name or type
 *         is null, or if the field's type is stored(), or
 *         if tokenized() is false.
 * @throws NullPointerException if the reader is null
 */
public Field(String name, Reader reader, IndexableFieldType type) {
  if (name == null) {
    throw new IllegalArgumentException("name must not be null");
  }
  if (type == null) {
    throw new IllegalArgumentException("type must not be null");
  }
  if (reader == null) {
    throw new NullPointerException("reader must not be null");
  }
  if (type.stored()) {
    throw new IllegalArgumentException("fields with a Reader value cannot be stored");
  }
  if (type.indexOptions() != IndexOptions.NONE && !type.tokenized()) {
    throw new IllegalArgumentException("non-tokenized fields must use String values");
  }
  
  this.name = name;
  this.fieldsData = reader;
  this.type = type;
}
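
As a hedged illustration of the constraints documented above (this helper is not part of Field.java), a Reader-valued field needs a tokenized, non-stored type; TextField.TYPE_NOT_STORED satisfies both checks:

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;

static Document readerBackedDoc() {
  // TextField.TYPE_NOT_STORED is tokenized and not stored, so the
  // Field(String, Reader, IndexableFieldType) constructor accepts it.
  Reader reader = new StringReader("some streamed text");
  Document doc = new Document();
  doc.add(new Field("body", reader, TextField.TYPE_NOT_STORED));
  return doc;
}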
 
Example 13  Project: lucene-solr   File: PresearcherTestBase.java
public void testNonStringTermHandling() throws IOException {

  FieldType ft = new FieldType();
  ft.setTokenized(true);
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);

  try (Monitor monitor = newMonitor()) {
    monitor.register(new MonitorQuery("1", new TermQuery(new Term("f", NON_STRING_TERM))));

    Document doc = new Document();
    doc.add(new Field("f", new NonStringTokenStream(), ft));
    MatchingQueries<QueryMatch> m = monitor.match(doc, QueryMatch.SIMPLE_MATCHER);
    assertEquals(1, m.getMatchCount());
    assertEquals(1, m.getQueriesRun());
  }
}
 
Example 14  Project: lucene-solr   File: TestBlockPostingsFormat2.java
private Document newDocument() {
  Document doc = new Document();
  for (IndexOptions option : IndexOptions.values()) {
    if (option == IndexOptions.NONE) {
      continue;
    }
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    // turn on tvs for a cross-check, since we rely upon checkindex in this test (for now)
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorOffsets(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorPayloads(true);
    ft.setIndexOptions(option);
    doc.add(new Field(option.toString(), "", ft));
  }
  return doc;
}
 
Example 15  Project: lucene-solr   File: Lucene50FieldInfosFormat.java
private static IndexOptions getIndexOptions(IndexInput input, byte b) throws IOException {
  switch (b) {
  case 0:
    return IndexOptions.NONE;
  case 1:
    return IndexOptions.DOCS;
  case 2:
    return IndexOptions.DOCS_AND_FREQS;
  case 3:
    return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
  case 4:
    return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
  default:
    // BUG
    throw new CorruptIndexException("invalid IndexOptions byte: " + b, input);
  }
}
 
Example 16  Project: lucene-solr   File: Lucene84PostingsReader.java
public BlockImpactsPostingsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException {
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();

  this.docIn = Lucene84PostingsReader.this.docIn.clone();

  this.posIn = Lucene84PostingsReader.this.posIn.clone();

  docFreq = termState.docFreq;
  docTermStartFP = termState.docStartFP;
  posTermStartFP = termState.posStartFP;
  payTermStartFP = termState.payStartFP;
  totalTermFreq = termState.totalTermFreq;
  docIn.seek(docTermStartFP);
  posPendingFP = posTermStartFP;
  posPendingCount = 0;
  if (termState.totalTermFreq < BLOCK_SIZE) {
    lastPosBlockFP = posTermStartFP;
  } else if (termState.totalTermFreq == BLOCK_SIZE) {
    lastPosBlockFP = -1;
  } else {
    lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
  }

  doc = -1;
  accum = 0;
  docUpto = 0;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene84ScoreSkipReader(docIn.clone(),
      MAX_SKIP_LEVELS,
      true,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq);
}
 
Example 17  Project: lucene-solr   File: PerFieldPostingsFormat.java
public FieldsReader(final SegmentReadState readState) throws IOException {

  // Read _X.per and init each format:
  boolean success = false;
  try {
    // Read field name -> format name
    for (FieldInfo fi : readState.fieldInfos) {
      if (fi.getIndexOptions() != IndexOptions.NONE) {
        final String fieldName = fi.name;
        final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
        if (formatName != null) {
          // null formatName means the field is in fieldInfos, but has no postings!
          final String suffix = fi.getAttribute(PER_FIELD_SUFFIX_KEY);
          if (suffix == null) {
            throw new IllegalStateException("missing attribute: " + PER_FIELD_SUFFIX_KEY + " for field: " + fieldName);
          }
          PostingsFormat format = PostingsFormat.forName(formatName);
          String segmentSuffix = getSuffix(formatName, suffix);
          if (!formats.containsKey(segmentSuffix)) {
            formats.put(segmentSuffix, format.fieldsProducer(new SegmentReadState(readState, segmentSuffix)));
          }
          fields.put(fieldName, formats.get(segmentSuffix));
        }
      }
    }
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(formats.values());
    }
  }

  this.segment = readState.segmentInfo.name;
}
 
Example 18  Project: Elasticsearch   File: BaseGeoPointFieldMapper.java
public Y build(Mapper.BuilderContext context) {
    ContentPath.Type origPathType = context.path().pathType();
    context.path().pathType(pathType);

    GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;

    DoubleFieldMapper latMapper = null;
    DoubleFieldMapper lonMapper = null;

    context.path().add(name);
    if (enableLatLon) {
        NumberFieldMapper.Builder<?, ?> latMapperBuilder = doubleField(Names.LAT).includeInAll(false);
        NumberFieldMapper.Builder<?, ?> lonMapperBuilder = doubleField(Names.LON).includeInAll(false);
        if (precisionStep != null) {
            latMapperBuilder.precisionStep(precisionStep);
            lonMapperBuilder.precisionStep(precisionStep);
        }
        latMapper = (DoubleFieldMapper) latMapperBuilder.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
        lonMapper = (DoubleFieldMapper) lonMapperBuilder.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
        geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
    }
    StringFieldMapper geoHashMapper = null;
    if (enableGeoHash || enableGeoHashPrefix) {
        // TODO: possible also implicitly enable geohash if geohash precision is set
        geoHashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
                .omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
        geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
    }
    context.path().remove();
    context.path().pathType(origPathType);

    return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType,
            latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
}
 
Example 19  Project: Elasticsearch   File: IpFieldMapper.java
@Override
protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
    String ipAsString;
    if (context.externalValueSet()) {
        ipAsString = (String) context.externalValue();
        if (ipAsString == null) {
            ipAsString = fieldType().nullValueAsString();
        }
    } else {
        if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
            ipAsString = fieldType().nullValueAsString();
        } else {
            ipAsString = context.parser().text();
        }
    }

    if (ipAsString == null) {
        return;
    }
    if (context.includeInAll(includeInAll, this)) {
        context.allEntries().addText(fieldType().names().fullName(), ipAsString, fieldType().boost());
    }

    final long value = ipToLong(ipAsString);
    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        CustomLongNumericField field = new CustomLongNumericField(value, fieldType());
        field.setBoost(fieldType().boost());
        fields.add(field);
    }
    if (fieldType().hasDocValues()) {
        addDocValue(context, fields, value);
    }
}
 
Example 20  Project: Elasticsearch   File: FieldMapper.java
protected static String indexOptionToString(IndexOptions indexOption) {
    switch (indexOption) {
        case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
            return TypeParsers.INDEX_OPTIONS_OFFSETS;
        case DOCS_AND_FREQS:
            return TypeParsers.INDEX_OPTIONS_FREQS;
        case DOCS_AND_FREQS_AND_POSITIONS:
            return TypeParsers.INDEX_OPTIONS_POSITIONS;
        case DOCS:
            return TypeParsers.INDEX_OPTIONS_DOCS;
        default:
            throw new IllegalArgumentException("Unknown IndexOptions [" + indexOption + "]");
    }
}
 
Example 21  Project: lucene-solr   File: Lucene50PostingsReader.java
@Override
public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, PostingsEnum reuse, int flags) throws IOException {
  
  boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;

  if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) {
    BlockDocsEnum docsEnum;
    if (reuse instanceof BlockDocsEnum) {
      docsEnum = (BlockDocsEnum) reuse;
      if (!docsEnum.canReuse(docIn, fieldInfo)) {
        docsEnum = new BlockDocsEnum(fieldInfo);
      }
    } else {
      docsEnum = new BlockDocsEnum(fieldInfo);
    }
    return docsEnum.reset((IntBlockTermState) termState, flags);
  } else {
    EverythingEnum everythingEnum;
    if (reuse instanceof EverythingEnum) {
      everythingEnum = (EverythingEnum) reuse;
      if (!everythingEnum.canReuse(docIn, fieldInfo)) {
        everythingEnum = new EverythingEnum(fieldInfo);
      }
    } else {
      everythingEnum = new EverythingEnum(fieldInfo);
    }
    return everythingEnum.reset((IntBlockTermState) termState, flags);
  }
}
 
Field newField(String name, String value, Store stored) {
    FieldType tagsFieldType = new FieldType();
    tagsFieldType.setStored(stored == Store.YES);
    IndexOptions opts = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    tagsFieldType.setIndexOptions(opts);
    return new Field(name, value, tagsFieldType);
}
 
Example 23  Project: Elasticsearch   File: IdFieldMapper.java
@Override
public Query termQuery(Object value, @Nullable QueryParseContext context) {
    if (indexOptions() != IndexOptions.NONE || context == null) {
        return super.termQuery(value, context);
    }
    final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
    return new TermsQuery(UidFieldMapper.NAME, uids);
}
 
Example 24  Project: crate   File: TextFieldMapper.java
PhraseFieldType(TextFieldType parent) {
    setTokenized(true);
    setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    if (parent.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
        setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    }
    if (parent.storeTermVectorOffsets()) {
        setStoreTermVectors(true);
        setStoreTermVectorPositions(true);
        setStoreTermVectorOffsets(true);
    }
    setAnalyzer(parent.indexAnalyzer().name(), parent.indexAnalyzer().analyzer());
    setName(parent.name() + FAST_PHRASE_SUFFIX);
    this.parent = parent;
}
 
Example 25  Project: lucene-solr   File: BlendedInfixSuggester.java
@Override
protected FieldType getTextFieldType() {
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
  ft.setStoreTermVectors(true);
  ft.setStoreTermVectorPositions(true);
  ft.setOmitNorms(true);

  return ft;
}
 
Example 26  Project: lucene-solr   File: FieldType.java
/**
 * Sets the indexing options for the field:
 * @param value indexing options
 * @throws IllegalStateException if this FieldType is frozen against
 *         future modifications.
 * @see #indexOptions()
 */
public void setIndexOptions(IndexOptions value) {
  checkIfFrozen();
  if (value == null) {
    throw new NullPointerException("IndexOptions must not be null");
  }
  this.indexOptions = value;
}
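
A short usage sketch (not part of FieldType.java; the helper name is illustrative): setIndexOptions must be called before freeze(), because checkIfFrozen() rejects changes to a frozen FieldType with an IllegalStateException, as the javadoc above states.

import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;

static FieldType positionsFieldType() {
  FieldType ft = new FieldType();
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);  // allowed while still mutable
  ft.freeze();
  // ft.setIndexOptions(IndexOptions.DOCS);  // would now throw IllegalStateException
  return ft;
}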
 
Example 27  Project: Elasticsearch   File: TimestampFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    if (enabledState.enabled) {
        long timestamp = context.sourceToParse().timestamp();
        if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
            fields.add(new LongFieldMapper.CustomLongNumericField(timestamp, fieldType()));
        }
        if (fieldType().hasDocValues()) {
            fields.add(new NumericDocValuesField(fieldType().names().indexName(), timestamp));
        }
    }
}
 
Example 28  Project: Elasticsearch   File: TypeFieldMapper.java
@Override
public Query termQuery(Object value, @Nullable QueryParseContext context) {
    if (indexOptions() == IndexOptions.NONE) {
        return new ConstantScoreQuery(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value)))));
    }
    return new ConstantScoreQuery(new TermQuery(createTerm(value)));
}
 
Example 29  Project: crate   File: BooleanFieldMapper.java
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    if (fieldType().indexOptions() == IndexOptions.NONE && !fieldType().stored() && !fieldType().hasDocValues()) {
        return;
    }

    Boolean value = context.parseExternalValue(Boolean.class);
    if (value == null) {
        XContentParser.Token token = context.parser().currentToken();
        if (token == XContentParser.Token.VALUE_NULL) {
            if (fieldType().nullValue() != null) {
                value = fieldType().nullValue();
            }
        } else {
            value = context.parser().booleanValue();
        }
    }

    if (value == null) {
        return;
    }
    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        fields.add(new Field(fieldType().name(), value ? "T" : "F", fieldType()));
    }
    if (fieldType().hasDocValues()) {
        fields.add(new SortedNumericDocValuesField(fieldType().name(), value ? 1 : 0));
    } else {
        createFieldNamesField(context, fields);
    }
}
 
private Field newField(String name, String value, Store stored) {
    FieldType tagsFieldType = new FieldType();
    tagsFieldType.setStored(stored == Store.YES);
    IndexOptions idxOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    tagsFieldType.setIndexOptions(idxOptions);
    return new Field(name, value, tagsFieldType);
}
 