org.apache.lucene.search.DocValuesFieldExistsQuery#org.apache.solr.schema.SchemaField Source Code Examples

Listed below is example code for org.apache.lucene.search.DocValuesFieldExistsQuery#org.apache.solr.schema.SchemaField. You can also follow the project links to view the original source on GitHub.
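
Most of the examples below share one pattern: resolve a SchemaField from the IndexSchema, branch on its properties (multiValued(), hasDocValues(), whether the field type is numeric), and sometimes convert a readable value to its indexed BytesRef form. The following is a minimal sketch of that pattern, assuming a schema is already available; the class and method names SchemaFieldLookupSketch, describe, and toIndexed are illustrative and are not part of Solr or Lucene.

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;

// Minimal sketch; illustrative names, not part of Solr or Lucene.
public class SchemaFieldLookupSketch {

  /** Looks up a field and summarizes the properties the examples below typically branch on. */
  static String describe(IndexSchema schema, String fieldName) {
    // getFieldOrNull returns null for unknown fields; getField would throw instead.
    SchemaField sf = schema.getFieldOrNull(fieldName);
    if (sf == null) {
      return fieldName + ": not defined in schema";
    }
    FieldType type = sf.getType();
    return fieldName + ": multiValued=" + sf.multiValued()
        + ", docValues=" + sf.hasDocValues()
        + ", numeric=" + (type.getNumberType() != null);
  }

  /** Converts a human-readable value into its indexed BytesRef form, as several examples do. */
  static BytesRef toIndexed(SchemaField sf, String readable) {
    BytesRefBuilder builder = new BytesRefBuilder();
    sf.getType().readableToIndexed(readable, builder);
    return builder.get();
  }
}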

Example 1  Project: lucene-solr  File: UniqueBlockFieldAgg.java
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  final String fieldName = getArg();
  SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(fieldName);
  if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
    throw new IllegalArgumentException(name+"("+fieldName+
        ") doesn't allow multivalue fields, got " + sf);
  } else {
    if (sf.getType().getNumberType() != null) {
      throw new IllegalArgumentException(name+"("+fieldName+
          ") not yet support numbers " + sf);
    } else {
      return new UniqueBlockSlotAcc(fcontext, sf, numSlots);
    }
  }
}
 
Example 2
/**
 * This is a destructive call... the queue is empty at the end
 */
public NamedList<Integer> toNamedList(IndexSchema schema) {
	// reverse the list..
	List<TermInfo> aslist = new LinkedList<>();
	while (size() > 0) {
		aslist.add(0, (TermInfo) pop());
	}

	NamedList<Integer> list = new NamedList<>();
	for (TermInfo i : aslist) {
		String txt = i.term.text();
		SchemaField ft = schema.getFieldOrNull(i.term.field());
		if (ft != null) {
			txt = ft.getType().indexedToReadable(txt);
		}
		list.add(txt, i.docFreq);
	}
	return list;
}
 
Example 3  Project: lucene-solr  File: GeoDistValueSourceParser.java
private MultiValueSource parseSfield(FunctionQParser fp) throws SyntaxError {
  String sfield = fp.getParam(SpatialParams.FIELD);
  if (sfield == null) return null;
  SchemaField sf = fp.getReq().getSchema().getField(sfield);
  FieldType type = sf.getType();
  if (type instanceof AbstractSpatialFieldType) {
    @SuppressWarnings({"rawtypes"})
    AbstractSpatialFieldType asft = (AbstractSpatialFieldType) type;
    return new SpatialStrategyMultiValueSource(asft.getStrategy(sfield), asft.getDistanceUnits());
  }
  ValueSource vs = type.getValueSource(sf, fp);
  if (vs instanceof MultiValueSource) {
    return (MultiValueSource)vs;
  }
  throw new SyntaxError("Spatial field must implement MultiValueSource or extend AbstractSpatialFieldType:" + sf);
}
 
Example 4
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
 
Example 5  Project: lucene-solr  File: SolrIndexSearcher.java
/** Returns a weighted sort spec according to this searcher */
private SortSpec implWeightSortSpec(Sort originalSort, int num, int offset, Sort nullEquivalent) throws IOException {
  Sort rewrittenSort = weightSort(originalSort);
  if (rewrittenSort == null) {
    rewrittenSort = nullEquivalent;
  }

  final SortField[] rewrittenSortFields = rewrittenSort.getSort();
  final SchemaField[] rewrittenSchemaFields = new SchemaField[rewrittenSortFields.length];
  for (int ii = 0; ii < rewrittenSortFields.length; ++ii) {
    final String fieldName = rewrittenSortFields[ii].getField();
    rewrittenSchemaFields[ii] = (fieldName == null ? null : schema.getFieldOrNull(fieldName));
  }

  return new SortSpec(rewrittenSort, rewrittenSchemaFields, num, offset);
}
 
Example 6
@SuppressWarnings({"rawtypes"})
private SearchGroup<BytesRef> deserializeOneSearchGroup(SchemaField groupField, String groupValue,
    SortField[] groupSortField, List<Comparable> rawSearchGroupData) {
  SearchGroup<BytesRef> searchGroup = new SearchGroup<>();
  searchGroup.groupValue = null;
  if (groupValue != null) {
    if (groupField != null) {
      BytesRefBuilder builder = new BytesRefBuilder();
      groupField.getType().readableToIndexed(groupValue, builder);
      searchGroup.groupValue = builder.get();
    } else {
      searchGroup.groupValue = new BytesRef(groupValue);
    }
  }
  searchGroup.sortValues = rawSearchGroupData.toArray(new Comparable[rawSearchGroupData.size()]);
  for (int i = 0; i < searchGroup.sortValues.length; i++) {
    SchemaField field = groupSortField[i].getField() != null ? searcher.getSchema().getFieldOrNull(groupSortField[i].getField()) : null;
    searchGroup.sortValues[i] = ShardResultTransformerUtils.unmarshalSortValue(searchGroup.sortValues[i], field);
  }
  return searchGroup;
}
 
Example 7  Project: lucene-solr  File: RangeFacet.java
@Override
public void createFacetValueExecuters(final Filter filter, SolrQueryRequest queryRequest, Consumer<FacetValueQueryExecuter> consumer) {
  // Computes the end points of the ranges in the rangeFacet
  final FacetRangeGenerator<? extends Comparable<?>> rec = FacetRangeGenerator.create(this);
  final SchemaField sf = field;

  // Create a rangeFacetAccumulator for each range and
  // collect the documents for that range.
  for (FacetRange range : rec.getRanges()) {
    Query q = sf.getType().getRangeQuery(null, sf, range.lower, range.upper, range.includeLower, range.includeUpper);
    // The searcher sends docIds to the RangeFacetAccumulator which forwards
    // them to <code>collectRange()</code> in this class for collection.
    Query rangeQuery = QueryUtils.combineQueryAndFilter(q, filter);

    ReductionDataCollection dataCol = collectionManager.newDataCollection();
    reductionData.put(range.toString(), dataCol);
    consumer.accept(new FacetValueQueryExecuter(dataCol, rangeQuery));
  }
}
 
Example 8  Project: lucene-solr  File: SimpleFacets.java
/**
  * @param existsRequested facet.exists=true is passed for the given field
  * */
static FacetMethod selectFacetMethod(String fieldName, 
                                     SchemaField field, FacetMethod method, Integer mincount,
                                     boolean existsRequested) {
  if (existsRequested) {
    checkMincountOnExists(fieldName, mincount);
    if (method == null) {
      method = FacetMethod.ENUM;
    }
  }
  final FacetMethod facetMethod = selectFacetMethod(field, method, mincount);
  
  if (existsRequested && facetMethod!=FacetMethod.ENUM) {
    throw new SolrException (ErrorCode.BAD_REQUEST, 
        FacetParams.FACET_EXISTS + "=true is requested, but "+
        FacetParams.FACET_METHOD+"="+FacetParams.FACET_METHOD_enum+ " can't be used with "+fieldName
    );
  }
  return facetMethod;
}
 
Example 9  Project: lucene-solr  File: UnifiedSolrHighlighter.java
/**
 * Retrieves the unique keys for the topdocs to key the results
 */
protected String[] getUniqueKeys(SolrIndexSearcher searcher, int[] docIDs) throws IOException {
  IndexSchema schema = searcher.getSchema();
  SchemaField keyField = schema.getUniqueKeyField();
  if (keyField != null) {
    SolrReturnFields returnFields = new SolrReturnFields(keyField.getName(), null);
    String[] uniqueKeys = new String[docIDs.length];
    for (int i = 0; i < docIDs.length; i++) {
      int docid = docIDs[i];
      SolrDocument solrDoc = searcher.getDocFetcher().solrDoc(docid, returnFields);
      uniqueKeys[i] = schema.printableUniqueKey(solrDoc);
    }
    return uniqueKeys;
  } else {
    return new String[docIDs.length];
  }
}
 
Example 10  Project: lucene-solr  File: SimpleFacets.java
private Collector getInsanityWrapper(final String field, Collector collector) {
  SchemaField sf = searcher.getSchema().getFieldOrNull(field);
  if (sf != null && !sf.hasDocValues() && !sf.multiValued() && sf.getType().getNumberType() != null) {
    // it's a single-valued numeric field: we must currently create insanity :(
    // there isn't a GroupedFacetCollector that works on numerics right now...
    return new FilterCollector(collector) {
      @Override
      public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
        LeafReader insane = Insanity.wrapInsanity(context.reader(), field);
        return in.getLeafCollector(insane.getContext());
      }
    };
  } else {
    return collector;
  }
}
 
Example 11  Project: lucene-solr  File: SolrDocumentFetcher.java
/**
 * Returns a collection of the names of all stored fields known to the index reader that can be highlighted.
 */
public Collection<String> getStoredHighlightFieldNames() {
  synchronized (this) {
    if (storedHighlightFieldNames == null) {
      storedHighlightFieldNames = new LinkedList<>();
      for (FieldInfo fieldInfo : searcher.getFieldInfos()) {
        final String fieldName = fieldInfo.name;
        try {
          SchemaField field = searcher.getSchema().getField(fieldName);
          if (field.stored() && ((field.getType() instanceof org.apache.solr.schema.TextField)
              || (field.getType() instanceof org.apache.solr.schema.StrField))) {
            storedHighlightFieldNames.add(fieldName);
          }
        } catch (RuntimeException e) { // getField() throws a SolrException, but it arrives as a RuntimeException
          log.warn("Field [{}] found in index, but not defined in schema.", fieldName);
        }
      }
    }
    return storedHighlightFieldNames;
  }
}
 
Example 12  Project: lucene-solr  File: TestFacetMethods.java
@Test
public void testNumericSingleValuedDV() {

  for (int props : Arrays.asList(DOC_VALUES ^ UNINVERTIBLE,
                                 DOC_VALUES)) {
    SchemaField field = new SchemaField("field", new TrieIntField(), props, null);
    // default is FCS, can't use ENUM due to trie-field terms, FC rewrites to FCS for efficiency
    for (int mincount : Arrays.asList(0, 1)) {
      // behavior should be independent of mincount
      assertEquals(FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, null, mincount));
      assertEquals(FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.ENUM, mincount));
      assertEquals(FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.FC, mincount));
      assertEquals(FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.FCS, mincount));
      
      // UIF only allowed if field is UNINVERTIBLE
      assertEquals(propsMatch(props, UNINVERTIBLE) ? FacetMethod.UIF : FacetMethod.FCS,
                   SimpleFacets.selectFacetMethod(field, FacetMethod.UIF, 0));
    }
  }
}
 
Example 13  Project: lucene-solr  File: TestFacetMethods.java
@Test
public void testStringSingleValuedDV() {

  for (int props : Arrays.asList(DOC_VALUES ^ UNINVERTIBLE,
                                 DOC_VALUES)) {
    SchemaField field = new SchemaField("field", new StrField(), props, null);
    // default is FC, otherwise just uses the passed-in method as is unless UIF...
    for (int mincount : Arrays.asList(0, 1)) {
      // behavior should be independent of mincount
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, null, mincount));
      assertEquals(FacetMethod.ENUM, SimpleFacets.selectFacetMethod(field, FacetMethod.ENUM, mincount));
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, FacetMethod.FC, mincount));
      assertEquals(FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.FCS, mincount));
      // UIF only allowed if field is UNINVERTIBLE
      assertEquals(propsMatch(props, UNINVERTIBLE) ? FacetMethod.UIF : FacetMethod.FCS,
                   SimpleFacets.selectFacetMethod(field, FacetMethod.UIF, mincount));
    }
  }
}
 
Example 14  Project: lucene-solr  File: GroupConverter.java
static Collection<SearchGroup<BytesRef>> fromMutable(SchemaField field, Collection<SearchGroup<MutableValue>> values) {
  if (values == null) {
    return null;
  }
  FieldType fieldType = field.getType();
  List<SearchGroup<BytesRef>> result = new ArrayList<>(values.size());
  for (SearchGroup<MutableValue> original : values) {
    SearchGroup<BytesRef> converted = new SearchGroup<>();
    converted.sortValues = original.sortValues;
    if (original.groupValue.exists) {
      BytesRefBuilder binary = new BytesRefBuilder();
      fieldType.readableToIndexed(Utils.OBJECT_TO_STRING.apply(original.groupValue.toObject()), binary);
      converted.groupValue = binary.get();
    } else {
      converted.groupValue = null;
    }
    result.add(converted);
  }
  return result;
}
 
Example 15  Project: lucene-solr  File: AddUpdateCommand.java
/** Returns the indexed ID for this document.  The returned BytesRef is retained across multiple calls, and should not be modified. */
public BytesRef getIndexedId() {
  if (indexedId == null) {
    IndexSchema schema = req.getSchema();
    SchemaField sf = schema.getUniqueKeyField();
    if (sf != null) {
      if (solrDoc != null) {
        SolrInputField field = solrDoc.getField(sf.getName());

        int count = field==null ? 0 : field.getValueCount();
        if (count == 0) {
          if (overwrite) {
            throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Document is missing mandatory uniqueKey field: " + sf.getName());
          }
        } else if (count  > 1) {
          throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Document contains multiple values for uniqueKey field: " + field);
        } else {
          BytesRefBuilder b = new BytesRefBuilder();
          sf.getType().readableToIndexed(field.getFirstValue().toString(), b);
          indexedId = b.get();
        }
      }
    }
  }
  return indexedId;
}
 
Example 16  Project: lucene-solr  File: TestFacetMethods.java
@Test
public void testNumericMultiValuedNoDV() {

  for (int props : Arrays.asList(MULTIVALUED ^ UNINVERTIBLE,
                                 MULTIVALUED)) {
    SchemaField field = new SchemaField("field", new TrieIntField(), props, null);
    // FC is used by default for most requested methods other than UIF -- regardless of mincount
    for (int mincount : Arrays.asList(0, 1)) {
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, null, mincount));
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, FacetMethod.ENUM, mincount));
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, FacetMethod.FC, mincount));
      assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, FacetMethod.FCS, mincount));
    }
    // UIF allowed only if UNINVERTIBLE *AND* mincount > 0
    assertEquals(FacetMethod.FC, SimpleFacets.selectFacetMethod(field, FacetMethod.UIF, 0));
    assertEquals(propsMatch(props, UNINVERTIBLE) ? FacetMethod.UIF : FacetMethod.FC,
                 SimpleFacets.selectFacetMethod(field, FacetMethod.UIF, 1));
  }
}
 
Example 17  Project: lucene-solr  File: DirectUpdateHandler2.java
private Query getQuery(DeleteUpdateCommand cmd) {
  Query q;
  try {
    // move this higher in the stack?
    QParser parser = QParser.getParser(cmd.getQuery(), cmd.req);
    q = parser.getQuery();
    q = QueryUtils.makeQueryable(q);

    // Make sure not to delete newer versions
    if (ulog != null && cmd.getVersion() != 0 && cmd.getVersion() != -Long.MAX_VALUE) {
      BooleanQuery.Builder bq = new BooleanQuery.Builder();
      bq.add(q, Occur.MUST);
      SchemaField sf = ulog.getVersionInfo().getVersionField();
      ValueSource vs = sf.getType().getValueSource(sf, null);
      ValueSourceRangeFilter filt = new ValueSourceRangeFilter(vs, Long.toString(Math.abs(cmd.getVersion())), null, true, true);
      FunctionRangeQuery range = new FunctionRangeQuery(filt);
      bq.add(range, Occur.MUST_NOT);  // formulated in the "MUST_NOT" sense so we can delete docs w/o a version (some tests depend on this...)
      q = bq.build();
    }

    return q;

  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
}
 
Example 18
@Override
public void inform(SolrCore core) {
  final SchemaField field = core.getLatestSchema().getFieldOrNull(getSignatureField());
  if (null == field) {
    throw new SolrException
      (ErrorCode.SERVER_ERROR,
       "Can't use signatureField which does not exist in schema: "
       + getSignatureField());
  }

  if (getOverwriteDupes() && ( ! field.indexed() ) ) {
    throw new SolrException
      (ErrorCode.SERVER_ERROR,
       "Can't set overwriteDupes when signatureField is not indexed: "
       + getSignatureField());
  }
}
 
Example 19
private static SolrDocument toSolrDoc(Document doc, IndexSchema schema) {
  SolrDocument out = new SolrDocument();
  for ( IndexableField f : doc.getFields() ) {
    // Make sure multivalued fields are represented as lists
    Object existing = out.get(f.name());
    if (existing == null) {
      SchemaField sf = schema.getFieldOrNull(f.name());

      // don't return copyField targets
      if (sf != null && schema.isCopyFieldTarget(sf)) continue;

      if (sf != null && sf.multiValued()) {
        List<Object> vals = new ArrayList<>();
        vals.add( f );
        out.setField( f.name(), vals );
      }
      else{
        out.setField( f.name(), f );
      }
    }
    else {
      out.addField( f.name(), f );
    }
  }
  return out;
}
 
Example 20  Project: lucene-solr  File: TopGroupsFieldCommand.java
private TopGroupsFieldCommand(Query query,
                              SchemaField field,
                              Sort groupSort,
                              Sort withinGroupSort,
                              Collection<SearchGroup<BytesRef>> firstPhaseGroups,
                              int maxDocPerGroup,
                              boolean needScores,
                              boolean needMaxScore) {
  this.query = query;
  this.field = field;
  this.groupSort = groupSort;
  this.withinGroupSort = withinGroupSort;
  this.firstPhaseGroups = firstPhaseGroups;
  this.maxDocPerGroup = maxDocPerGroup;
  this.needScores = needScores;
  this.needMaxScore = needMaxScore;
}
 
Example 21  Project: lucene-solr  File: UniqueAgg.java
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(getArg());
  if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
    if (sf.getType().isPointField()) {
      return new SortedNumericAcc(fcontext, getArg(), numSlots);
    } else if (sf.hasDocValues()) {
      return new UniqueMultiDvSlotAcc(fcontext, sf, numSlots, null);
    } else {
      return new UniqueMultivaluedSlotAcc(fcontext, sf, numSlots, null);
    }
  } else {
    if (sf.getType().getNumberType() != null) {
      return new NumericAcc(fcontext, getArg(), numSlots);
    } else {
      return new UniqueSinglevaluedSlotAcc(fcontext, sf, numSlots, null);
    }
  }
}
 
Example 22  Project: SearchServices  File: StripLocaleStrField.java
@Override
public List<IndexableField> createFields(SchemaField field, Object value, float boost)
{
    Object newValue =
            ofNullable(value).map(String.class::cast)
                    .map(v -> v.replaceFirst("\\x{0000}.*\\x{0000}", ""))
                    .orElse(null);
    return super.createFields(field, newValue, boost);
}
 
Example 23  Project: lucene-solr  File: CursorMarkTest.java
private static Object getRandomCollation(SchemaField sf) throws IOException {
  Object val;
  Analyzer analyzer = sf.getType().getIndexAnalyzer();
  String term = TestUtil.randomRealisticUnicodeString(random());
  try (TokenStream ts = analyzer.tokenStream("fake", term)) {
    TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
    ts.reset();
    assertTrue(ts.incrementToken());
    val = BytesRef.deepCopyOf(termAtt.getBytesRef());
    assertFalse(ts.incrementToken());
    ts.end();
  }
  return val;
}
 
Example 24  Project: lucene-solr  File: LukeRequestHandler.java
private static List<String> toListOfStrings(SchemaField[] raw) {
  List<String> result = new ArrayList<>(raw.length);
  for (SchemaField f : raw) {
    result.add(f.getName());
  }
  return result;
}
 
Example 25
@SuppressWarnings("unchecked")
@Override
public void transform(SolrDocument doc, int docid, float score)
{
    Collection<String> fieldNames = new ArrayList<>(doc.getFieldNames());
    solrReturnFields = new SolrReturnFields(context.getRequest().getParams().get("originalFl"), context.getRequest());

    for (String fieldName : fieldNames)
    {
       SchemaField schemaField = context.getSearcher().getSchema().getFieldOrNull(fieldName);
       if(schemaField != null)
       {
           String alfrescoFieldName = AlfrescoSolrDataModel.getInstance().getAlfrescoPropertyFromSchemaField(fieldName);
           if (isRequestedField(alfrescoFieldName) || alfrescoFieldName.equals("id"))
           {
               Object value = doc.getFieldValue(fieldName);
               doc.removeFields(fieldName);
               if (schemaField.multiValued())
               {
                   Object collectionValue =
                           ((Collection<Object>) value).stream()
                                .map(elem -> getFieldValue(schemaField, elem))
                                .collect(Collectors.toSet());
                   doc.setField(alfrescoFieldName, collectionValue);
               }
               else
               {
                   doc.setField(transformToUnderscoreNotation(alfrescoFieldName), getFieldValue(schemaField, value));
               }
           }
           else
           {
               doc.removeFields(alfrescoFieldName);
               doc.removeFields(fieldName);
           }
       }
    }
}
 
Example 26  Project: lucene-solr  File: LukeRequestHandler.java
/**
 * @return a string representing a SchemaField's flags.  
 */
private static String getFieldFlags( SchemaField f )
{
  FieldType t = (f==null) ? null : f.getType();

  // see: http://www.nabble.com/schema-field-properties-tf3437753.html#a9585549
  boolean lazy = false; // "lazy" is purely a property of reading fields
  boolean binary = false; // Currently not possible

  StringBuilder flags = new StringBuilder();
  flags.append( (f != null && f.indexed())             ? FieldFlag.INDEXED.getAbbreviation() : '-' );
  flags.append( (t != null && t.isTokenized())         ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
  flags.append( (f != null && f.stored())              ? FieldFlag.STORED.getAbbreviation() : '-' );
  flags.append( (f != null && f.hasDocValues())        ? FieldFlag.DOC_VALUES.getAbbreviation() : "-" );
  flags.append( (f != null && f.isUninvertible())      ? FieldFlag.UNINVERTIBLE.getAbbreviation() : "-" );
  flags.append( (f != null && f.multiValued())         ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' );
  flags.append( (f != null && f.storeTermVector() )    ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' );
  flags.append( (f != null && f.storeTermOffsets() )   ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' );
  flags.append( (f != null && f.storeTermPositions() ) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' );
  flags.append( (f != null && f.storeTermPayloads() )  ? FieldFlag.TERM_VECTOR_PAYLOADS.getAbbreviation() : '-' );
  flags.append( (f != null && f.omitNorms())           ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' );
  flags.append( (f != null &&
      f.omitTermFreqAndPositions() )        ? FieldFlag.OMIT_TF.getAbbreviation() : '-' );
  flags.append( (f != null && f.omitPositions() )      ? FieldFlag.OMIT_POSITIONS.getAbbreviation() : '-' );
  flags.append( (f != null && f.storeOffsetsWithPositions() )      ? FieldFlag.STORE_OFFSETS_WITH_POSITIONS.getAbbreviation() : '-' );
  flags.append( (lazy)                                 ? FieldFlag.LAZY.getAbbreviation() : '-' );
  flags.append( (binary)                               ? FieldFlag.BINARY.getAbbreviation() : '-' );
  flags.append( (f != null && f.sortMissingFirst() )   ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' );
  flags.append( (f != null && f.sortMissingLast() )    ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' );
  return flags.toString();
}
 
Example 27
private static void implUpdateSchemaField(TestHarness h, String fieldName, IntUnaryOperator propertiesModifier) {
  try (SolrCore core = h.getCoreInc()) {

    // Add docvalues to the field type
    IndexSchema schema = core.getLatestSchema();
    SchemaField oldSchemaField = schema.getField(fieldName);
    SchemaField newSchemaField = new SchemaField(
        fieldName,
        oldSchemaField.getType(),
        propertiesModifier.applyAsInt(oldSchemaField.getProperties()),
        oldSchemaField.getDefaultValue());
    schema.getFields().put(fieldName, newSchemaField);
  }
}
 
Example 28  Project: lucene-solr  File: DIHConfiguration.java
public DIHConfiguration(Element element, DataImporter di,
    List<Map<String,String>> functions, Script script,
    Map<String,Map<String,String>> dataSources, PropertyWriter pw) {
  schema = di.getSchema();
  lowerNameVsSchemaField = null == schema ? Collections.<String,SchemaField>emptyMap() : loadSchemaFieldMap();
  this.deleteQuery = ConfigParseUtil.getStringAttribute(element, "deleteQuery", null);
  this.onImportStart = ConfigParseUtil.getStringAttribute(element, "onImportStart", null);
  this.onImportEnd = ConfigParseUtil.getStringAttribute(element, "onImportEnd", null);
  this.onError = ConfigParseUtil.getStringAttribute(element, "onError", null);
  List<Entity> modEntities = new ArrayList<>();
  List<Element> l = ConfigParseUtil.getChildNodes(element, "entity");
  boolean docRootFound = false;
  for (Element e : l) {
    Entity entity = new Entity(docRootFound, e, di, this, null);
    Map<String, EntityField> fields = gatherAllFields(di, entity);
    verifyWithSchema(fields);    
    modEntities.add(entity);
  }
  this.entities = Collections.unmodifiableList(modEntities);
  if(functions==null) {
    functions = Collections.emptyList();
  }
  List<Map<String, String>> modFunc = new ArrayList<>(functions.size());
  for(Map<String, String> f : functions) {
    modFunc.add(Collections.unmodifiableMap(f));
  }
  this.functions = Collections.unmodifiableList(modFunc);
  this.script = script;
  this.dataSources = Collections.unmodifiableMap(dataSources);
  this.propertyWriter = pw;
}
 
Example 29  Project: lucene-solr  File: StddevAgg.java
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  ValueSource vs = getArg();

  if (vs instanceof FieldNameValueSource) {
    String field = ((FieldNameValueSource) vs).getFieldName();
    SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(field);
    if (sf.getType().getNumberType() == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          name() + " aggregation not supported for " + sf.getType().getTypeName());
    }
    if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
      if (sf.hasDocValues()) {
        if (sf.getType().isPointField()) {
          return new StddevSortedNumericAcc(fcontext, sf, numSlots);
        }
        return new StddevSortedSetAcc(fcontext, sf, numSlots);
      }
      if (sf.getType().isPointField()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            name() + " aggregation not supported for PointField w/o docValues");
      }
      return new StddevUnInvertedFieldAcc(fcontext, sf, numSlots);
    }
    vs = sf.getType().getValueSource(sf, null);
  }
  return new SlotAcc.StddevSlotAcc(vs, fcontext, numSlots);
}
 
Example 30  Project: lucene-solr  File: MultiFieldWriter.java
public MultiFieldWriter(String field, FieldType fieldType, SchemaField schemaField, boolean numeric) {
  this.field = field;
  this.fieldType = fieldType;
  this.schemaField = schemaField;
  this.numeric = numeric;
  if (this.fieldType.isPointField()) {
    bitsToValue = bitsToValue(fieldType);
  } else {
    bitsToValue = null;
  }
}