com.google.protobuf.TextFormat.ParseException#org.apache.solr.common.SolrException Source Code Examples

Listed below are example usages of com.google.protobuf.TextFormat.ParseException and org.apache.solr.common.SolrException, collected from open source projects; follow each project link to view the full source on GitHub.

Example 1

@Override
public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
  String trainingFilterQueryString = params.get(KNN_FILTER_QUERY);
  try {
    if (trainingFilterQueryString != null && !trainingFilterQueryString.isEmpty()) {
      Query trainingFilterQuery = this.parseFilterQuery(trainingFilterQueryString, params, req);
      classificationParams.setTrainingFilterQuery(trainingFilterQuery);
    }
  } catch (SyntaxError | RuntimeException syntaxError) {
    throw new SolrException
        (SolrException.ErrorCode.SERVER_ERROR,
            "Classification UpdateProcessor Training Filter Query: '" + trainingFilterQueryString + "' is not supported", syntaxError);
  }

  IndexSchema schema = req.getSchema();
  IndexReader indexReader = req.getSearcher().getIndexReader();

  return new ClassificationUpdateProcessor(classificationParams, next, indexReader, schema);
}
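
The parseFilterQuery helper is not shown in this snippet; below is a hypothetical reconstruction of what it plausibly does, assuming the standard QParser entry point (the real implementation lives elsewhere in the factory class):

import org.apache.lucene.search.Query;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SyntaxError;

// Hypothetical reconstruction, for illustration only: parse the filter query
// string with the request's default query parser and return the Lucene Query.
private Query parseFilterQuery(String filterQueryString, SolrParams params, SolrQueryRequest req)
    throws SyntaxError {
  QParser parser = QParser.getParser(filterQueryString, req);
  return parser.getQuery();
}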
 
Example 2

@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
  final SolrInputDocument doc = cmd.getSolrInputDocument();

  final String math = doc.containsKey(ttlField) 
    ? doc.getFieldValue(ttlField).toString() : defaultTtl;

  if (null != math) {
    try {
      final DateMathParser dmp = new DateMathParser();
      // TODO: should we try to accept things like "1DAY" as well as "+1DAY" ?
      // How? 
      // 'startsWith("+")' is a bad idea because it would cause problems with
      // things like "/DAY+1YEAR"
      // Maybe catch ParseException and retry with "+" prepended?
      doc.addField(expireField, dmp.parseMath(math));
    } catch (ParseException pe) {
      throw new SolrException(BAD_REQUEST, "Can't parse ttl as date math: " + math, pe);
    }
  }

  super.processAdd(cmd);
}
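
The TODO above asks whether bare values like "1DAY" should be accepted alongside "+1DAY". Below is a minimal sketch of the "catch ParseException and retry with '+' prepended" idea it floats, assuming the same DateMathParser API; this is not part of the original processor:

import java.text.ParseException;
import java.util.Date;
import org.apache.solr.util.DateMathParser;

final class LenientTtlParsing {
  // Hypothetical helper: only retry when the input looks like a bare duration,
  // so expressions such as "/DAY+1YEAR" still succeed or fail on their own terms.
  static Date parseTtl(DateMathParser dmp, String math) throws ParseException {
    try {
      return dmp.parseMath(math);
    } catch (ParseException pe) {
      if (!math.isEmpty() && Character.isDigit(math.charAt(0))) {
        return dmp.parseMath("+" + math); // treat "1DAY" as "+1DAY"
      }
      throw pe;
    }
  }
}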
 
Example 3 (Project: lucene-solr, File: TestSolrQueryParser.java)
@Test
public void testManyClauses_Solr() throws Exception {
  final String a = "1 a 2 b 3 c 10 d 11 12 "; // 10 terms
  
  // this should exceed our solrconfig.xml level (solr specific) maxBooleanClauses limit
  // even though it's not long enough to trip the Lucene level (global) limit
  final String too_long = "id:(" + a + a + a + a + a + ")";

  final String expectedMsg = "Too many clauses";
  ignoreException(expectedMsg);
  SolrException e = expectThrows(SolrException.class, "expected SolrException",
                                 () -> assertJQ(req("q", too_long), "/response/numFound==6"));
  assertThat(e.getMessage(), containsString(expectedMsg));
  
  // but should still work as a filter query since TermsQuery can be used...
  assertJQ(req("q","*:*", "fq", too_long)
           ,"/response/numFound==6");
  assertJQ(req("q","*:*", "fq", too_long, "sow", "false")
           ,"/response/numFound==6");
  assertJQ(req("q","*:*", "fq", too_long, "sow", "true")
           ,"/response/numFound==6");
}
 
Example 4 (Project: lucene-solr, File: ClusterStateUtil.java)
public static boolean waitForLiveAndActiveReplicaCount(ZkStateReader zkStateReader,
    String collection, int replicaCount, int timeoutInMs) {
  long timeout = System.nanoTime()
      + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
  boolean success = false;
  while (!success && System.nanoTime() < timeout) {
    success = getLiveAndActiveReplicaCount(zkStateReader, collection) == replicaCount;
    
    if (!success) {
      try {
        Thread.sleep(TIMEOUT_POLL_MS);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted");
      }
    }
    
  }
  
  return success;
}
 
Example 5 (Project: carbon-apimgt, File: PlainTextIndexer.java)
public IndexDocument getIndexedDocument(File2Index fileData) throws SolrException,
        RegistryException {

  IndexDocument indexDoc = new IndexDocument(fileData.path, RegistryUtils.decodeBytes(fileData.data), null);

  Map<String, List<String>> fields = new HashMap<String, List<String>>();
  fields.put("path", Arrays.asList(fileData.path));

  if (fileData.mediaType != null) {
    fields.put(IndexingConstants.FIELD_MEDIA_TYPE, Arrays.asList(fileData.mediaType));
  } else {
    fields.put(IndexingConstants.FIELD_MEDIA_TYPE, Arrays.asList("text/(.)"));
  }

  indexDoc.setFields(fields);

  return indexDoc;
}
 
Example 6 (Project: lucene-solr, File: PackageManager.java)
/**
 * Given a package, return a map from each collection in which this package is
 * installed to the installed version (which can be {@link PackagePluginHolder#LATEST})
 */
public Map<String, String> getDeployedCollections(String packageName) {
  List<String> allCollections;
  try {
    allCollections = zkClient.getChildren(ZkStateReader.COLLECTIONS_ZKNODE, null, true);
  } catch (KeeperException | InterruptedException e) {
    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, e);
  }
  Map<String, String> deployed = new HashMap<String, String>();
  for (String collection: allCollections) {
    // Check package version installed
    String paramsJson = PackageUtils.getJsonStringFromUrl(solrClient.getHttpClient(), solrBaseUrl + PackageUtils.getCollectionParamsPath(collection) + "/PKG_VERSIONS?omitHeader=true");
    String version = null;
    try {
      version = JsonPath.parse(paramsJson, PackageUtils.jsonPathConfiguration())
          .read("$['response'].['params'].['PKG_VERSIONS'].['" + packageName + "']");
    } catch (PathNotFoundException ex) {
      // Don't worry if PKG_VERSIONS wasn't found. It just means this collection was never touched by the package manager.
    }
    if (version != null) {
      deployed.put(collection, version);
    }
  }
  return deployed;
}
 
Example 7 (Project: lucene-solr, File: DeleteReplicaCmd.java)
/**
 * Validate that the shard has more replicas available than the number requested
 * to be removed, and error out if only one replica is available.
 */
private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
  // If a specific shard is passed, check whether any replicas (or only one) would be left
  if (slice != null) {
    Collection<Replica> allReplicasForShard = slice.getReplicas();
    if (allReplicasForShard == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found  in shard/collection: " +
              shard + "/"  + collectionName);
    }


    if (allReplicasForShard.size() == 1) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
              shard + "/" + collectionName + ". Cannot delete that.");
    }

    if (allReplicasForShard.size() <= count) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
              shard + "/"  + collectionName  + " Requested: "  + count + " Available: " + allReplicasForShard.size() + ".");
    }
  }
}
 
Example 8 (Project: lucene-solr, File: ExtendedDismaxQParser.java)
protected DynamicField(String wildcard) {
  this.wildcard = wildcard;
  if (wildcard.equals("*")) {
    type = CATCHALL;
    str = null;
  } else if (wildcard.startsWith("*")) {
    type = ENDS_WITH;
    str = wildcard.substring(1);
  } else if (wildcard.endsWith("*")) {
    type = STARTS_WITH;
    str = wildcard.substring(0, wildcard.length() - 1);
  } else {
    throw new SolrException(ErrorCode.BAD_REQUEST, "dynamic field name must start or end with *");
  }
}
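
For context, the (type, str) pair computed above is later used to test concrete field names. A hedged sketch of that matching logic (the real ExtendedDismaxQParser keeps an equivalent method on DynamicField):

// Sketch only: how a field name is matched against the parsed wildcard.
boolean matches(String fieldName) {
  switch (type) {
    case CATCHALL:    return true;                       // "*" matches everything
    case STARTS_WITH: return fieldName.startsWith(str);  // from "foo*"
    case ENDS_WITH:   return fieldName.endsWith(str);    // from "*foo"
    default:          return false;
  }
}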
 
Example 9 (Project: lucene-solr, File: HttpPartitionOnCommitTest.java)
protected void sendCommitWithRetry(Replica replica) throws Exception {
  String replicaCoreUrl = replica.getCoreUrl();
  log.info("Sending commit request to: {}", replicaCoreUrl);
  final RTimer timer = new RTimer();
  try (HttpSolrClient client = getHttpSolrClient(replicaCoreUrl)) {
    try {
      client.commit();

      if (log.isInfoEnabled()) {
        log.info("Sent commit request to {} OK, took {}ms", replicaCoreUrl, timer.getTime());
      }
    } catch (Exception exc) {
      Throwable rootCause = SolrException.getRootCause(exc);
      if (rootCause instanceof NoHttpResponseException) {
        log.warn("No HTTP response from sending commit request to {}; will re-try after waiting 3 seconds", replicaCoreUrl);
        Thread.sleep(3000);
        client.commit();
        log.info("Second attempt at sending commit to {} succeeded", replicaCoreUrl);
      } else {
        throw exc;
      }
    }
  }
}
 
Example 10 (Project: lucene-solr, File: IndexFetcher.java)
private void downloadConfFiles(List<Map<String, Object>> confFilesToDownload, long latestGeneration) throws Exception {
  log.info("Starting download of configuration files from master: {}", confFilesToDownload);
  confFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
  File tmpconfDir = new File(solrCore.getResourceLoader().getConfigDir(), "conf." + getDateAsStr(new Date()));
  try {
    boolean status = tmpconfDir.mkdirs();
    if (!status) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
              "Failed to create temporary config folder: " + tmpconfDir.getName());
    }
    for (Map<String, Object> file : confFilesToDownload) {
      String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
      localFileFetcher = new LocalFsFileFetcher(tmpconfDir, file, saveAs, CONF_FILE_SHORT, latestGeneration);
      currentFile = file;
      localFileFetcher.fetchFile();
      confFilesDownloaded.add(new HashMap<>(file));
    }
    // this is called before copying the files to the original conf dir
    // so that, if there is an exception, we avoid corrupting the original files.
    terminateAndWaitFsyncService();
    copyTmpConfFiles2Conf(tmpconfDir);
  } finally {
    delTree(tmpconfDir);
  }
}
 
Example 11 (Project: lucene-solr, File: SolrRrdBackendFactory.java)
/**
 * Remove a database.
 * @param path database path.
 * @throws IOException on Solr exception
 */
public void remove(String path) throws IOException {
  SolrRrdBackend backend = backends.remove(path);
  if (backend != null) {
    IOUtils.closeQuietly(backend);
  }
  if (!persistent) {
    return;
  }
  // remove Solr doc
  try {
    solrClient.deleteByQuery(collection, "{!term f=id}" + ID_PREFIX + ID_SEP + path);
  } catch (SolrServerException | SolrException e) {
    log.warn("Error deleting RRD for path {}", path, e);
  }
}
 
Example 12 (Project: lucene-solr, File: TrieField.java)
/** Expert internal use, subject to change.
 * Returns the prefix of the main value of a trie field that indexes multiple precisions
 * per value, or null if there is no prefix or the prefix is not needed.
 */
public static String getMainValuePrefix(org.apache.solr.schema.FieldType ft) {
  if (ft instanceof TrieField) {
    final TrieField trie = (TrieField)ft;
    if (trie.precisionStep  == Integer.MAX_VALUE)
      return null;
    switch (trie.type) {
      case INTEGER:
      case FLOAT:
        return INT_PREFIX;
      case LONG:
      case DOUBLE:
      case DATE:
        return LONG_PREFIX;
      default:
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + trie.type);
    }
  }
  return null;
}
 
Example 13 (Project: lucene-solr, File: CurrencyValue.java)
@Override
public int compareTo(CurrencyValue o) {
  if (o == null) {
    throw new NullPointerException("Cannot compare CurrencyValue to a null value");
  }
  if (!getCurrencyCode().equals(o.getCurrencyCode())) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Cannot compare CurrencyValues when their currencies are not equal");
  }
  if (o.getAmount() < getAmount()) {
    return 1;
  }
  if (o.getAmount() == getAmount()) {
    return 0;
  }
  return -1;
}
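
A small usage sketch of the ordering contract above, assuming CurrencyValue exposes an (amount, currencyCode) constructor with amounts in minor units:

CurrencyValue tenUsd    = new CurrencyValue(1000, "USD"); // 10.00 USD in cents
CurrencyValue twentyUsd = new CurrencyValue(2000, "USD");

assert tenUsd.compareTo(twentyUsd) < 0;  // same currency: ordered by amount
assert twentyUsd.compareTo(tenUsd) > 0;
// Comparing against a different currency, e.g. new CurrencyValue(1000, "EUR"),
// throws SolrException(BAD_REQUEST) rather than guessing an exchange rate.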
 
Example 14 (Project: lucene-solr, File: DeleteCollectionCmd.java)
private List<String> checkAliasReference(ZkStateReader zkStateReader, String extCollection, boolean followAliases) throws Exception {
  Aliases aliases = zkStateReader.getAliases();
  List<String> aliasesRefs = referencedByAlias(extCollection, aliases, followAliases);
  List<String> aliasesToDelete = new ArrayList<>();
  if (aliasesRefs.size() > 0) {
    zkStateReader.aliasesManager.update(); // aliases may have been stale; get latest from ZK
    aliases = zkStateReader.getAliases();
    aliasesRefs = referencedByAlias(extCollection, aliases, followAliases);
    String collection = followAliases ? aliases.resolveSimpleAlias(extCollection) : extCollection;
    if (aliasesRefs.size() > 0) {
      for (String alias : aliasesRefs) {
        // for back-compat in 8.x we don't automatically remove other
        // aliases that point only to this collection
        if (!extCollection.equals(alias)) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "Collection: " + collection + " is part of aliases: " + aliasesRefs + "; remove or modify the aliases before removing this collection.");
        } else {
          aliasesToDelete.add(alias);
        }
      }
    }
  }
  return aliasesToDelete;
}
 
Example 15 (Project: lucene-solr, File: ZkController.java)
private void waitForShardId(CoreDescriptor cd) {
  if (log.isDebugEnabled()) {
    log.debug("waiting to find shard id in clusterstate for {}", cd.getName());
  }
  int retryCount = 320;
  while (retryCount-- > 0) {
    final String shardId = zkStateReader.getClusterState().getShardId(cd.getCollectionName(), getNodeName(), cd.getName());
    if (shardId != null) {
      cd.getCloudDescriptor().setShardId(shardId);
      return;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  throw new SolrException(ErrorCode.SERVER_ERROR,
      "Could not get shard id for core: " + cd.getName());
}
 
Example 16 (Project: lucene-solr, File: SumsqAgg.java)
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  ValueSource vs = getArg();

  if (vs instanceof FieldNameValueSource) {
    String field = ((FieldNameValueSource)vs).getFieldName();
    SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(field);
    if (sf.getType().getNumberType() == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          name() + " aggregation not supported for " + sf.getType().getTypeName());
    }
    if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
      if (sf.hasDocValues()) {
        if (sf.getType().isPointField()) {
          return new SumSqSortedNumericAcc(fcontext, sf, numSlots);
        }
        return new SumSqSortedSetAcc(fcontext, sf, numSlots);
      }
      if (sf.getType().isPointField()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            name() + " aggregation not supported for PointField w/o docValues");
      }
      return new SumSqUnInvertedFieldAcc(fcontext, sf, numSlots);
    }
    vs = sf.getType().getValueSource(sf, null);
  }
  return new SlotAcc.SumsqSlotAcc(vs, fcontext, numSlots);
}
 
Example 17 (Project: lucene-solr, File: SortableTextField.java)
@Override
public ValueSource getValueSource(SchemaField field, QParser parser) {
  if (! field.hasDocValues()) {
    // this type defaults to docValues=true, so the error message is written from
    // the perspective that either the type or the field must set docValues="false"
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                            "Can not use ValueSource on this type of field when docValues=\"false\", field: " + field.getName());
  }
  return super.getValueSource(field, parser);
}
 
Example 18 (Project: lucene-solr, File: Overseer.java)
private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
  final String operation = message.getStr(QUEUE_OPERATION);
  if (operation == null) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Message missing " + QUEUE_OPERATION + ":" + message);
  }
  List<ZkWriteCommand> zkWriteCommands = null;
  final Timer.Context timerContext = stats.time(operation);
  try {
    zkWriteCommands = processMessage(clusterState, message, operation);
    stats.success(operation);
  } catch (Exception e) {
    // generally there is nothing we can do - in most cases, we have
    // an issue that will fail again on retry, or we cannot communicate with
    // ZooKeeper, in which case another Overseer should take over
    // TODO: if ordering for the message is not important, we could
    // track retries and put it back on the end of the queue
    log.error("Overseer could not process the current clusterstate state update message, skipping the message: {}", message, e);
    stats.error(operation);
  } finally {
    timerContext.stop();
  }
  if (zkWriteCommands != null) {
    clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommands, callback);
    if (!enableBatching)  {
      clusterState = zkStateWriter.writePendingUpdates();
    }
  }
  return clusterState;
}
 
Example 19 (Project: lucene-solr, File: ConfigSetsHandler.java)
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
  if (coreContainer == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Core container instance missing");
  }

  // Make sure the CoreContainer is ZooKeeper-aware
  if (!coreContainer.isZooKeeperAware()) {
    throw new SolrException(ErrorCode.BAD_REQUEST,
        "Solr instance is not running in SolrCloud mode.");
  }

  // Pick the action
  SolrParams params = req.getParams();
  String a = params.get(ConfigSetParams.ACTION);
  if (a != null) {
    ConfigSetAction action = ConfigSetAction.get(a);
    if (action == null)
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
    if (action == ConfigSetAction.UPLOAD) {
      handleConfigUploadRequest(req, rsp);
      return;
    }
    invokeAction(req, rsp, action);
  } else {
    throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param");
  }

  rsp.setHttpCaching(false);
}
 
Example 20

private void tryDelete() throws Exception {
  long start = System.nanoTime();
  long timeout = start + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
  while (System.nanoTime() < timeout) {
    try {
      del("*:*");
      break;
    } catch (SolrServerException | SolrException e) {
      // cluster may not be up yet
      e.printStackTrace();
    }
    Thread.sleep(100);
  }
}
 
Example 21 (Project: lucene-solr, File: FacetComponent.java)
private void refinePivotFacets(ResponseBuilder rb, ShardRequest sreq) {
  // This is after the shard has returned the refinement request
  FacetInfo fi = rb._facetInfo;
  for (ShardResponse srsp : sreq.responses) {
    
    int shardNumber = rb.getShardNum(srsp.getShard());
    
    NamedList facetCounts = (NamedList) srsp.getSolrResponse().getResponse().get("facet_counts");
    
    @SuppressWarnings("unchecked")
    NamedList<List<NamedList<Object>>> pivotFacetResponsesFromShard 
      = (NamedList<List<NamedList<Object>>>) facetCounts.get(PIVOT_KEY);

    if (null == pivotFacetResponsesFromShard) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
                              "No pivot refinement response from shard: " + srsp.getShard());
    }
    
    for (Entry<String,List<NamedList<Object>>> pivotFacetResponseFromShard : pivotFacetResponsesFromShard) {
      PivotFacet masterPivotFacet = fi.pivotFacets.get(pivotFacetResponseFromShard.getKey());
      masterPivotFacet.mergeResponseFromShard(shardNumber, rb, pivotFacetResponseFromShard.getValue());  
      masterPivotFacet.removeAllRefinementsForShard(shardNumber);
    }
  }
  
  if (allPivotFacetsAreFullyRefined(fi)) {
    for (Entry<String,PivotFacet> pf : fi.pivotFacets) {
      pf.getValue().queuePivotRefinementRequests();
    }
    reQueuePivotFacetShardRequests(rb);
  }
}
 
Example 22 (Project: lucene-solr, File: QueryUtils.java)
/** Builds the query, throwing an exception if the configured max boolean clauses limit is exceeded. @lucene.experimental */
public static BooleanQuery build(BooleanQuery.Builder builder, QParser parser) {
  int configuredMax = parser != null ? parser.getReq().getCore().getSolrConfig().booleanQueryMaxClauseCount : IndexSearcher.getMaxClauseCount();
  BooleanQuery bq = builder.build();
  if (bq.clauses().size() > configuredMax) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Too many clauses in boolean query: encountered=" + bq.clauses().size() + " configured in solrconfig.xml via maxBooleanClauses=" + configuredMax);
  }
  return bq;
}
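
A hedged usage sketch: routing a hand-built BooleanQuery through this guard instead of calling builder.build() directly, so the solrconfig.xml limit is enforced (ids and parser are assumed to be in scope):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

BooleanQuery.Builder builder = new BooleanQuery.Builder();
for (String id : ids) {
  builder.add(new TermQuery(new Term("id", id)), BooleanClause.Occur.SHOULD);
}
// Throws SolrException(BAD_REQUEST) instead of silently building an oversized query.
BooleanQuery bq = QueryUtils.build(builder, parser);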
 
Example 23

@Slow
@Test
public void testInvalidMustMatch() throws Exception {
  String configName = getSaferTestName();
  createConfigSet(configName);
  // Not a valid regex
  final String mustMatchRegex = "+_solr";

  final int maxCardinality = Integer.MAX_VALUE; // max cardinality for current test

  List<String> retrievedConfigSetNames = new ConfigSetAdminRequest.List().process(solrClient).getConfigSets();
  List<String> expectedConfigSetNames = Arrays.asList("_default", configName);

  // config sets leak between tests so we can't be any more specific than this on the next 2 asserts
  assertTrue("We expect at least 2 configSets",
      retrievedConfigSetNames.size() >= expectedConfigSetNames.size());
  assertTrue("ConfigNames should include :" + expectedConfigSetNames, retrievedConfigSetNames.containsAll(expectedConfigSetNames));

  SolrException e = expectThrows(SolrException.class, () -> CollectionAdminRequest.createCategoryRoutedAlias(getAlias(), categoryField, maxCardinality,
      CollectionAdminRequest.createCollection("_unused_", configName, 1, 1)
          .setMaxShardsPerNode(2))
      .setMustMatch(mustMatchRegex)
      .process(solrClient)
  );

  assertTrue("Create Alias should fail since router.mustMatch must be a valid regular expression",
      e.getMessage().contains("router.mustMatch must be a valid regular expression"));
}
 
Example 24 (Project: lucene-solr, File: ZkStateReader.java)
/**
 * Get and cache collection properties for a given collection. If the collection is watched or still cached,
 * the properties are simply returned from the cache; otherwise they are fetched directly from ZooKeeper and
 * retained for at least cacheForMillis milliseconds. Cached properties are watched in ZooKeeper and updated automatically.
 * This version of {@code getCollectionProperties} should be used when properties need to be consulted
 * frequently in the absence of an active {@link CollectionPropsWatcher}.
 *
 * @param collection     The collection for which properties are desired
 * @param cacheForMillis The minimum number of milliseconds to maintain a cache for the specified collection's
 *                       properties. Setting a {@code CollectionPropsWatcher} will override this value and retain
 *                       the cache for the life of the watcher. A lack of changes in zookeeper may allow the
 *                       caching to remain for a greater duration up to the cycle time of {@link CacheCleaner}.
 *                       Passing zero for this value will explicitly remove the cached copy if and only if it is
 *                       due to expire and no watch exists. Any positive value will extend the expiration time
 *                       if required.
 * @return a map representing the key/value properties for the collection.
 */
public Map<String, String> getCollectionProperties(final String collection, long cacheForMillis) {
  synchronized (watchedCollectionProps) { // making decisions based on the result of a get...
    Watcher watcher = null;
    if (cacheForMillis > 0) {
      watcher = collectionPropsWatchers.compute(collection,
          (c, w) -> w == null ? new PropsWatcher(c, cacheForMillis) : w.renew(cacheForMillis));
    }
    VersionedCollectionProps vprops = watchedCollectionProps.get(collection);
    boolean haveUnexpiredProps = vprops != null && vprops.cacheUntilNs > System.nanoTime();
    long untilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(cacheForMillis, TimeUnit.MILLISECONDS);
    Map<String, String> properties;
    if (haveUnexpiredProps) {
      properties = vprops.props;
      vprops.cacheUntilNs = Math.max(vprops.cacheUntilNs, untilNs);
    } else {
      try {
        VersionedCollectionProps vcp = fetchCollectionProperties(collection, watcher);
        properties = vcp.props;
        if (cacheForMillis > 0) {
          vcp.cacheUntilNs = untilNs;
          watchedCollectionProps.put(collection, vcp);
        } else {
          // we're synchronized on watchedCollectionProps and we can only get here if we have found an expired
          // vprops above, so it is safe to remove the cached value and let the GC free up some mem a bit sooner.
          if (!collectionPropsObservers.containsKey(collection)) {
            watchedCollectionProps.remove(collection);
          }
        }
      } catch (Exception e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading collection properties", SolrZkClient.checkInterrupted(e));
      }
    }
    return properties;
  }
}
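
A short usage sketch of the caching contract described in the Javadoc, assuming an initialized ZkStateReader; the property key here is purely illustrative:

// Reuse the cached copy if fresh, otherwise fetch from ZooKeeper and cache for ~60s.
Map<String, String> props = zkStateReader.getCollectionProperties("myCollection", 60000L);
String someProp = props.getOrDefault("some.custom.property", "default"); // hypothetical key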
 
Example 25 (Project: incubator-sentry, File: SecureAdminHandlersTest.java)
private void verifyUnauthorized(RequestHandlerBase handler,
    String collection, String user, boolean shouldFailAdmin) throws Exception {
  String exMsgContains = "User " + user + " does not have privileges for " + (shouldFailAdmin?"admin":collection);
  SolrQueryRequest req = getRequest();
  prepareCollAndUser(core, req, collection, user, false);
  try {
    handler.handleRequestBody(req, new SolrQueryResponse());
    Assert.fail("Expected SolrException");
  } catch (SolrException ex) {
    assertEquals(ex.code(), SolrException.ErrorCode.UNAUTHORIZED.code);
    assertTrue(ex.getMessage().contains(exMsgContains));
  }
}
 
Example 26 (Project: lucene-solr, File: CollectionsHandler.java)
@SuppressWarnings({"unchecked"})
void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cores, CollectionAction action, CollectionOperation operation) throws Exception {
  if (!coreContainer.isZooKeeperAware()) {
    throw new SolrException(BAD_REQUEST,
        "Invalid request. collections can be accessed only in SolrCloud mode");
  }
  Map<String, Object> props = operation.execute(req, rsp, this);
  if (props == null) {
    return;
  }

  String asyncId = req.getParams().get(ASYNC);
  if (asyncId != null) {
    props.put(ASYNC, asyncId);
  }

  props.put(QUEUE_OPERATION, operation.action.toLower());

  if (operation.sendToOCPQueue) {
    ZkNodeProps zkProps = new ZkNodeProps(props);
    SolrResponse overseerResponse = sendToOCPQueue(zkProps, operation.timeOut);
    rsp.getValues().addAll(overseerResponse.getResponse());
    Exception exp = overseerResponse.getException();
    if (exp != null) {
      rsp.setException(exp);
    }

    //TODO yuck; shouldn't create-collection at the overseer do this?  (conditionally perhaps)
    if (action.equals(CollectionAction.CREATE) && asyncId == null) {
      if (rsp.getException() == null) {
        waitForActiveCollection(zkProps.getStr(NAME), cores, overseerResponse);
      }
    }

  } else {
    // submits and doesn't wait for anything (no response)
    coreContainer.getZkController().getOverseer().offerStateUpdate(Utils.toJSON(props));
  }

}
 
Example 27 (Project: lucene-solr, File: BackupRepositoryFactory.java)
public BackupRepositoryFactory(PluginInfo[] backupRepoPlugins) {
  if (backupRepoPlugins != null) {
    for (int i = 0; i < backupRepoPlugins.length; i++) {
      String name = backupRepoPlugins[i].name;
      boolean isDefault = backupRepoPlugins[i].isDefault();

      if (backupRepoPluginByName.containsKey(name)) {
        throw new SolrException(ErrorCode.SERVER_ERROR, "Duplicate backup repository with name " + name);
      }
      if (isDefault) {
        if (this.defaultBackupRepoPlugin != null) {
          throw new SolrException(ErrorCode.SERVER_ERROR, "More than one backup repository is configured as default");
        }
        this.defaultBackupRepoPlugin = backupRepoPlugins[i];
      }
      backupRepoPluginByName.put(name, backupRepoPlugins[i]);
      log.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
    }
    if (backupRepoPlugins.length == 1) {
      this.defaultBackupRepoPlugin = backupRepoPlugins[0];
    }

    if (this.defaultBackupRepoPlugin != null) {
      log.info("Default configuration for backup repository is with configuration params {}",
          defaultBackupRepoPlugin);
    }
  }
}
 
Example 28 (Project: lucene-solr, File: ManagedModelStore.java)
private void addModelFromMap(Map<String,Object> modelMap) {
  try {
    final LTRScoringModel algo = fromLTRScoringModelMap(solrResourceLoader, modelMap, managedFeatureStore);
    addModel(algo);
  } catch (final ModelException e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
}
 
Example 29 (Project: lucene-solr, File: FacetRangeProcessor.java)
/**
 * Parses the given list of maps and returns a list of Ranges
 *
 * @param input list of maps, each describing a range
 * @return list of {@link Range}
 */
private List<Range> parseRanges(Object input) {
  if (!(input instanceof List)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Expected List for ranges but got " + input.getClass().getSimpleName() + " = " + input
    );
  }
  @SuppressWarnings({"rawtypes"})
  List intervals = (List) input;
  List<Range> ranges = new ArrayList<>();
  for (Object obj : intervals) {
    if (!(obj instanceof Map)) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Expected Map for range but got " + obj.getClass().getSimpleName() + " = " + obj);
    }
    @SuppressWarnings({"unchecked"})
    Range range;
    @SuppressWarnings({"unchecked"})
    Map<String, Object> interval = (Map<String, Object>) obj;
    if (interval.containsKey("range")) {
      range = getRangeByOldFormat(interval);
    } else {
      range = getRangeByNewFormat(interval);
    }
    ranges.add(range);
  }
  return ranges;
}
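
For illustration, the List-of-Maps shape this method expects can be built as below; the "range" vs. "from"/"to" key names follow the old and new formats referenced above, though the exact accepted keys live in getRangeByOldFormat/getRangeByNewFormat:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

Map<String, Object> oldFormat = new HashMap<>();
oldFormat.put("range", "[0,100)");   // whole interval as one string

Map<String, Object> newFormat = new HashMap<>();
newFormat.put("from", 100);          // explicit bounds
newFormat.put("to", 200);

List<Map<String, Object>> ranges = Arrays.asList(oldFormat, newFormat);
// parseRanges(ranges) yields two Range objects; anything that is not a List of
// Maps trips the SolrException(BAD_REQUEST) paths shown above.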
 
Example 30 (Project: lucene-solr, File: SplitShardCmd.java)
public static void checkDiskSpace(String collection, String shard, Replica parentShardLeader, SolrIndexSplitter.SplitMethod method, SolrCloudManager cloudManager) throws SolrException {
  // check that enough disk space is available on the parent leader node
  // otherwise the actual index splitting will always fail
  NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
  Map<String, Object> nodeValues = nodeStateProvider.getNodeValues(parentShardLeader.getNodeName(),
      Collections.singletonList(ImplicitSnitch.DISK));
  Map<String, Map<String, List<ReplicaInfo>>> infos = nodeStateProvider.getReplicaInfo(parentShardLeader.getNodeName(),
      Collections.singletonList(Type.CORE_IDX.metricsAttribute));
  if (infos.get(collection) == null || infos.get(collection).get(shard) == null) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
  }
  // find the leader
  List<ReplicaInfo> lst = infos.get(collection).get(shard);
  Double indexSize = null;
  for (ReplicaInfo info : lst) {
    if (info.getCore().equals(parentShardLeader.getCoreName())) {
      Number size = (Number)info.getVariable(Type.CORE_IDX.metricsAttribute);
      if (size == null) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing index size information for parent shard leader");
      }
      indexSize = (Double) Type.CORE_IDX.convertVal(size);
      break;
    }
  }
  if (indexSize == null) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
  }
  Number freeSize = (Number)nodeValues.get(ImplicitSnitch.DISK);
  if (freeSize == null) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing node disk space information for parent shard leader");
  }
  // 100% more for REWRITE, 5% more for LINK
  double neededSpace = method == SolrIndexSplitter.SplitMethod.REWRITE ? 2.0 * indexSize : 1.05 * indexSize;
  if (freeSize.doubleValue() < neededSpace) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "not enough free disk space to perform index split on node " +
        parentShardLeader.getNodeName() + ", required: " + neededSpace + ", available: " + freeSize);
  }
}
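
A worked example of the headroom rule above: for a 10 GB parent index, a REWRITE split needs about 20 GB free on the leader's node, while a LINK split needs about 10.5 GB:

double indexSizeGb = 10.0;
double neededForRewrite = 2.0 * indexSizeGb;  // REWRITE copies the index: 20.0 GB
double neededForLink    = 1.05 * indexSizeGb; // LINK hard-links segments:  10.5 GB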