org.apache.solr.client.solrj.request.CollectionAdminRequest Source Code Examples

Listed below are example usages of org.apache.solr.client.solrj.request.CollectionAdminRequest. You can follow the links to view the full source on GitHub, or post a comment in the panel on the right.
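
Before the collected examples, here is a minimal sketch of the pattern they all share: build a CollectionAdminRequest, send it with process(...) against a SolrClient, and check the response status. The collection name, config set name, and ZooKeeper address below are placeholders, not values taken from any of the listed projects.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class CreateCollectionSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address, no chroot.
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
      // Placeholder collection and config set names: 1 shard, 1 replica.
      CollectionAdminResponse rsp = CollectionAdminRequest
          .createCollection("myCollection", "myConfig", 1, 1)
          .process(client);
      if (rsp.getStatus() != 0) {
        throw new IllegalStateException("CREATE failed: " + rsp);
      }
    }
  }
}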

private boolean createCollection(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig, List<String> allCollectionList)
    throws SolrServerException, IOException {

  if (allCollectionList.contains(solrPropsConfig.getCollection())) {
    logger.info("Collection " + solrPropsConfig.getCollection() + " is already there. Won't create it");
    return true;
  }

  logger.info("Creating collection " + solrPropsConfig.getCollection() + ", numberOfShards=" + solrPropsConfig.getNumberOfShards() +
    ", replicationFactor=" + solrPropsConfig.getReplicationFactor());

  CollectionAdminRequest.Create collectionCreateRequest = CollectionAdminRequest.createCollection(
      solrPropsConfig.getCollection(), solrPropsConfig.getConfigName(), solrPropsConfig.getNumberOfShards(),
      solrPropsConfig.getReplicationFactor());
  collectionCreateRequest.setMaxShardsPerNode(calculateMaxShardsPerNode(solrPropsConfig));
  CollectionAdminResponse createResponse = collectionCreateRequest.process(solrClient);
  if (createResponse.getStatus() != 0) {
    logger.error("Error creating collection. collectionName=" + solrPropsConfig.getCollection() + ", response=" + createResponse);
    return false;
  } else {
    logger.info("Created collection " + solrPropsConfig.getCollection() + ", numberOfShards=" + solrPropsConfig.getNumberOfShards() +
      ", replicationFactor=" + solrPropsConfig.getReplicationFactor());
    return true;
  }
}
 
Example 2  Project: vind  File: CollectionManagementService.java
public CollectionManagementService(List<String> zkHost, String ... repositories) throws IOException {
    this(repositories);
    this.zkHosts = zkHost;

    try (CloudSolrClient client = new CloudSolrClient.Builder(zkHost, Optional.empty()).build()) {
        NamedList result = client.request(new CollectionAdminRequest.ClusterStatus());

        if (((NamedList) ((NamedList) result.get("cluster")).get("collections")).get(".system") == null) {
            logger.warn("Blob store '.system' for runtime libs is not yet created. Will create one");

            try {
                Create create = CollectionAdminRequest
                        .createCollection(".system", BLOB_STORE_SHARDS, BLOB_STORE_REPLICAS);
                create.process(client);
                logger.info("Blob store has been created successfully");
            } catch (SolrServerException e1) {
                throw new IOException("Blob store is not available and cannot be created", e1);
            }
        }

    } catch (SolrServerException | IOException e) {
        logger.error("Error in collection management service: {}", e.getMessage(), e);
        throw new IOException("Error in collection management service: " + e.getMessage(), e);
    }
}
 
Example 3  Project: vind  File: CollectionManagementService.java
/**
 * 1. Check if the config set is already deployed on the Solr server; if not, download it from the repo and upload it to ZooKeeper
 * 2. Create the collection
 * 3. Check if dependencies (runtime-libs) are installed; if not, download and install them (named group:artifact:version)
 * 4. Add/update the collection runtime-libs
 *
 * @param collectionName {@link String} name of the collection to create.
 * @param configName should be either the name of an already defined configuration in the solr cloud or the full
 *                   name of an artifact accessible in one of the default repositories.
 * @param numOfShards integer number of shards
 * @param numOfReplicas integer number of replicas
 * @param autoAddReplicas boolean flag that turns on Solr's autoAddReplicas functionality.
 * @throws IOException thrown if it is not possible to create the collection.
 */
public void createCollection(String collectionName, String configName, int numOfShards, int numOfReplicas, Boolean autoAddReplicas) throws IOException {
    checkAndInstallConfiguration(configName);

    try (CloudSolrClient client = createCloudSolrClient()) {
        Create create = CollectionAdminRequest.
                createCollection(collectionName, configName, numOfShards, numOfReplicas);
        if(Objects.nonNull(autoAddReplicas)) {
            create.setAutoAddReplicas(autoAddReplicas);
        }
        create.process(client);
        logger.info("Collection '{}' created", collectionName);
    } catch (IOException | SolrServerException e) {
        throw new IOException("Cannot create collection", e);
    }

    Map<String,Long> runtimeDependencies = checkAndInstallRuntimeDependencies(collectionName);

    addOrUpdateRuntimeDependencies(runtimeDependencies, collectionName);
}
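
A hypothetical call to the method above; the ZooKeeper hosts, collection name, and configuration artifact are made up for illustration and do not come from the vind project:

// Illustrative fragment only: host list, collection name, and config artifact are assumptions.
CollectionManagementService service =
    new CollectionManagementService(Arrays.asList("zk1:2181", "zk2:2181"));
// 2 shards, 1 replica, autoAddReplicas enabled.
service.createCollection("demo-collection", "com.example:demo-config:1.0", 2, 1, Boolean.TRUE);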
 
Example 4  Project: lucene-solr  File: TestSQLHandler.java
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(4)
      .addConfig("conf", configset("sql"))
      .configure();

  String collection;
  useAlias = random().nextBoolean();
  if (useAlias) {
    collection = COLLECTIONORALIAS + "_collection";
  } else {
    collection = COLLECTIONORALIAS;
  }

  CollectionAdminRequest.createCollection(collection, "conf", 2, 1).process(cluster.getSolrClient());
  cluster.waitForActiveCollection(collection, 2, 2);
  if (useAlias) {
    CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
  }
}
 
Example 5  Project: lucene-solr  File: CollectionsAPISolrJTest.java
@Test
public void testCloudInfoInCoreStatus() throws IOException, SolrServerException {
  String collectionName = "corestatus_test";
  CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
      .process(cluster.getSolrClient());

  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  
  cluster.waitForActiveCollection(collectionName, 2, 4);
  
  String nodeName = (String) response._get("success[0]/key", null);
  String corename = (String) response._get(asList("success", nodeName, "core"), null);

  try (HttpSolrClient coreclient = getHttpSolrClient(cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeName))) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(corename, coreclient);
    assertEquals(collectionName, status._get(asList("status", corename, "cloud", "collection"), null));
    assertNotNull(status._get(asList("status", corename, "cloud", "shard"), null));
    assertNotNull(status._get(asList("status", corename, "cloud", "replica"), null));
  }
}
 
Example 6  Project: lucene-solr  File: AliasIntegrationTest.java
@Test
public void testModifyPropertiesCAR() throws Exception {
  // note we don't use TZ in this test, thus it's UTC
  final String aliasName = getSaferTestName();
  ZkStateReader zkStateReader = createColectionsAndAlias(aliasName);
  CollectionAdminRequest.SetAliasProperty setAliasProperty = CollectionAdminRequest.setAliasProperty(aliasName);
  setAliasProperty.addProperty("foo","baz");
  setAliasProperty.addProperty("bar","bam");
  setAliasProperty.process(cluster.getSolrClient());
  checkFooAndBarMeta(aliasName, zkStateReader);

  // now verify we can delete
  setAliasProperty = CollectionAdminRequest.setAliasProperty(aliasName);
  setAliasProperty.addProperty("foo","");
  setAliasProperty.process(cluster.getSolrClient());
  setAliasProperty = CollectionAdminRequest.setAliasProperty(aliasName);
  setAliasProperty.addProperty("bar",null);
  setAliasProperty.process(cluster.getSolrClient());
  setAliasProperty = CollectionAdminRequest.setAliasProperty(aliasName);

  // whitespace value
  setAliasProperty.addProperty("foo"," ");
  setAliasProperty.process(cluster.getSolrClient());


}
 
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(1)
      .addConfig(CONFIG_NAME, new File(ExternalPaths.TECHPRODUCTS_CONFIGSET).toPath())
      .configure();

  final List<String> solrUrls = new ArrayList<>();
  solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl().toString());

  CollectionAdminRequest.createCollection(COLLECTION_NAME, CONFIG_NAME, 1, 1).process(cluster.getSolrClient());

  ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update");
  up.setParam("collection", COLLECTION_NAME);
  up.addFile(getFile("solrj/techproducts.xml"), "application/xml");
  up.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
  UpdateResponse updateResponse = up.process(cluster.getSolrClient());
  assertEquals(0, updateResponse.getStatus());
}
 
public void testAddReplica() {
  // with shard parameter and "client side" implicit type param
  CollectionAdminRequest.AddReplica request = CollectionAdminRequest.addReplicaToShard("collection", "shard");
  assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, ZkStateReader.REPLICA_TYPE);
  
  // with only shard parameter and "server side" implicit type, so no param
  request = CollectionAdminRequest.addReplicaToShard("collection", "shard", null);
  assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD);
  
  // with route parameter
  request = CollectionAdminRequest.addReplicaByRouteKey("collection","route");
  assertContainsParams(request.getParams(), ACTION, COLLECTION, ShardParams._ROUTE_);
  
  // with explicit type parameter
  request = CollectionAdminRequest.addReplicaToShard("collection", "shard", Replica.Type.NRT);
  assertContainsParams(request.getParams(), ACTION, COLLECTION, SHARD, ZkStateReader.REPLICA_TYPE);
}
 
Example 9  Project: lucene-solr  File: CustomCollectionTest.java
@Test
public void testRouteFieldForImplicitRouter() throws Exception {

  int numShards = 4;
  int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
  int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
  String shard_fld = "shard_s";

  final String collection = "withShardField";

  CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
      .setMaxShardsPerNode(maxShardsPerNode)
      .setRouterField(shard_fld)
      .process(cluster.getSolrClient());

  new UpdateRequest()
      .add("id", "6", shard_fld, "a")
      .add("id", "7", shard_fld, "a")
      .add("id", "8", shard_fld, "b")
      .commit(cluster.getSolrClient(), collection);

  assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
  assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
  assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());

}
 
Example 10  Project: lucene-solr  File: TestUtilizeNode.java
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(3)
      .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
      .configure();
  NamedList<Object> overSeerStatus = cluster.getSolrClient().request(CollectionAdminRequest.getOverseerStatus());
  JettySolrRunner overseerJetty = null;
  String overseerLeader = (String) overSeerStatus.get("leader");
  for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
    JettySolrRunner jetty = cluster.getJettySolrRunner(i);
    if (jetty.getNodeName().equals(overseerLeader)) {
      overseerJetty = jetty;
      break;
    }
  }
  if (overseerJetty == null) {
    fail("no overseer leader!");
  }
}
 
Example 11  Project: lucene-solr  File: CollectionsAPISolrJTest.java
@Test
public void testClusterProp() throws InterruptedException, IOException, SolrServerException {

  // sanity check our expected default
  final ClusterProperties props = new ClusterProperties(zkClient());
  assertEquals("Expecting prop to default to unset, test needs upated",
               props.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null), null);
  
  CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, "true")
    .process(cluster.getSolrClient());
  assertEquals(0, response.getStatus());
  assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null), "true");

  // Unset ClusterProp that we set.
  CollectionAdminRequest.setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null).process(cluster.getSolrClient());
  assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null), null);

  response = CollectionAdminRequest.setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, "false")
      .process(cluster.getSolrClient());
  assertEquals(0, response.getStatus());
  assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null), "false");
}
 
Example 12  Project: lucene-solr  File: TestPullReplica.java
@Override
public void tearDown() throws Exception {
  for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
    if (!jetty.isRunning()) {
      log.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
      jetty.start();
    }
  }
  if (cluster.getSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName) != null) {
    log.info("tearDown deleting collection");
    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
    log.info("Collection deleted");
    waitForDeletion(collectionName);
  }
  super.tearDown();
}
 
Example 13  Project: lucene-solr  File: TestRebalanceLeaders.java
@BeforeClass
public static void setupCluster() throws Exception {

  numNodes = random().nextInt(4) + 4;
  numShards = random().nextInt(3) + 3;
  numReplicas = random().nextInt(2) + 2;
  useAdminToSetProps = random().nextBoolean();

  configureCluster(numNodes)
      .addConfig(COLLECTION_NAME, configset("cloud-minimal"))
      .configure();

  CollectionAdminResponse resp = CollectionAdminRequest.createCollection(COLLECTION_NAME, COLLECTION_NAME,
      numShards, numReplicas, 0, 0)
      .setMaxShardsPerNode((numShards * numReplicas) / numNodes + 1)
      .process(cluster.getSolrClient());
  assertEquals("Admin request failed; ", 0, resp.getStatus());
  cluster.waitForActiveCollection(COLLECTION_NAME, numShards, numShards * numReplicas);

}
 
private void waitForStable(int cnt, CollectionAdminRequest.Create[] createRequests) throws InterruptedException {
  for (int i = 0; i < cnt; i++) {
    String collectionName = "awhollynewcollection_" + i;
    final int j = i;
    waitForState("Expected to see collection " + collectionName, collectionName,
        (n, c) -> {
          CollectionAdminRequest.Create req = createRequests[j];
          return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
        });
    
    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
    // make sure we have leaders for each shard
    for (int z = 1; z < createRequests[j].getNumShards(); z++) {
      zkStateReader.getLeaderRetry(collectionName, "shard" + z, 10000);
    }      // make sure we again have leaders for each shard
  }
}
 
Example 15  Project: lucene-solr  File: MultiThreadedOCPTest.java
private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
  try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
    CollectionAdminRequest.createCollection("ocptest_shardsplit2","conf1",3,1).processAsync("3000",client);

    SplitShard splitShardRequest = CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD1);
    splitShardRequest.processAsync("3001",client);
    
    splitShardRequest = CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD2);
    splitShardRequest.processAsync("3002",client);

    // Now submit another task with the same id. At this time, hopefully the previous 3002 should still be in the queue.
    expectThrows(SolrServerException.class, () -> {
        CollectionAdminRequest.splitShard("ocptest_shardsplit2").setShardName(SHARD1).processAsync("3002",client);
        // more helpful assertion failure
        fail("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.");
      });
    
    for (int i = 3001; i <= 3002; i++) {
      final RequestStatusState state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
      assertSame("Task " + i + " did not complete, final state: " + state, RequestStatusState.COMPLETED, state);
    }
  }
}
 
Example 16  Project: lucene-solr  File: TestDynamicURP.java
@BeforeClass
public static void setupCluster() throws Exception {
  System.setProperty("enable.runtime.lib", "true");
  configureCluster(3)
      .addConfig("conf", configset("cloud-minimal"))
      .configure();
  SolrZkClient zkClient = cluster.getSolrClient().getZkStateReader().getZkClient();
  String path = ZkStateReader.CONFIGS_ZKNODE + "/conf/solrconfig.xml";
  byte[] data = zkClient.getData(path, null, null, true);

  String solrconfigStr = new String(data, StandardCharsets.UTF_8);
  zkClient.setData(path, solrconfigStr.replace("</config>",
      "<updateRequestProcessorChain name=\"test_urp\" processor=\"testURP\" default=\"true\">\n" +
      "    <processor class=\"solr.RunUpdateProcessorFactory\"/>\n" +
      "  </updateRequestProcessorChain>\n" +
      "\n" +
      "  <updateProcessor class=\"runtimecode.TestURP\" name=\"testURP\" runtimeLib=\"true\"></updateProcessor>\n" +
      "</config>").getBytes(StandardCharsets.UTF_8), true );


  CollectionAdminRequest.createCollection(COLLECTION, "conf", 3, 1).process(cluster.getSolrClient());
  waitForState("", COLLECTION, clusterShape(3, 3));
}
 
@Test
public void test() throws Exception {

  final String collectionName = "customcollreplicadeletion";

  CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
      .setMaxShardsPerNode(5)
      .process(cluster.getSolrClient());

  DocCollection collectionState = getCollectionState(collectionName);
  Replica replica = getRandomReplica(collectionState.getSlice("a"));

  CollectionAdminRequest.deleteReplica(collectionName, "a", replica.getName())
      .process(cluster.getSolrClient());

  waitForState("Expected shard 'a' to have no replicas", collectionName, (n, c) -> {
    return c.getSlice("a") == null || c.getSlice("a").getReplicas().size() == 0;
  });

}
 
Example 18  Project: lucene-solr  File: CreateCollectionCleanupTest.java
@Test
public void testAsyncCreateCollectionCleanup() throws Exception {
  final CloudSolrClient cloudClient = cluster.getSolrClient();
  String collectionName = "foo2";
  assertThat(CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));
  
  // Create a collection that would fail
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,"conf1",1,1);

  Properties properties = new Properties();
  Path tmpDir = createTempDir();
  tmpDir = tmpDir.resolve("foo");
  Files.createFile(tmpDir);
  properties.put(CoreAdminParams.DATA_DIR, tmpDir.toString());
  create.setProperties(properties);
  create.setAsyncId("testAsyncCreateCollectionCleanup");
  create.process(cloudClient);
  RequestStatusState state = AbstractFullDistribZkTestBase.getRequestStateAfterCompletion("testAsyncCreateCollectionCleanup", 30, cloudClient);
  assertThat(state.getKey(), is("failed"));

  // Confirm using LIST that the collection does not exist
  assertThat("Failed collection is still in the clusterstate: " + cluster.getSolrClient().getClusterStateProvider().getClusterState().getCollectionOrNull(collectionName), 
      CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));

}
 
Example 19  Project: lucene-solr  File: ShardSplitTest.java
private void doSplitShardWithRule(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  waitForThingsToLevelOut(15, TimeUnit.SECONDS);

  log.info("Starting testSplitShardWithRule");
  String collectionName = "shardSplitWithRule_" + splitMethod.toLower();
  CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
      .setRule("shard:*,replica:<2,node:*");

  CollectionAdminResponse response = createRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  
  try {
    cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
  } catch (TimeoutException e) {
    new RuntimeException("Timeout waiting for 1shards and 2 replicas.", e);
  }

  CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
      .setShardName("shard1").setSplitMethod(splitMethod.toLower());
  response = splitShardRequest.process(cloudClient);
  assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
}
 
Example 20  Project: lucene-solr  File: CloudHttp2SolrClientTest.java
/**
 * Tests if the 'shards.preference' parameter works with single-sharded collections.
 */
@Test
public void singleShardedPreferenceRules() throws Exception {
  String collectionName = "singleShardPreferenceTestColl";

  int liveNodes = cluster.getJettySolrRunners().size();

  // For testing replica.type, we want to have all replica types available for the collection
  CollectionAdminRequest.createCollection(collectionName, "conf", 1, liveNodes/3, liveNodes/3, liveNodes/3)
      .setMaxShardsPerNode(liveNodes)
      .processAndWait(cluster.getSolrClient(), TIMEOUT);
  cluster.waitForActiveCollection(collectionName, 1, liveNodes);

  // Add some new documents
  new UpdateRequest()
      .add(id, "0", "a_t", "hello1")
      .add(id, "2", "a_t", "hello2")
      .add(id, "3", "a_t", "hello2")
      .commit(getRandomClient(), collectionName);

  // Run the actual test for 'queryReplicaType'
  queryReplicaType(getRandomClient(), Replica.Type.PULL, collectionName);
  queryReplicaType(getRandomClient(), Replica.Type.TLOG, collectionName);
  queryReplicaType(getRandomClient(), Replica.Type.NRT, collectionName);
}
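
The queryReplicaType(...) helper used above is not included in this snippet; a minimal sketch of what such a check could look like, assuming the standard shards.preference request parameter, is:

// Sketch only: query with shards.preference so the request is routed to the given replica type.
SolrQuery q = new SolrQuery("*:*");
q.setParam("shards.preference", "replica.type:" + Replica.Type.PULL.name());
QueryResponse rsp = getRandomClient().query(collectionName, q);
// The actual helper presumably also verifies which replica served the request.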
 
Example 21  Project: lucene-solr  File: DeleteReplicaTest.java
@Test
public void deleteReplicaByCount() throws Exception {

  final String collectionName = "deleteByCount";

  CollectionAdminRequest.createCollection(collectionName, "conf", 1, 3).process(cluster.getSolrClient());
  waitForState("Expected a single shard with three replicas", collectionName, clusterShape(1, 3));

  CollectionAdminRequest.deleteReplicasFromShard(collectionName, "shard1", 2).process(cluster.getSolrClient());
  waitForState("Expected a single shard with a single replica", collectionName, clusterShape(1, 1));

  SolrException e = expectThrows(SolrException.class,
      "Can't delete the last replica by count",
      () -> CollectionAdminRequest.deleteReplicasFromShard(collectionName, "shard1", 1).process(cluster.getSolrClient())
  );
  assertEquals(SolrException.ErrorCode.BAD_REQUEST.code, e.code());
  assertTrue(e.getMessage().contains("There is only one replica available"));
  DocCollection docCollection = getCollectionState(collectionName);
  // We know that since leaders are preserved, PULL replicas should not be left alone in the shard
  assertEquals(0, docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).size());
}
 
@Test
public void otherSolrApisExample() throws Exception {
  expectLine("Found "+NUM_LIVE_NODES+" live nodes");
  // tag::solrj-other-apis[]
  final SolrClient client = getSolrClient();

  @SuppressWarnings({"rawtypes"})
  final SolrRequest request = new CollectionAdminRequest.ClusterStatus();

  final NamedList<Object> response = client.request(request);
  @SuppressWarnings({"unchecked"})
  final NamedList<Object> cluster = (NamedList<Object>) response.get("cluster");
  @SuppressWarnings({"unchecked"})
  final List<String> liveNodes = (List<String>) cluster.get("live_nodes");

  print("Found " + liveNodes.size() + " live nodes");
  // end::solrj-other-apis[]
}
 
Example 23  Project: lucene-solr  File: TestHdfsCloudBackupRestore.java
protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
  String backupName = "configonlybackup";
  CloudSolrClient solrClient = cluster.getSolrClient();

  CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
      .setRepositoryName(getBackupRepoName())
      .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
  backup.process(solrClient);

  Map<String,String> params = new HashMap<>();
  params.put("location", "/backup");
  params.put("solr.hdfs.home", hdfsUri + "/solr");

  HdfsBackupRepository repo = new HdfsBackupRepository();
  repo.init(new NamedList<>(params));
  BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());

  URI baseLoc = repo.createURI("/backup");

  Properties props = mgr.readBackupProperties(baseLoc, backupName);
  assertNotNull(props);
  assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
  assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
  assertEquals(configName, props.getProperty(COLL_CONF));

  DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
  assertNotNull(collectionState);
  assertEquals(collectionName, collectionState.getName());

  URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
  assertTrue(repo.exists(configDirLoc));

  Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
  URI backupLoc = repo.resolve(baseLoc, backupName);
  String[] dirs = repo.listAll(backupLoc);
  for (String d : dirs) {
    assertTrue(expected.contains(d));
  }
}
 
Example 24  Project: lucene-solr  File: ConnectionReuseTest.java
@BeforeClass
public static void setupCluster() throws Exception {
  TestInjection.failUpdateRequests = "true:100";
  configureCluster(1)
      .addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
      .configure();

  CollectionAdminRequest.createCollection(COLLECTION, "config", 1, 1)
      .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);

  cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
      (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
}
 
Example 25  Project: lucene-solr  File: ShardSplitTest.java
private void doSplitMixedReplicaTypes(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  waitForThingsToLevelOut(15, TimeUnit.SECONDS);
  String collectionName = "testSplitMixedReplicaTypes_" + splitMethod.toLower();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2, 0, 2); // TODO tlog replicas disabled right now.
  create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
  create.process(cloudClient);
  
  cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 4));
  
  waitForRecoveriesToFinish(collectionName, false);

  for (int i = 0; i < 100; i++) {
    cloudClient.add(collectionName, getDoc("id", "id-" + i, "foo_s", "bar " + i));
  }
  cloudClient.commit(collectionName);

  CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
  splitShard.setShardName(SHARD1);
  splitShard.setSplitMethod(splitMethod.toLower());
  CollectionAdminResponse rsp = splitShard.process(cloudClient);
  waitForThingsToLevelOut(30, TimeUnit.SECONDS);
 
  cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(2, 12));

  cloudClient.getZkStateReader().forceUpdateCollection(collectionName);
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection coll = clusterState.getCollection(collectionName);
  log.info("coll: {}", coll);

  // verify the original shard
  verifyShard(coll, SHARD1, Slice.State.INACTIVE, 2, 0, 2);
  // verify new sub-shards
  verifyShard(coll, SHARD1_0, Slice.State.ACTIVE, 2, 0, 2);
  verifyShard(coll, SHARD1_1, Slice.State.ACTIVE, 2, 0, 2);
}
 
Example 26  Project: lucene-solr  File: SplitShardSuggester.java
@Override
@SuppressWarnings({"rawtypes"})
SolrRequest init() {
  @SuppressWarnings({"unchecked"})
  Set<Pair<String, String>> shards = (Set<Pair<String, String>>) hints.getOrDefault(Hint.COLL_SHARD, Collections.emptySet());
  if (shards.isEmpty()) {
    throw new RuntimeException("split-shard requires 'collection' and 'shard'");
  }
  if (shards.size() > 1) {
    throw new RuntimeException("split-shard requires exactly one pair of 'collection' and 'shard'");
  }
  Pair<String, String> collShard = shards.iterator().next();
  @SuppressWarnings({"unchecked"})
  Map<String, Object> params = (Map<String, Object>)hints.getOrDefault(Hint.PARAMS, Collections.emptyMap());
  Float splitFuzz = (Float)params.get(CommonAdminParams.SPLIT_FUZZ);
  CollectionAdminRequest.SplitShard req = CollectionAdminRequest.splitShard(collShard.first()).setShardName(collShard.second());
  if (splitFuzz != null) {
    req.setSplitFuzz(splitFuzz);
  }
  String splitMethod = (String)params.get(CommonAdminParams.SPLIT_METHOD);
  if (splitMethod != null) {
    req.setSplitMethod(splitMethod);
  }
  Boolean splitByPrefix = (Boolean)params.get(CommonAdminParams.SPLIT_BY_PREFIX);
  if (splitByPrefix != null) {
    req.setSplitByPrefix(splitByPrefix);
  }
  return req;
}
 
@Slow
@Test
public void testInvalidMustMatch() throws Exception {
  String configName = getSaferTestName();
  createConfigSet(configName);
  // Not a valid regex
  final String mustMatchRegex = "+_solr";

  final int maxCardinality = Integer.MAX_VALUE; // max cardinality for current test

  List<String> retrievedConfigSetNames = new ConfigSetAdminRequest.List().process(solrClient).getConfigSets();
  List<String> expectedConfigSetNames = Arrays.asList("_default", configName);

  // config sets leak between tests so we can't be any more specific than this on the next 2 asserts
  assertTrue("We expect at least 2 configSets",
      retrievedConfigSetNames.size() >= expectedConfigSetNames.size());
  assertTrue("ConfigNames should include :" + expectedConfigSetNames, retrievedConfigSetNames.containsAll(expectedConfigSetNames));

  SolrException e = expectThrows(SolrException.class, () -> CollectionAdminRequest.createCategoryRoutedAlias(getAlias(), categoryField, maxCardinality,
      CollectionAdminRequest.createCollection("_unused_", configName, 1, 1)
          .setMaxShardsPerNode(2))
      .setMustMatch(mustMatchRegex)
      .process(solrClient)
  );

  assertTrue("Create Alias should fail since router.mustMatch must be a valid regular expression",
      e.getMessage().contains("router.mustMatch must be a valid regular expression"));
}
 
Example 28  Project: lucene-solr  File: LeaderTragicEventTest.java
@Test
public void test() throws Exception {
  final String collection = "collection1";
  cluster.getSolrClient().setDefaultCollection(collection);
  CollectionAdminRequest
      .createCollection(collection, "config", 1, 2)
      .process(cluster.getSolrClient());
  cluster.waitForActiveCollection(collection, 1, 2);
  try {
    List<String> addedIds = new ArrayList<>();
    Replica oldLeader = corruptLeader(collection, addedIds);

    waitForState("Timeout waiting for new replica become leader", collection, (liveNodes, collectionState) -> {
      Slice slice = collectionState.getSlice("shard1");

      if (slice.getReplicas().size() != 2) return false;
      if (slice.getLeader() == null) return false;
      if (slice.getLeader().getName().equals(oldLeader.getName())) return false;

      return true;
    });
    ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getSolrClient().getZkStateReader(), collection, 120000);
    Slice shard = getCollectionState(collection).getSlice("shard1");
    assertNotSame(shard.getLeader().getNodeName(), oldLeader.getNodeName());
    assertEquals(getNonLeader(shard).getNodeName(), oldLeader.getNodeName());

    for (String id : addedIds) {
      assertNotNull(cluster.getSolrClient().getById(collection,id));
    }
    if (log.isInfoEnabled()) {
      log.info("The test success oldLeader:{} currentState:{}", oldLeader, getCollectionState(collection));
    }

  } finally {
    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
  }
}
 
Example 29  Project: lucene-solr  File: OverseerModifyCollectionTest.java
@Test
public void testModifyColl() throws Exception {

  final String collName = "modifyColl";

  CollectionAdminRequest.createCollection(collName, "conf1", 1, 2)
      .process(cluster.getSolrClient());

  // TODO create a modifyCollection() method on CollectionAdminRequest
  ModifiableSolrParams p1 = new ModifiableSolrParams();
  p1.add("collection", collName);
  p1.add("action", "MODIFYCOLLECTION");
  p1.add("collection.configName", "conf2");
  cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p1));

  assertEquals("conf2", getConfigNameFromZk(collName));
  
  //Try an invalid config name
  ModifiableSolrParams p2 = new ModifiableSolrParams();
  p2.add("collection", collName);
  p2.add("action", "MODIFYCOLLECTION");
  p2.add("collection.configName", "notARealConfigName");
  Exception e = expectThrows(Exception.class, () -> {
    cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p2));
  });

  assertTrue(e.getMessage(), e.getMessage().contains("Can not find the specified config set"));

}
 
public void testBalanceShardUnique() {
  CollectionAdminRequest.BalanceShardUnique request = CollectionAdminRequest.balanceReplicaProperty("foo","prop");
  assertContainsParams(request.getParams(), ACTION, COLLECTION, "property");

  request.setShardUnique(true);
  assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","shardUnique");
  
  request.setOnlyActiveNodes(false);
  assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","shardUnique","onlyactivenodes");
  
  request.setShardUnique(null);
  assertContainsParams(request.getParams(), ACTION, COLLECTION, "property","onlyactivenodes");
  
}