sonatype-lift[bot] commented on a change in pull request #324:
URL: https://github.com/apache/solr/pull/324#discussion_r790893626



##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
##########
@@ -0,0 +1,457 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.mockfile.FilterPath;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.handler.BackupStatusChecker;
+import org.apache.solr.handler.ReplicationHandler;
+import org.junit.Test;
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDistribZkTestBase {
+  private static final String SHARD2 = "shard2";
+  private static final String SHARD1 = "shard1";
+  private static final String ONE_NODE_COLLECTION = "onenodecollection";
+  private final boolean onlyLeaderIndexes = random().nextBoolean();
+
+
+  public AbstractBasicDistributedZk2TestBase() {
+    super();
+    // we need DVs on point fields to compute stats & facets
+    if (Boolean.getBoolean(NUMERIC_POINTS_SYSPROP)) System.setProperty(NUMERIC_DOCVALUES_SYSPROP, "true");
+
+    sliceCount = 2;
+  }
+
+  @Override
+  protected boolean useTlogReplicas() {
+    return false; // TODO: tlog replicas make commits take way too long due to what is likely a bug and its TestInjection use
+  }
+
+  @Test
+  @ShardsFixed(num = 4)
+  public void test() throws Exception {
+    boolean testFinished = false;
+    try {
+      handle.clear();
+      handle.put("timestamp", SKIPVAL);
+
+      testNodeWithoutCollectionForwarding();
+
+      indexr(id, 1, i1, 100, tlong, 100, t1,
+          "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true",
+          "foo_d", 1.414d);
+
+      commit();
+
+      // make sure we are in a steady state...
+      waitForRecoveriesToFinish(false);
+
+      assertDocCounts(false);
+
+      indexAbunchOfDocs();
+
+      // check again
+      waitForRecoveriesToFinish(false);
+
+      commit();
+
+      assertDocCounts(VERBOSE);
+      checkQueries();
+
+      assertDocCounts(VERBOSE);
+
+      query("q", "*:*", "sort", "n_tl1 desc");
+
+      bringDownShardIndexSomeDocsAndRecover();
+
+      query("q", "*:*", "sort", "n_tl1 desc");
+
+      // test adding another replica to a shard - it should do a
+      // recovery/replication to pick up the index from the leader
+      addNewReplica();
+
+      long docId = testUpdateAndDelete();
+
+      // index a bad doc...
+      expectThrows(SolrException.class, () -> indexr(t1, "a doc with no id"));
+
+      // TODO: bring this to its own method?
+      // try indexing to a leader that has no replicas up
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkNodeProps leaderProps = zkStateReader.getLeaderRetry(
+          DEFAULT_COLLECTION, SHARD2);
+
+      String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
+      chaosMonkey.stopShardExcept(SHARD2, nodeName);
+
+      SolrClient client = getClient(nodeName);
+
+      index_specific(client, "id", docId + 1, t1, "what happens here?");
+
+      // expire a session...
+      CloudJettyRunner cloudJetty = shardToJetty.get(SHARD1).get(0);
+      chaosMonkey.expireSession(cloudJetty.jetty);
+
+      indexr("id", docId + 1, t1, "slip this doc in");
+
+      waitForRecoveriesToFinish(false);
+
+      checkShardConsistency(SHARD1);
+      checkShardConsistency(SHARD2);
+
+      testFinished = true;
+    } finally {
+      if (!testFinished) {
+        printLayoutOnTearDown = true;
+      }
+    }
+
+  }
+
+  private void testNodeWithoutCollectionForwarding() throws Exception {
+    assertEquals(0, CollectionAdminRequest
+        .createCollection(ONE_NODE_COLLECTION, "conf1", 1, 1)
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(ONE_NODE_COLLECTION, "shard1")
+        .setCoreName(ONE_NODE_COLLECTION + "core")
+        .process(cloudClient).isSuccess());
+
+    waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
+    waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);
+
+    cloudClient.getZkStateReader().getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
+
+    int docs = 2;
+    for (SolrClient client : clients) {
+      final String clientUrl = getBaseUrl((HttpSolrClient) client);
+      addAndQueryDocs(clientUrl, docs);
+      docs += 2;
+    }
+  }
+
+  // 2 docs added every call
+  private void addAndQueryDocs(final String baseUrl, int docs)
+      throws Exception {
+
+    SolrQuery query = new SolrQuery("*:*");
+
+    try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection" + "core")) {
+
+      // it might take a moment for the proxy node to see us in its cloud state
+      waitForNon403or404or503(qclient);
+
+      // add a doc
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", docs);
+      qclient.add(doc);
+      qclient.commit();
+
+
+      QueryResponse results = qclient.query(query);
+      assertEquals(docs - 1, results.getResults().getNumFound());
+    }
+
+    try (HttpSolrClient qclient = getHttpSolrClient(baseUrl + "/onenodecollection")) {
+      QueryResponse results = qclient.query(query);
+      assertEquals(docs - 1, results.getResults().getNumFound());
+
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", docs + 1);
+      qclient.add(doc);
+      qclient.commit();
+
+      query = new SolrQuery("*:*");
+      query.set("rows", 0);
+      results = qclient.query(query);
+      assertEquals(docs, results.getResults().getNumFound());
+    }
+  }
+
+  private long testUpdateAndDelete() throws Exception {
+    long docId = 99999999L;
+    indexr("id", docId, t1, "originalcontent");
+
+    commit();
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add("q", t1 + ":originalcontent");
+    QueryResponse results = clients.get(0).query(params);
+    assertEquals(1, results.getResults().getNumFound());
+
+    // update doc
+    indexr("id", docId, t1, "updatedcontent");
+
+    commit();
+
+    results = clients.get(0).query(params);
+    assertEquals(0, results.getResults().getNumFound());
+
+    params.set("q", t1 + ":updatedcontent");
+
+    results = clients.get(0).query(params);
+    assertEquals(1, results.getResults().getNumFound());
+
+    UpdateRequest uReq = new UpdateRequest();
+    // uReq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
+    uReq.deleteById(Long.toString(docId)).process(clients.get(0));
+
+    commit();
+
+    results = clients.get(0).query(params);
+    assertEquals(0, results.getResults().getNumFound());
+    return docId;
+  }
+
+  private void bringDownShardIndexSomeDocsAndRecover() throws Exception {
+    SolrQuery query = new SolrQuery("*:*");
+    query.set("distrib", false);
+
+    commit();
+
+    long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
+        .query(query).getResults().getNumFound();
+
+    query("q", "*:*", "sort", "n_tl1 desc");
+
+    int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
+
+    assertEquals(5, oldLiveNodes);
+
+    // kill a shard
+    CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
+
+    // ensure shard is dead
+    expectThrows(SolrServerException.class,
+        "This server should be down and this update should have failed",
+        () -> index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!")
+    );
+
+    commit();
+
+    query("q", "*:*", "sort", "n_tl1 desc");
+
+    // long cloudClientDocs = cloudClient.query(new
+    // SolrQuery("*:*")).getResults().getNumFound();
+    // System.out.println("clouddocs:" + cloudClientDocs);
+
+    // try to index to a living shard at shard2
+
+
+    long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+    cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
+
+    try {
+      index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
+          "specific doc!");
+    } catch (Exception e) {
+      // wait and try again
+      Thread.sleep(4000);
+      index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
+          "specific doc!");
+    }
+
+    commit();
+
+    checkShardConsistency(true, false);
+
+    query("q", "*:*", "sort", "n_tl1 desc");
+
+
+    cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
+
+    long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+    assertEquals(numFound1 + 1, numFound2);
+
+    SolrInputDocument doc = new SolrInputDocument();
+    doc.addField("id", 1001);
+
+    controlClient.add(doc);
+
+    // try adding a doc with CloudSolrServer
+    UpdateRequest ureq = new UpdateRequest();
+    ureq.add(doc);
+    // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
+
+    try {
+      ureq.process(cloudClient);
+    } catch(SolrServerException e){
+      // try again
+      Thread.sleep(3500);
+      ureq.process(cloudClient);
+    }
+
+    commit();
+
+    query("q", "*:*", "sort", "n_tl1 desc");
+
+    long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+
+    // let's just check that the one doc added since the last commit made it in...
+    assertEquals(numFound2 + 1, numFound3);
+
+    // test debugging
+    testDebugQueries();
+
+    if (VERBOSE) {
+      System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
+          .getNumFound());
+
+      for (SolrClient client : clients) {
+        try {
+          SolrQuery q = new SolrQuery("*:*");
+          q.set("distrib", false);
+          System.err.println(client.query(q).getResults()
+              .getNumFound());
+        } catch (Exception e) {
+          // ignore; this is best-effort debug output only
+        }
+      }
+    }
+    // TODO: This test currently fails because debug info is obtained only
+    // on shards with matches.
+    // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
+
+    // this should trigger a recovery phase on deadShard
+    deadShard.jetty.start();
+
+    // make sure we have published we are recovering
+    Thread.sleep(1500);
+
+    waitForRecoveriesToFinish(false);
+
+    deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient
+        .query(query).getResults().getNumFound();
+    // if we properly recovered, we should now have the couple of missing docs that
+    // came in while the shard was down
+    checkShardConsistency(true, false);
+
+
+    // recover over 100 docs so we do more than just peer sync (replicate recovery)
+    chaosMonkey.stopJetty(deadShard);
+
+    for (int i = 0; i < 226; i++) {
+      doc = new SolrInputDocument();
+      doc.addField("id", 2000 + i);
+      controlClient.add(doc);
+      ureq = new UpdateRequest();
+      ureq.add(doc);
+      // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
+      ureq.process(cloudClient);
+    }
+    commit();
+
+    Thread.sleep(1500);
+
+    deadShard.jetty.start();
+
+    // make sure we have published we are recovering
+    Thread.sleep(1500);
+
+    waitForThingsToLevelOut(1, TimeUnit.MINUTES);
+
+    Thread.sleep(500);
+
+    waitForRecoveriesToFinish(false);
+
+    checkShardConsistency(true, false);
+
+    // try a backup command
+    try (final HttpSolrClient client = getHttpSolrClient((String) shardToJetty.get(SHARD2).get(0).info.get("base_url"))) {
+      final String backupName = "the_backup";
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("qt", ReplicationHandler.PATH);
+      params.set("command", "backup");
+      params.set("name", backupName);
+      final Path location = FilterPath.unwrap(createTempDir()).toRealPath();
+      // Allow non-standard location outside SOLR_HOME
+      jettys.forEach(j -> j.getCoreContainer().getAllowPaths().add(location));

Review comment:
       *NULL_DEREFERENCE:*  object returned by `j.getCoreContainer()` could be null and is dereferenced at line 407.
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)
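
   If the warning is acted on rather than ignored, a minimal sketch of one possible guard is below. It assumes the surrounding test code from the hunk above (`jettys`, `location`, JUnit's `assertNotNull`, and an import of `org.apache.solr.core.CoreContainer`); failing fast is an editorial choice here, not necessarily the PR's intended fix:

   ```java
   // Sketch: iterate explicitly and assert the CoreContainer is present
   // before dereferencing it, instead of calling it inline in the lambda.
   for (JettySolrRunner j : jettys) {
     CoreContainer cc = j.getCoreContainer();
     // Lift warns getCoreContainer() may return null (e.g. runner not started yet).
     assertNotNull("CoreContainer not available for " + j.getNodeName(), cc);
     cc.getAllowPaths().add(location);
   }
   ```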

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slow
+public abstract class AbstractRecoveryZkTestBase extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+
+  private final List<StoppableIndexingThread> threads = new ArrayList<>();
+
+  @After
+  public void stopThreads() throws InterruptedException {
+    for (StoppableIndexingThread t : threads) {
+      t.safeStop();
+    }
+    for (StoppableIndexingThread t : threads) {
+      t.join();
+    }
+    threads.clear();
+  }
+
+  @Test
+  //commented 2-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 28-June-2018
+  public void test() throws Exception {
+
+    final String collection = "recoverytest";
+
+    CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
+    cluster.getSolrClient().setDefaultCollection(collection);
+
+    // start a couple indexing threads
+
+    int[] maxDocList = new int[] {300, 700, 1200, 1350, 3000};
+    int[] maxDocNightlyList = new int[] {3000, 7000, 12000, 30000, 45000, 60000};
+
+    int maxDoc;
+    if (!TEST_NIGHTLY) {
+      maxDoc = maxDocList[random().nextInt(maxDocList.length - 1)];
+    } else {
+      maxDoc = maxDocNightlyList[random().nextInt(maxDocNightlyList.length - 1)];
+    }
+    log.info("Indexing {} documents", maxDoc);
+
+    final StoppableIndexingThread indexThread
+        = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
+    threads.add(indexThread);
+    indexThread.start();
+
+    final StoppableIndexingThread indexThread2
+        = new StoppableIndexingThread(null, cluster.getSolrClient(), "2", true, maxDoc, 1, true);
+    threads.add(indexThread2);
+    indexThread2.start();
+
+    // give some time to index...
+    int[] waitTimes = new int[] {200, 2000, 3000};
+    Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
+
+    // bring shard replica down
+    DocCollection state = getCollectionState(collection);
+    Replica leader = state.getLeader("shard1");
+    Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
+
+    JettySolrRunner jetty = cluster.getReplicaJetty(replica);

Review comment:
       *NULL_DEREFERENCE:*  object `replica` last assigned on line 105 could be null and is dereferenced by call to `getReplicaJetty(...)` at line 107.
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)
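
   If this one is addressed in code rather than ignored, a short sketch (assuming, as the test setup implies, that a 1-shard/2-replica collection must contain a non-leader replica; the assertion just makes that assumption explicit):

   ```java
   // getRandomReplica(slice, predicate) can return null when nothing matches,
   // so assert before handing the result to getReplicaJetty(...).
   Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
   assertNotNull("expected a non-leader replica in shard1", replica);
   JettySolrRunner jetty = cluster.getReplicaJetty(replica);
   ```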

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
##########
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.core.SolrPaths;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Test;
+
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractUnloadDistributedZkTestBase extends AbstractBasicDistributedZkTestBase {
+  public AbstractUnloadDistributedZkTestBase() {
+    super();
+  }
+
+  protected String getSolrXml() {
+    return "solr.xml";
+  }
+
+  @Test
+  public void test() throws Exception {
+    jettys.forEach(j -> {
+      Set<Path> allowPath = j.getCoreContainer().getAllowPaths();
+      allowPath.clear();
+      allowPath.add(SolrPaths.ALL_PATH); // Allow non-standard core instance path
+    });
+    testCoreUnloadAndLeaders(); // long
+    testUnloadLotsOfCores(); // long
+
+    testUnloadShardAndCollection();
+  }
+
+  private void checkCoreNamePresenceAndSliceCount(String collectionName, String coreName,
+                                                  boolean shouldBePresent, int expectedSliceCount) throws Exception {
+    final TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    Boolean isPresent = null; // null meaning "don't know"
+    while (null == isPresent || shouldBePresent != isPresent) {
+      final DocCollection docCollection = getCommonCloudSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName);
+      final Collection<Slice> slices = (docCollection != null) ? docCollection.getSlices() : Collections.emptyList();
+      if (timeout.hasTimedOut()) {
+        printLayout();
+        fail("checkCoreNamePresenceAndSliceCount failed:"
+            +" collection="+collectionName+" CoreName="+coreName
+            +" shouldBePresent="+shouldBePresent+" isPresent="+isPresent
+            +" expectedSliceCount="+expectedSliceCount+" 
actualSliceCount="+slices.size());
+      }
+      if (expectedSliceCount == slices.size()) {
+        isPresent = false;
+        for (Slice slice : slices) {
+          for (Replica replica : slice.getReplicas()) {
+            if (coreName.equals(replica.get("core"))) {
+              isPresent = true;
+            }
+          }
+        }
+      }
+      Thread.sleep(1000);
+    }
+  }
+
+  private void testUnloadShardAndCollection() throws Exception {
+    final int numShards = 2;
+
+    final String collection = "test_unload_shard_and_collection";
+
+    final String coreName1 = collection+"_1";
+    final String coreName2 = collection+"_2";
+
+    assertEquals(0, CollectionAdminRequest.createCollection(collection, "conf1", numShards, 1)
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+        .setCoreName(coreName1)
+        .setNode(jettys.get(0).getNodeName())
+        .process(cloudClient).isSuccess());
+
+    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard2")
+        .setCoreName(coreName2)
+        .setNode(jettys.get(0).getNodeName())
+        .process(cloudClient).isSuccess());
+
+
+    // does not mean they are active and up yet :*
+    waitForRecoveriesToFinish(collection, false);
+
+    final boolean unloadInOrder = random().nextBoolean();
+    final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
+    final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
+
+    try (HttpSolrClient adminClient = getHttpSolrClient(buildUrl(jettys.get(0).getLocalPort()))) {
+      // now unload one of the two
+      Unload unloadCmd = new Unload(false);
+      unloadCmd.setCoreName(unloadCmdCoreName1);
+      adminClient.request(unloadCmd);
+
+      // there should still be two shards (as of SOLR-5209)
+      checkCoreNamePresenceAndSliceCount(collection, unloadCmdCoreName1, false /* shouldBePresent */, numShards /* expectedSliceCount */);
+
+      // now unload one of the other
+      unloadCmd = new Unload(false);
+      unloadCmd.setCoreName(unloadCmdCoreName2);
+      adminClient.request(unloadCmd);
+      checkCoreNamePresenceAndSliceCount(collection, unloadCmdCoreName2, false /* shouldBePresent */, numShards /* expectedSliceCount */);
+    }
+
+    //printLayout();
+    // the collection should still be present (as of SOLR-5209 replica removal does not cascade to remove the slice and collection)
+    assertTrue("No longer found collection "+collection, getCommonCloudSolrClient().getZkStateReader().getClusterState().hasCollection(collection));
+  }
+
+  protected SolrCore getFirstCore(String collection, JettySolrRunner jetty) {
+    SolrCore solrCore = null;
+    for (SolrCore core : jetty.getCoreContainer().getCores()) {

Review comment:
       *NULL_DEREFERENCE:*  object returned by `jetty.getCoreContainer()` could be null and is dereferenced at line 158.
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)
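
   A hedged sketch of one fix (the loop body below is reconstructed from the truncated hunk and is an assumption; returning null keeps the method's existing contract of returning null when no matching core is found):

   ```java
   protected SolrCore getFirstCore(String collection, JettySolrRunner jetty) {
     CoreContainer cc = jetty.getCoreContainer();
     if (cc == null) {
       return null; // runner not (yet) started; nothing to iterate
     }
     SolrCore solrCore = null;
     for (SolrCore core : cc.getCores()) {
       if (core.getName().startsWith(collection)) {
         solrCore = core; // keep the last core whose name matches
       }
     }
     return solrCore;
   }
   ```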

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
##########
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.core.SolrPaths;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Test;
+
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things
+ * work as expected.
+ */
+public abstract class AbstractUnloadDistributedZkTestBase extends AbstractBasicDistributedZkTestBase {
+  public AbstractUnloadDistributedZkTestBase() {
+    super();
+  }
+
+  protected String getSolrXml() {
+    return "solr.xml";
+  }
+
+  @Test
+  public void test() throws Exception {
+    jettys.forEach(j -> {
+      Set<Path> allowPath = j.getCoreContainer().getAllowPaths();

Review comment:
       *NULL_DEREFERENCE:*  object returned by `j.getCoreContainer()` could be null and is dereferenced at line 68.
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
##########
@@ -0,0 +1,645 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Category;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  abstract String getConfigSet();
+
+  @Before
+  public void setupCluster() throws Exception {
+    // we don't want this test to have zk timeouts
+    System.setProperty("zkClientTimeout", "60000");
+    System.setProperty("createCollectionWaitTimeTillActive", "5");
+    TestInjection.randomDelayInCoreCreation = "true:5";
+    System.setProperty("validateAfterInactivity", "200");
+    System.setProperty("solr.allowPaths", "*");
+
+    configureCluster(4)
+        .addConfig("conf", configset(getConfigSet()))
+        .addConfig("conf2", configset(getConfigSet()))
+        .withSolrXml(TEST_PATH().resolve("solr.xml"))
+        .configure();
+  }
+  
+  @After
+  public void tearDownCluster() throws Exception {
+    try {
+      shutdownCluster();
+    } finally {
+      System.clearProperty("createCollectionWaitTimeTillActive");
+      System.clearProperty("solr.allowPaths");
+      super.tearDown();
+    }
+  }
+
+  @Test
+  public void testCreationAndDeletion() throws Exception {
+    String collectionName = "created_and_deleted";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+        .contains(collectionName));
+
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+  }
+
+  @Test
+  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
+    String collectionName = "out_of_sync_collection";
+
+    // manually create a collections zknode
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+    
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+  }
+
+  @Test
+  public void deletePartiallyCreatedCollection() throws Exception {
+    final String collectionName = "halfdeletedcollection";
+
+    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    String dataDir = createTempDir().toFile().getAbsolutePath();
+    // create a core that simulates something left over from a partially-deleted collection
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collectionName, "shard1")
+        .setDataDir(dataDir)
+        .process(cluster.getSolrClient()).isSuccess());
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+  }
+
+  @Test
+  public void deleteCollectionOnlyInZk() throws Exception {
+    final String collectionName = "onlyinzk";
+
+    // create the collections node, but nothing else
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    // delete via API - should remove collections node
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+    
+    // now creating that collection should work
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+  }
+
+  @Test
+  public void testBadActionNames() {
+    // try a bad action
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", "BADACTION");
+    String collectionName = "badactioncollection";
+    params.set("name", collectionName);
+    params.set("numShards", 2);
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testMissingRequiredParameters() {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("numShards", 2);
+    // missing required collection parameter
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testMissingNumShards() {
+    // No numShards should fail
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("collection.configName", "conf");
+
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testZeroNumShards() {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("numShards", 0);
+    params.set("collection.configName", "conf");
+
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testCreateShouldFailOnExistingCore() throws Exception {
+    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+        .setNode(cluster.getJettySolrRunner(1).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    expectThrows(BaseHttpSolrClient.RemoteSolrException.class, () -> {
+      CollectionAdminRequest.createCollection("halfcollection", "conf", 1, 1)
+          .setCreateNodeSet(nn1 + "," + nn2)
+          .process(cluster.getSolrClient());
+    });
+  }
+
+  @Test
+  public void testNoConfigSetExist() throws Exception {
+    expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+          .process(cluster.getSolrClient());
+    });
+
+    TimeUnit.MILLISECONDS.sleep(1000);
+    // in both cases, the collection should have defaulted to the core name
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
+  }
+
+  @Test
+  public void testCoresAreDistributedAcrossNodes() throws Exception {
+    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 
2, 2)
+        .process(cluster.getSolrClient());
+
+    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+
+    List<String> createNodeList = new ArrayList<>(liveNodes);
+
+    DocCollection collection = getCollectionState("nodes_used_collection");
+    for (Slice slice : collection.getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        createNodeList.remove(replica.getNodeName());
+      }
+    }
+
+    assertEquals(createNodeList.toString(), 0, createNodeList.size());
+  }
+
+  @Test
+  public void testDeleteNonExistentCollection() throws Exception {
+
+    expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+    });
+
+    // create another collection should still work
+    CollectionAdminRequest.createCollection("acollectionafterbaddelete", 
"conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Collection creation after a bad delete failed", 
"acollectionafterbaddelete",
+        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+  }
+
+  @Test
+  public void testSpecificConfigsets() throws Exception {
+    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 
1).process(cluster.getSolrClient());
+    String configName  = 
cluster.getSolrClient().getClusterStateProvider().getCollection("withconfigset2").getConfigName();
+    assertEquals("conf2", configName);
+  }
+
+  @Test
+  public void testCreateNodeSet() throws Exception {
+    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState("nodeset_collection");
+    for (Replica replica : collectionState.getReplicas()) {
+      String replicaUrl = replica.getCoreUrl();
+      boolean matchingJetty = false;
+      for (String jettyUrl : baseUrls) {
+        if (replicaUrl.startsWith(jettyUrl)) {
+          matchingJetty = true;
+        }
+      }
+      if (!matchingJetty) {
+        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
+      }
+    }
+  }
+
+  @Test
+  //28-June-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
+  // See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows machines occasionally
+  // commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018 SOLR-12028
+  public void testCollectionsAPI() throws Exception {
+
+    // create new collections rapid fire
+    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
+    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+    
+    class Coll {
+      String name;
+      int numShards;
+      int replicationFactor;
+    }
+    
+    List<Coll> colls = new ArrayList<>();
+
+    for (int i = 0; i < cnt; i++) {
+
+      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+
+      createRequests[i]
+          = CollectionAdminRequest.createCollection("awhollynewcollection_" + 
i, "conf2", numShards, replicationFactor);
+      createRequests[i].processAsync(cluster.getSolrClient());
+      
+      Coll coll = new Coll();
+      coll.name = "awhollynewcollection_" + i;
+      coll.numShards = numShards;
+      coll.replicationFactor = replicationFactor;
+      colls.add(coll);
+    }
+
+    for (Coll coll : colls) {
+      cluster.waitForActiveCollection(coll.name, coll.numShards, coll.numShards * coll.replicationFactor);
+    }
+
+    waitForStable(cnt, createRequests);
+
+    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+      checkInstanceDirs(cluster.getJettySolrRunner(i));
+    }
+    
+    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+    
+    // TODO: we should not need this...beast test well when trying to fix
+    Thread.sleep(1000);
+    
+    cluster.getSolrClient().getZkStateReader().forciblyRefreshAllClusterStateSlow();
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .commit(cluster.getSolrClient(), collectionName);
+    long numFound = 0;
+    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    while (!timeOut.hasTimedOut()) {
+
+      numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
+      if (numFound == 3) {
+        break;
+      }
+
+      Thread.sleep(500);
+    }
+    
+    if (timeOut.hasTimedOut()) {
+      fail("Timeout waiting to see 3 found, instead saw " + numFound + " for 
collection " + collectionName);
+    }
+
+    checkNoTwoShardsUseTheSameIndexDir();
+  }
+
+  private void waitForStable(int cnt, CollectionAdminRequest.Create[] createRequests) throws InterruptedException {
+    for (int i = 0; i < cnt; i++) {
+      String collectionName = "awhollynewcollection_" + i;
+      final int j = i;
+      waitForState("Expected to see collection " + collectionName, 
collectionName,
+          (n, c) -> {
+            CollectionAdminRequest.Create req = createRequests[j];
+            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+          });
+      
+      ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+      // make sure we have leaders for each shard
+      for (int z = 1; z <= createRequests[j].getNumShards(); z++) {
+        zkStateReader.getLeaderRetry(collectionName, "shard" + z, 10000);
+      }
+    }
+  }
+
+  @Test
+  public void testCollectionReload() throws Exception {
+    final String collectionName = "reloaded_collection";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 
2).process(cluster.getSolrClient());
+
+    // get core open times
+    Map<String, Long> urlToTimeBefore = new HashMap<>();
+    collectStartTimes(collectionName, urlToTimeBefore);
+    assertTrue(urlToTimeBefore.size() > 0);
+
+    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
+
+    // reloads may take a short while
+    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
+    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
+  }
+
+  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
+    CoreContainer cores = jetty.getCoreContainer();
+    Collection<SolrCore> theCores = cores.getCores();

Review comment:
       *NULL_DEREFERENCE:*  object `cores` last assigned on line 472 could be null and is dereferenced at line 473.
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)
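
   If acted on, a one-line hardening sketch (assumes `java.util.Objects` is imported; the message text is illustrative):

   ```java
   // Fail with a clear message instead of a bare NPE if the container is absent.
   CoreContainer cores = Objects.requireNonNull(jetty.getCoreContainer(),
       "CoreContainer not initialized for " + jetty.getNodeName());
   Collection<SolrCore> theCores = cores.getCores();
   ```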

##########
File path: solr/contrib/hdfs/build.gradle
##########
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+apply plugin: 'java-library'
+
+description = 'HDFS Contrib Module'
+
+dependencies {
+  
+  configurations.all {
+    exclude group: 'log4j', module: 'log4j'
+    exclude group: 'commons-logging', module: 'commons-logging'
+    exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+    exclude group: 'org.apache.yetus', module: 'audience-annotations'
+    exclude group: 'org.codehaus.mojo', module: 'animal-sniffer-annotations'
+   // be conservative on what's added here.  Affects *all* configs, including internal ones.
+  }
+
+
+  implementation project(':solr:core')
+
+  // Hadoop dependencies
+  implementation ('org.apache.hadoop:hadoop-annotations') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-auth') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-common') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-hdfs-client') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-hdfs') { transitive = false }
+
+  // Guava provides the VisibleForTesting annotation
+  implementation ('com.google.guava:guava') { transitive = false }

Review comment:
       *Moderate OSS Vulnerability:*
   ### pkg:maven/com.google.guava/guava@25.1-jre
   0 Critical, 0 Severe, 1 Moderate, 0 Unknown vulnerabilities have been found across 1 dependency
   
   <details>
     <summary><b>Components</b></summary><br/>
     <ul>
         <details>
           <summary><b>pkg:maven/com.google.guava/guava@25.1-jre</b></summary>
           <ul>
     <details>
       <summary><b>MODERATE Vulnerabilities (1)</b></summary><br/>
   <ul>
   
   > #### [CVE-2020-8908] A temp directory creation vulnerability exists in all versions of Guava, allowin...
   > A temp directory creation vulnerability exists in all versions of Guava, allowing an attacker with access to the machine to potentially access data in a temporary directory created by the Guava API com.google.common.io.Files.createTempDir(). By default, on unix-like systems, the created directory is world-readable (readable by an attacker with access to the system). The method in question has been marked @Deprecated in versions 30.0 and later and should not be used. For Android developers, we recommend choosing a temporary directory API provided by Android, such as context.getCacheDir(). For other Java developers, we recommend migrating to the Java 7 API java.nio.file.Files.createTempDirectory() which explicitly configures permissions of 700, or configuring the Java runtime's java.io.tmpdir system property to point to a location whose permissions are appropriately configured.
   >
   > **CVSS Score:** 3.3
   >
   > **CVSS Vector:** CVSS:3.0/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:N/A:N
   
   </ul>
       </details>
           </ul>
         </details>
     </ul>
   </details>
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)
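
   For reference, the migration the advisory recommends looks roughly like this (a self-contained sketch; the class name and prefix string are illustrative, and Solr test code would normally go through its own `createTempDir()` helpers instead):

   ```java
   import java.io.IOException;
   import java.nio.file.Files;
   import java.nio.file.Path;

   public class TempDirExample {
     public static void main(String[] args) throws IOException {
       // Deprecated and world-readable on unix-like systems (CVE-2020-8908):
       // File tmp = com.google.common.io.Files.createTempDir();

       // JDK replacement: creates the directory with owner-only (700)
       // permissions on POSIX file systems.
       Path tmp = Files.createTempDirectory("guava-migration-");
       System.out.println("created " + tmp);
     }
   }
   ```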

##########
File path: solr/contrib/hdfs/build.gradle
##########
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+apply plugin: 'java-library'
+
+description = 'HDFS Contrib Module'
+
+dependencies {
+  
+  configurations.all {
+    exclude group: 'log4j', module: 'log4j'
+    exclude group: 'commons-logging', module: 'commons-logging'
+    exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+    exclude group: 'org.apache.yetus', module: 'audience-annotations'
+    exclude group: 'org.codehaus.mojo', module: 'animal-sniffer-annotations'
+   // be conservative on what's added here.  Affects *all* configs, including internal ones.
+  }
+
+
+  implementation project(':solr:core')
+
+  // Hadoop dependencies
+  implementation ('org.apache.hadoop:hadoop-annotations') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-auth') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-common') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-hdfs-client') { transitive = false }
+  implementation ('org.apache.hadoop:hadoop-hdfs') { transitive = false }
+
+  // Guava provides the VisibleForTesting annotation
+  implementation ('com.google.guava:guava') { transitive = false }
+
+  // Caffeine cache to implement HDFS block caching  
+  api('com.github.ben-manes.caffeine:caffeine', {
+    exclude group: "org.checkerframework", module: "checker-qual"
+  })
+
+  // Many HDFS tests are using/subclassing test framework classes
+  testImplementation project(':solr:test-framework')
+
+  // HdfsCheckindexTool tests reference LuceneTestCase
+  testImplementation ('org.apache.lucene:lucene-test-framework')
+
+  // hadoop dependencies for tests
+  testImplementation ('org.apache.hadoop:hadoop-hdfs') { transitive = false }
+  testImplementation ('org.apache.hadoop:hadoop-common::tests') { transitive = false }
+  testImplementation ('org.apache.hadoop:hadoop-hdfs::tests') { transitive = false }
+  testImplementation ('org.apache.hadoop:hadoop-minikdc') { transitive = false }
+
+  testImplementation 'org.apache.logging.log4j:log4j-1.2-api'
+
+  // classes like solr.ICUCollationField, used by NNFailoverTest for example.
+  testImplementation project(':solr:contrib:analysis-extras')
+
+  // required for instantiating a Zookeeper server in tests or embedded
+  runtimeOnly ('org.xerial.snappy:snappy-java')
+
+  // commons packages needed by test classes
+  testImplementation('commons-io:commons-io') { transitive = false }
+
+  // Zookeeper dependency - some tests like HdfsCloudBackupRestore need this
+  implementation ('org.apache.zookeeper:zookeeper')

Review comment:
       *Severe OSS Vulnerability:*
   ### pkg:maven/org.apache.zookeeper/zookeeper@3.7.0
   0 Critical, 2 Severe, 0 Moderate, 0 Unknown vulnerabilities have been found across 1 dependency
   
   <details>
     <summary><b>Components</b></summary><br/>
     <ul>
         <details>
           <summary><b>pkg:maven/org.apache.zookeeper/zookeeper@3.7.0</b></summary>
           <ul>
     <details>
       <summary><b>SEVERE Vulnerabilities (2)</b></summary><br/>
   <ul>
   <details>
               <summary>CVE-2021-28164</summary>
   
   > #### [CVE-2021-28164] In Eclipse Jetty 9.4.37.v20210219 to 9.4.38.v20210224, the default compliance mo...
   > In Eclipse Jetty 9.4.37.v20210219 to 9.4.38.v20210224, the default compliance mode allows requests with URIs that contain %2e or %2e%2e segments to access protected resources within the WEB-INF directory. For example a request to /context/%2e/WEB-INF/web.xml can retrieve the web.xml file. This can reveal sensitive information regarding the implementation of a web application.
   >
   > **CVSS Score:** 5.3
   >
   > **CVSS Vector:** CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N
   
   </details>
   <details>
               <summary>CVE-2021-34429</summary>
   
   > #### [CVE-2021-34429] For Eclipse Jetty versions 9.4.37-9.4.42, 10.0.1-10.0.5 & 11.0.1-11.0.5, URIs ca...
   > For Eclipse Jetty versions 9.4.37-9.4.42, 10.0.1-10.0.5 & 11.0.1-11.0.5, URIs can be crafted using some encoded characters to access the content of the WEB-INF directory and/or bypass some security constraints. This is a variation of the vulnerability reported in CVE-2021-28164/GHSA-v7ff-8wcx-gmc5.
   >
   > **CVSS Score:** 5.3
   >
   > **CVSS Vector:** CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N
   
   </details>
   </ul>
       </details>
           </ul>
         </details>
     </ul>
   </details>
   (at-me [in a reply](https://help.sonatype.com/lift/talking-to-lift) with `help` or `ignore`)




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@solr.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


