dsmiley commented on a change in pull request #704:
URL: https://github.com/apache/solr/pull/704#discussion_r814350760



##########
File path: solr/test-framework/src/java/org/apache/solr/SolrJettyTestBase.java
##########
@@ -160,8 +165,10 @@ public SolrClient createNewSolrClient() {
     }
   }
 
-  // Sets up the necessary config files for Jetty. At least some tests require that the solrconfig from the test
-  // file directory are used, but some also require that the solr.xml file be explicitly there as of SOLR-4817
+  // Sets up the necessary config files for Jetty. At least some tests require that the solrconfig

Review comment:
       comment style here doesn't flow well; maybe convert it to Javadoc, or reflow it manually
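
       For example, the Javadoc form might look something like this (just a sketch, reusing the existing wording):

```java
/**
 * Sets up the necessary config files for Jetty. At least some tests require that the
 * solrconfig from the test file directory is used, but some also require that the
 * solr.xml file be explicitly there, as of SOLR-4817.
 */
```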

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
##########
@@ -72,37 +72,42 @@
 public class SolrCloudTestCase extends SolrTestCaseJ4 {
 
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final Boolean USE_PER_REPLICA_STATE = Boolean.parseBoolean(System.getProperty("use.per-replica", "false"));
+  public static final Boolean USE_PER_REPLICA_STATE =
+      Boolean.parseBoolean(System.getProperty("use.per-replica", "false"));
 
-  public static final int DEFAULT_TIMEOUT = 45; // this is an important timeout for test stability - can't be too short
+  public static final int DEFAULT_TIMEOUT =
+      45; // this is an important timeout for test stability - can't be too short
 
-  /**
-   * The cluster
-   */
+  /** The cluster */
   protected static volatile MiniSolrCloudCluster cluster;
 
   protected static SolrZkClient zkClient() {
     ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
-    if (reader == null)
-      cluster.getSolrClient().connect();
+    if (reader == null) cluster.getSolrClient().connect();
     return cluster.getSolrClient().getZkStateReader().getZkClient();
   }
 
   /**
    * Call this to configure a cluster of n nodes.
-   * <p>
-   * NB you must call {@link MiniSolrCloudCluster.Builder#configure()} to start the cluster
+   *
+   * <p>NB you must call {@link MiniSolrCloudCluster.Builder#configure()} to start the cluster
    *
    * @param nodeCount the number of nodes
    */
  protected static MiniSolrCloudCluster.Builder configureCluster(int nodeCount) {
-    // By default the MiniSolrCloudCluster being built will randomly (seed based) decide which collection API strategy
-    // to use (distributed or Overseer based) and which cluster update strategy to use (distributed if collection API
-    // is distributed, but Overseer based or distributed randomly chosen if Collection API is Overseer based)
+    // By default the MiniSolrCloudCluster being built will randomly (seed based) decide which
+    // collection API strategy

Review comment:
       reflow; maybe use `/* ... */` style, and perhaps spotless might even do it for you?
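
       A sketch of the `/* ... */` form, reusing the existing wording (whether spotless reflows block comments automatically would need checking):

```java
    /*
     * By default the MiniSolrCloudCluster being built will randomly (seed based) decide which
     * collection API strategy to use (distributed or Overseer based) and which cluster update
     * strategy to use (distributed if collection API is distributed, but Overseer based or
     * distributed randomly chosen if Collection API is Overseer based).
     */
```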

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
##########
@@ -257,16 +260,27 @@ public void distribSetUp() throws Exception {
 
     if (isSSLMode()) {
       System.clearProperty(URL_SCHEME);
-      try (ZkStateReader zkStateReader = new ZkStateReader(zkServer.getZkAddress(),
-          AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT)) {
+      try (ZkStateReader zkStateReader =

Review comment:
       an aside: perfect spot to use "var" -- the variable's name makes the type redundant.  And it's shorter.
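
       A sketch of the `var` form (same constructor arguments as the quoted code):

```java
      try (var zkStateReader =
          new ZkStateReader(
              zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT)) {
        // ... body unchanged ...
      }
```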

##########
File path: solr/test-framework/src/java/org/apache/solr/util/LogListener.java
##########
@@ -326,26 +351,31 @@ public String pollMessage() {
   }
 
   /**
-   * The total number of Log events so far processed by this instance, regardless of wether they have already been 
-   * removed from the queue, or if they could not be added to the queue due to capacity restrictions.
+   * The total number of Log events so far processed by this instance, regardless of wether they
+   * have already been removed from the queue, or if they could not be added to the queue due to
+   * capacity restrictions.
    */
   public int getCount() {
     return loggerAppender.getCount();
   }
-  
+
   /**
-   * A Filter registered with the specified Logger to restrict what Log events we process in our {@link QueueAppender}
+   * A Filter registered with the specified Logger to restrict what Log events we process in our
+   * {@link QueueAppender}
    */
-  @SuppressForbidden(reason="We need to use log4J2 classes directly")
-  private static final class MutablePredicateFilter extends AbstractFilter  {
+  @SuppressForbidden(reason = "We need to use log4J2 classes directly")
+  private static final class MutablePredicateFilter extends AbstractFilter {
     // TODO: could probably refactor to share some code with ErrorLogMuter
 
-    // This could probably be implemented with a combination of "LevelMatchFilter" and "ConjunctionFilter" if "ConjunctionFilter" existed
-    // Since it doesn't, we write our own more specialized impl instead of writing & combining multiple generalized versions
+    // This could probably be implemented with a combination of "LevelMatchFilter" and
+    // "ConjunctionFilter" if "ConjunctionFilter" existed

Review comment:
       reflow

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
##########
@@ -164,44 +171,36 @@
 import org.slf4j.LoggerFactory;
 import org.xml.sax.SAXException;
 
-import org.apache.commons.io.IOUtils;
-
-import static java.util.Objects.requireNonNull;
-import static org.apache.solr.cloud.SolrZkServer.ZK_WHITELIST_PROPERTY;
-import static org.apache.solr.common.cloud.ZkStateReader.HTTPS;
-import static org.apache.solr.common.cloud.ZkStateReader.URL_SCHEME;
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-import static org.hamcrest.core.StringContains.containsString;
-
 /**
- * A junit4 Solr test harness that extends SolrTestCase and, by extension, LuceneTestCase.
- * To change which core is used when loading the schema and solrconfig.xml, simply
- * invoke the {@link #initCore(String, String, String, String)} method.
+ * A junit4 Solr test harness that extends SolrTestCase and, by extension, LuceneTestCase. To change
+ * which core is used when loading the schema and solrconfig.xml, simply invoke the {@link
+ * #initCore(String, String, String, String)} method.
  */
 @SuppressSysoutChecks(bugUrl = "Solr dumps tons of logs to console.")
-@SuppressFileSystems("ExtrasFS") // might be ok, the failures with e.g. nightly runs might be "normal"
+@SuppressFileSystems(
+    "ExtrasFS") // might be ok, the failures with e.g. nightly runs might be "normal"

Review comment:
       Move the comment to the line before, or shorten it
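
       Something like the following, which keeps the annotation on a single line:

```java
// might be ok, the failures with e.g. nightly runs might be "normal"
@SuppressFileSystems("ExtrasFS")
```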

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseHS.java
##########
@@ -181,53 +192,56 @@ public static void matchJSON(String response, String... tests) throws Exception
     boolean failed = false;
 
     for (String test : tests) {
-      if (test == null || test.length()==0) continue;
+      if (test == null || test.length() == 0) continue;
 
       try {
         failed = true;
        String err = JSONTestUtil.match(response, test, JSONTestUtil.DEFAULT_DELTA);
         failed = false;
         if (err != null) {
-          log.error("query failed JSON validation. error={}\n expected ={}\n response = {}"
-              , err, test, response
-          );
+          log.error(
+              "query failed JSON validation. error={}\n expected ={}\n response = {}",
+              err,
+              test,
+              response);
           throw new RuntimeException(err);
         }
       } finally {
         if (failed) {
-          log.error("JSON query validation threw an exception.\n expected ={}\n response = {}"
-                  , test, response
-          );
+          log.error(
+              "JSON query validation threw an exception.\n expected ={}\n response = {}",
+              test,
+              response);
         }
       }
     }
   }
 
   /***
-  public static void clearNCache() {
-    SolrQueryRequest req = req();
-    req.getSearcher().getnCache().clear();  // OFF-HEAP
-    req.close();
-  }***/
+   * public static void clearNCache() {
+   * SolrQueryRequest req = req();
+   * req.getSearcher().getnCache().clear();  // OFF-HEAP
+   * req.close();
+   * }***/

Review comment:
       yuck

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
##########
@@ -41,28 +40,31 @@
 import org.junit.Test;
 
 /**
- * This test simply does a bunch of basic things in solrcloud mode and asserts things
- * work as expected.
+ * This test simply does a bunch of basic things in solrcloud mode and asserts things work as
+ * expected.
  */
-@LuceneTestCase.SuppressCodecs({"SimpleText"}) // Backups do checksum validation against a footer value not present in 'SimpleText'
+@LuceneTestCase.SuppressCodecs({
+  "SimpleText"
+}) // Backups do checksum validation against a footer value not present in 'SimpleText'

Review comment:
       Moving the comment to a line before the annotation would be more succinct
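
       i.e. roughly this, which also lets the annotation collapse back onto one line:

```java
// Backups do checksum validation against a footer value not present in 'SimpleText'
@LuceneTestCase.SuppressCodecs({"SimpleText"})
```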

##########
File path: solr/test-framework/src/java/org/apache/solr/util/LogLevel.java
##########
@@ -81,33 +77,33 @@ public static void restoreLogLevels(Map<String, Level> savedLogLevels) {
       final Configuration config = ctx.getConfiguration();
 
       final Map<String, Level> oldLevels = new HashMap<>();
-      logLevels.forEach((loggerName, newLevel) -> {
-        final LoggerConfig logConfig = config.getLoggerConfig(loggerName);
-        if (loggerName.equals(logConfig.getName())) {
-          // we have an existing LoggerConfig for this specific loggerName
-          // record the existing 'old' level...
-          oldLevels.put(loggerName, logConfig.getLevel());
-          // ...and set the new one (or remove if null) ...
-          if (null == newLevel) {
-            config.removeLogger(loggerName);
-          } else {
-            logConfig.setLevel(newLevel);
-          }
-        } else {
-          // there is no existing configuration for the exact loggerName, logConfig is some ancestor
-          // record an 'old' level of 'null' to track the lack of any configured level...
-          oldLevels.put(loggerName, null);
-          // ...and now create a new logger config wih our new level
-          final LoggerConfig newLoggerConfig = new LoggerConfig(loggerName, newLevel, true);
-          config.addLogger(loggerName, newLoggerConfig);
-        }
+      logLevels.forEach(
+          (loggerName, newLevel) -> {
+            final LoggerConfig logConfig = config.getLoggerConfig(loggerName);
+            if (loggerName.equals(logConfig.getName())) {
+              // we have an existing LoggerConfig for this specific loggerName
+              // record the existing 'old' level...
+              oldLevels.put(loggerName, logConfig.getLevel());
+              // ...and set the new one (or remove if null) ...
+              if (null == newLevel) {
+                config.removeLogger(loggerName);
+              } else {
+                logConfig.setLevel(newLevel);
+              }
+            } else {
+              // there is no existing configuration for the exact loggerName, logConfig is some
+              // ancestor

Review comment:
       reflow

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
##########
@@ -2877,46 +3010,63 @@ protected String getSaferTestName() {
     }
     return testName;
   }
-  
+
   @BeforeClass
  public static void assertNonBlockingRandomGeneratorAvailable() throws InterruptedException {
     final String EGD = "java.security.egd";
     final String URANDOM = "file:/dev/./urandom";
     final String ALLOWED = "test.solr.allowed.securerandom";
     final String allowedAlg = System.getProperty(ALLOWED);
     final String actualEGD = System.getProperty(EGD);
-    
+
     log.info("SecureRandom sanity checks: {}={} & {}={}", ALLOWED, allowedAlg, 
EGD, actualEGD);
 
     if (null != allowedAlg) {
      // the user has explicitly requested to bypass our assertions and allow a particular alg
      // the only thing we should do is assert that the algorithm they have allowed is actually used
-      
-      
+
       final String actualAlg = (new SecureRandom()).getAlgorithm();
-      assertEquals("Algorithm specified using "+ALLOWED+" system property " +
-                   "does not match actual algorithm", allowedAlg, actualAlg);
+      assertEquals(
+          "Algorithm specified using "
+              + ALLOWED
+              + " system property "
+              + "does not match actual algorithm",
+          allowedAlg,
+          actualAlg);
       return;
     }
-    // else: no user override, do the checks we want including 
-    
+    // else: no user override, do the checks we want including
+
     if (null == actualEGD) {
       System.setProperty(EGD, URANDOM);
-      log.warn("System property {} was not set by test runner, forcibly set to expected: {}", EGD, URANDOM);
-    } else if (! URANDOM.equals(actualEGD) ) {
-      log.warn("System property {}={} .. test runner should use expected: {}", EGD, actualEGD, URANDOM);
+      log.warn(
+          "System property {} was not set by test runner, forcibly set to expected: {}",
+          EGD,
+          URANDOM);
+    } else if (!URANDOM.equals(actualEGD)) {
+      log.warn(
+          "System property {}={} .. test runner should use expected: {}", EGD, actualEGD, URANDOM);
     }
-    
+
     final String algorithm = (new SecureRandom()).getAlgorithm();
-    
-    assertFalse("SecureRandom algorithm '" + algorithm + "' is in use by your JVM, " +
-                "which is a potentially blocking algorithm on some environments. " +
-                "Please report the details of this failure (and your JVM vendor/version) to us...@solr.apache.org. " +
-                "You can try to run your tests with -D"+EGD+"="+URANDOM+" or bypass this check using " +
-                "-Dtest.solr.allowed.securerandom="+ algorithm +" as a JVM option when running tests.",
-                // be permissive in our checks and deny only algorithms
-                // that are known to be blocking under some circumstances
-                algorithm.equals("NativePRNG") || algorithm.equals("NativePRNGBlocking"));
+
+    assertFalse(
+        "SecureRandom algorithm '"
+            + algorithm
+            + "' is in use by your JVM, "

Review comment:
       I wish spotless would recognize how to break these strings in a way that leverages the full line width
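
       An abbreviated, hand-packed sketch of the kind of layout I mean (part of the message omitted; spotless does not produce this today):

```java
    assertFalse(
        "SecureRandom algorithm '" + algorithm + "' is in use by your JVM, "
            + "which is a potentially blocking algorithm on some environments. "
            + "You can try to run your tests with -D" + EGD + "=" + URANDOM
            + " or bypass this check using -Dtest.solr.allowed.securerandom=" + algorithm
            + " as a JVM option when running tests.",
        algorithm.equals("NativePRNG") || algorithm.equals("NativePRNGBlocking"));
```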

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
##########
@@ -1849,73 +1914,90 @@ public Fld createField() {
       Fld fld = new Fld();
       fld.ftype = this;
       fld.vals = vals;
-      return fld;          
+      return fld;
     }
-
   }
 
  public static void assertResponseValues(SolrResponseBase rsp, Object... assertions) {
     Map<String, Object> values = Utils.makeMap(assertions);
-    values.forEach((s, o) -> {
-      if (o instanceof String) {
-        assertEquals(o, rsp.getResponse()._getStr(s, null));
-      } else {
-        assertEquals(o, rsp.getResponse()._get(s, null));
-      }
-    });
+    values.forEach(
+        (s, o) -> {
+          if (o instanceof String) {
+            assertEquals(o, rsp.getResponse()._getStr(s, null));
+          } else {
+            assertEquals(o, rsp.getResponse()._get(s, null));
+          }
+        });
   }
+
   @SuppressWarnings({"rawtypes"})
-  public Map<Comparable,Doc> indexDocs(List<FldType> descriptor, Map<Comparable,Doc> model, int nDocs) throws Exception {
+  public Map<Comparable, Doc> indexDocs(
+      List<FldType> descriptor, Map<Comparable, Doc> model, int nDocs) throws Exception {
     if (model == null) {
       model = new LinkedHashMap<>();
     }
 
    // commit an average of 10 times for large sets, or 10% of the time for small sets
-    int commitOneOutOf = Math.max(nDocs/10, 10);
+    int commitOneOutOf = Math.max(nDocs / 10, 10);
 
-    for (int i=0; i<nDocs; i++) {
+    for (int i = 0; i < nDocs; i++) {
       Doc doc = createDoc(descriptor);
       // doc.order = order++;
       updateJ(toJSON(doc), null);
       model.put(doc.id, doc);
 
       // commit 10% of the time
-      if (random().nextInt(commitOneOutOf)==0) {
+      if (random().nextInt(commitOneOutOf) == 0) {
         assertU(commit());
       }
 
       // duplicate 10% of the docs
-      if (random().nextInt(10)==0) {
+      if (random().nextInt(10) == 0) {
         updateJ(toJSON(doc), null);
-        model.put(doc.id, doc);        
+        model.put(doc.id, doc);
       }
     }
 
     // optimize 10% of the time
-    if (random().nextInt(10)==0) {
+    if (random().nextInt(10) == 0) {
       assertU(optimize());
     } else {
       if (random().nextInt(10) == 0) {
         assertU(commit());
       } else {
-        assertU(commit("softCommit","true"));
+        assertU(commit("softCommit", "true"));
       }
     }
 
-    // merging segments no longer selects just adjacent segments hence ids (doc.order) can be shuffled.
+    // merging segments no longer selects just adjacent segments hence ids (doc.order) can be
+    // shuffled.
     // we need to look at the index to determine the order.
-    String responseStr = h.query(req("q","*:*", "fl","id", "sort","_docid_ asc", "rows",Integer.toString(model.size()*2), "wt","json", "indent","true"));
+    String responseStr =
+        h.query(
+            req(
+                "q",

Review comment:
       ok for now but if I were writing fresh code and saw spotless do this, I would go about writing the code differently.  I'd call `params().set("q", "*:*").set(...).set(...)` etc.

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseHS.java
##########
@@ -107,72 +106,84 @@ public static ModifiableSolrParams params(SolrParams params, String... moreParam
     }
     return result;
   }
-  
-  
+
   @SuppressWarnings({"unchecked"})
-  public static Object createDocObjects(@SuppressWarnings({"rawtypes"})Map<Comparable, Doc> fullModel,
-                                        @SuppressWarnings({"rawtypes"})Comparator sort, int rows,
-                                        Collection<String> fieldNames) {
+  public static Object createDocObjects(
+      @SuppressWarnings({"rawtypes"}) Map<Comparable, Doc> fullModel,
+      @SuppressWarnings({"rawtypes"}) Comparator sort,
+      int rows,
+      Collection<String> fieldNames) {
     List<Doc> docList = new ArrayList<>(fullModel.values());
     Collections.sort(docList, sort);
     @SuppressWarnings({"rawtypes"})
     List sortedDocs = new ArrayList(rows);
     for (Doc doc : docList) {
       if (sortedDocs.size() >= rows) break;
-      Map<String,Object> odoc = toObject(doc, h.getCore().getLatestSchema(), fieldNames);
+      Map<String, Object> odoc = toObject(doc, h.getCore().getLatestSchema(), fieldNames);
       sortedDocs.add(toObject(doc, h.getCore().getLatestSchema(), fieldNames));
     }
     return sortedDocs;
   }
 
-
-  public static void compare(SolrQueryRequest req, String path, Object model,
-                             @SuppressWarnings({"rawtypes"})Map<Comparable, Doc> fullModel) throws Exception {
+  public static void compare(
+      SolrQueryRequest req,
+      String path,
+      Object model,
+      @SuppressWarnings({"rawtypes"}) Map<Comparable, Doc> fullModel)
+      throws Exception {
     String strResponse = h.query(req);
 
     Object realResponse = ObjectBuilder.fromJSON(strResponse);
     String err = JSONTestUtil.matchObj(path, realResponse, model);
     if (err != null) {
-      log.error("RESPONSE MISMATCH: {}\n\trequest={}\n\tresult={}" +
-          "\n\texpected={}\n\tmodel={}"
-          , err, req, strResponse, JSONUtil.toJSON(model), fullModel);
+      log.error(
+          "RESPONSE MISMATCH: {}\n\trequest={}\n\tresult={}" + "\n\texpected={}\n\tmodel={}",

Review comment:
       I bet if you declare the first arg as a String on the previous line (`var msg = "RESPONSE ......."`), then the following log.error would fit on one line.
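
       Roughly (a sketch; the literal is the one already in this diff):

```java
      var msg = "RESPONSE MISMATCH: {}\n\trequest={}\n\tresult={}\n\texpected={}\n\tmodel={}";
      log.error(msg, err, req, strResponse, JSONUtil.toJSON(model), fullModel);
```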

##########
File path: solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
##########
@@ -72,37 +72,42 @@
 public class SolrCloudTestCase extends SolrTestCaseJ4 {
 
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  public static final Boolean USE_PER_REPLICA_STATE = Boolean.parseBoolean(System.getProperty("use.per-replica", "false"));
+  public static final Boolean USE_PER_REPLICA_STATE =
+      Boolean.parseBoolean(System.getProperty("use.per-replica", "false"));
 
-  public static final int DEFAULT_TIMEOUT = 45; // this is an important timeout for test stability - can't be too short
+  public static final int DEFAULT_TIMEOUT =
+      45; // this is an important timeout for test stability - can't be too short

Review comment:
       move comment to previous line
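
       i.e.:

```java
  // this is an important timeout for test stability - can't be too short
  public static final int DEFAULT_TIMEOUT = 45;
```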

##########
File path: solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
##########
@@ -101,66 +100,85 @@ public void testExtraFilters() throws Exception {
     JettyConfig.Builder jettyConfig = JettyConfig.builder();
     jettyConfig.waitForLoadingCoresToFinish(null);
     jettyConfig.withFilter(JettySolrRunner.DebugFilter.class, "*");
-    MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(random().nextInt(3) + 1, createTempDir(), jettyConfig.build());
+    MiniSolrCloudCluster cluster =
+        new MiniSolrCloudCluster(random().nextInt(3) + 1, createTempDir(), jettyConfig.build());
     cluster.shutdown();
   }
 
   public void testSolrHomeAndResourceLoaders() throws Exception {
     final String SOLR_HOME_PROP = "solr.solr.home";
-    // regardless of what sys prop may be set, everything in the cluster should use solr home dirs under the 
-    // configured base dir -- and nothing in the call stack should be "setting" the sys prop to make that work...
+    // regardless of what sys prop may be set, everything in the cluster should use solr home dirs
+    // under the

Review comment:
       reflow

##########
File path: solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
##########
@@ -2147,34 +2247,51 @@ public static void assertXmlFile(final File file, String... xpath)
       String xml = Files.readString(file.toPath());
       String results = TestHarness.validateXPath(xml, xpath);
       if (null != results) {
-        String msg = "File XPath failure: file=" + file.getPath() + " xpath="
-            + results + "\n\nxml was: " + xml;
+        String msg =

Review comment:
       minor: I suspect inlining msg into the `fail(....)` would be _just_ short enough to not overflow
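
       Sketch of the inlined form (whether it actually stays under the line limit is for spotless to decide):

```java
        fail("File XPath failure: file=" + file.getPath() + " xpath=" + results + "\n\nxml was: " + xml);
```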




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@solr.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


