risdenk commented on a change in pull request #705:
URL: https://github.com/apache/solr/pull/705#discussion_r816411492



##########
File path: solr/core/src/java/org/apache/solr/update/AddUpdateCommand.java
##########
@@ -225,11 +224,13 @@ public void setIndexedId(BytesRef indexedId) {
       if (versionSif != null) {
         addVersionField(sdoc, versionSif);
       }
-      // TODO: if possible concurrent modification exception (if SolrInputDocument not cloned and is being forwarded to replicas)
+      // TODO: if possible concurrent modification exception (if SolrInputDocument not cloned and is
+      // being forwarded to replicas)
       // then we could add this field to the generated lucene document instead.

Review comment:
       Fix this
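
For example, one way to address it (just a sketch of a manual rewrap, not a prescribed wording) is to break the TODO at phrase boundaries so the formatter leaves it alone:

```java
// TODO: if possible concurrent modification exception
// (if SolrInputDocument not cloned and is being forwarded to replicas)
// then we could add this field to the generated lucene document instead.
```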

##########
File path: solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
##########
@@ -69,25 +69,26 @@
 
   private volatile boolean lastReplicationSuccess = true;
 
-  // will we attempt recovery as if we just started up (i.e. use starting versions rather than recent versions for peersync
+  // will we attempt recovery as if we just started up (i.e. use starting versions rather than
+  // recent versions for peersync
  // so we aren't looking at update versions that have started buffering since we came up.

Review comment:
      Fix this - the reflow breaks this comment mid-phrase ("rather than / recent versions"); rewrap or reword it so each line ends at a natural break.

##########
File path: solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
##########
@@ -128,7 +128,8 @@ private void closeIndexWriter(IndexWriterCloser closer) {
         }
 
         refCntWriter.incref();
-        succeeded = true;  // the returned RefCounted<IndexWriter> will release the readLock on a decref()
+        succeeded =
+            true; // the returned RefCounted<IndexWriter> will release the readLock on a decref()

Review comment:
       Fix this
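
A possible cleanup (sketch only, not necessarily what this PR will do): move the trailing comment above the assignment so the statement stays on one line:

```java
// the returned RefCounted<IndexWriter> will release the readLock on a decref()
succeeded = true;
```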

##########
File path: solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
##########
@@ -138,27 +139,32 @@ private void closeIndexWriter(IndexWriterCloser closer) {
         iwLock.readLock().unlock();
       }
     }
-
   }
 
   private void initRefCntWriter() {
-    // TODO: since we moved to a read-write lock, and don't rely on the count to close the writer, we don't really
-    // need this class any more.  It could also be a singleton created at the same time as SolrCoreState
-    // or we could change the API of SolrCoreState to just return the writer and then add a releaseWriter() call.
+    // TODO: since we moved to a read-write lock, and don't rely on the count to close the writer,
+    // we don't really
+    // need this class any more.  It could also be a singleton created at the same time as
+    // SolrCoreState
+    // or we could change the API of SolrCoreState to just return the writer and then add a
+    // releaseWriter() call.

Review comment:
       Fix this
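
One option (a sketch of a manual rewrap under the 100-column limit, not prescriptive): reflow the whole TODO so no line is left as a dangling fragment:

```java
// TODO: since we moved to a read-write lock, and don't rely on the count to close the writer,
// we don't really need this class any more. It could also be a singleton created at the same
// time as SolrCoreState, or we could change the API of SolrCoreState to just return the writer
// and then add a releaseWriter() call.
```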

##########
File path: solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
##########
@@ -138,27 +139,32 @@ private void closeIndexWriter(IndexWriterCloser closer) {
         iwLock.readLock().unlock();
       }
     }
-
   }
 
   private void initRefCntWriter() {
-    // TODO: since we moved to a read-write lock, and don't rely on the count to close the writer, we don't really
-    // need this class any more.  It could also be a singleton created at the same time as SolrCoreState
-    // or we could change the API of SolrCoreState to just return the writer and then add a releaseWriter() call.
+    // TODO: since we moved to a read-write lock, and don't rely on the count to close the writer,
+    // we don't really
+    // need this class any more.  It could also be a singleton created at the same time as
+    // SolrCoreState
+    // or we could change the API of SolrCoreState to just return the writer and then add a
+    // releaseWriter() call.
     if (refCntWriter == null && indexWriter != null) {
-      refCntWriter = new RefCounted<IndexWriter>(indexWriter) {
-
-        @Override
-        public void decref() {
-          iwLock.readLock().unlock();
-          super.decref();  // This is now redundant (since we switched to read-write locks), we don't really need to maintain our own reference count.
-        }
+      refCntWriter =
+          new RefCounted<IndexWriter>(indexWriter) {
+
+            @Override
+            public void decref() {
+              iwLock.readLock().unlock();
+              super.decref(); // This is now redundant (since we switched to read-write locks), we
+              // don't really need to maintain our own reference count.

Review comment:
       Fix this
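
As a sketch of one way to fix it (readability only, no behavior change), the trailing comment could move above the call so the statement stays intact:

```java
@Override
public void decref() {
  iwLock.readLock().unlock();
  // This is now redundant (since we switched to read-write locks); we don't
  // really need to maintain our own reference count.
  super.decref();
}
```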

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -646,14 +736,15 @@ public void commit(CommitUpdateCommand cmd) throws IOException {
         } else if (cmd.expungeDeletes) {
           writer.forceMergeDeletes();
         }
-        
+
         if (!cmd.softCommit) {
-          synchronized (solrCoreState.getUpdateLock()) { // sync is currently needed to prevent preCommit
-                                // from being called between preSoft and
-                                // postSoft... see postSoft comments.
+          synchronized (
+              solrCoreState.getUpdateLock()) { // sync is currently needed to prevent preCommit

Review comment:
       Fix this
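
A possible sketch (keeping the existing wording, just relocating it): put the explanation above the block so `synchronized (...)` fits on one line:

```java
// sync is currently needed to prevent preCommit from being called between
// preSoft and postSoft... see postSoft comments.
synchronized (solrCoreState.getUpdateLock()) {
  // ...existing commit logic...
}
```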

##########
File path: solr/core/src/java/org/apache/solr/update/PeerSync.java
##########
@@ -353,36 +359,60 @@ private boolean handleResponse(ShardResponse srsp) {
      // If the replica went down between asking for versions and asking for specific updates, that
      // shouldn't be treated as success since we counted on getting those updates back (and avoided
       // redundantly asking other replicas for them).
-      if (cantReachIsSuccess && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS && srsp.getException() instanceof SolrServerException) {
-        Throwable solrException = ((SolrServerException) srsp.getException())
-            .getRootCause();
-        boolean connectTimeoutExceptionInChain = connectTimeoutExceptionInChain(srsp.getException());
-        if (connectTimeoutExceptionInChain || solrException instanceof ConnectTimeoutException || solrException instanceof SocketTimeoutException
-            || solrException instanceof NoHttpResponseException || solrException instanceof SocketException) {
-
-          log.warn("{} couldn't connect to {}, counting as success ", msg(), srsp.getShardAddress(), srsp.getException());
+      if (cantReachIsSuccess
+          && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS
+          && srsp.getException() instanceof SolrServerException) {
+        Throwable solrException = ((SolrServerException) srsp.getException()).getRootCause();
+        boolean connectTimeoutExceptionInChain =
+            connectTimeoutExceptionInChain(srsp.getException());
+        if (connectTimeoutExceptionInChain
+            || solrException instanceof ConnectTimeoutException
+            || solrException instanceof SocketTimeoutException
+            || solrException instanceof NoHttpResponseException
+            || solrException instanceof SocketException) {
+
+          log.warn(
+              "{} couldn't connect to {}, counting as success ",
+              msg(),
+              srsp.getShardAddress(),
+              srsp.getException());
           return true;
         }
       }
-      
-      if (cantReachIsSuccess && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS && srsp.getException() instanceof SolrException && ((SolrException) srsp.getException()).code() == 503) {
-        log.warn("{} got a 503 from {}, counting as success ", msg(), srsp.getShardAddress(), srsp.getException());
+
+      if (cantReachIsSuccess
+          && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS
+          && srsp.getException() instanceof SolrException
+          && ((SolrException) srsp.getException()).code() == 503) {
+        log.warn(
+            "{} got a 503 from {}, counting as success ",
+            msg(),
+            srsp.getShardAddress(),
+            srsp.getException());
         return true;
       }
-      
-      if (cantReachIsSuccess && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS && srsp.getException() instanceof SolrException && ((SolrException) srsp.getException()).code() == 404) {
-        log.warn("{} got a 404 from {}, counting as success. {} Perhaps /get is not registered?"
-            , msg(), srsp.getShardAddress(), srsp.getException());
+
+      if (cantReachIsSuccess
+          && sreq.purpose == SHARD_REQUEST_PURPOSE_GET_VERSIONS
+          && srsp.getException() instanceof SolrException
+          && ((SolrException) srsp.getException()).code() == 404) {
+        log.warn(
+            "{} got a 404 from {}, counting as success. {} Perhaps /get is not 
registered?",
+            msg(),
+            srsp.getShardAddress(),
+            srsp.getException());
         return true;
       }
-      
-      // TODO: we should return the above information so that when we can request a recovery through zookeeper, we do
+
+      // TODO: we should return the above information so that when we can request a recovery through
+      // zookeeper, we do
+      // zookeeper, we do
       // that for these nodes

Review comment:
      Fix this - the rewrapped TODO leaves "zookeeper, we do" stranded on its own line; reflow the comment so it reads cleanly.

##########
File path: solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
##########
@@ -392,18 +439,24 @@ public String toString() {
     }
 
     // Called whenever we get results back from a sub-request.
-    // The only ambiguity is if I have _both_ a rollup tracker and a leader tracker. In that case we need to handle
-    // both requests returning from leaders of other shards _and_ from my followers. This happens if a leader happens
+    // The only ambiguity is if I have _both_ a rollup tracker and a leader tracker. In that case we
+    // need to handle
+    // both requests returning from leaders of other shards _and_ from my followers. This happens if
+    // a leader happens
     // to be the aggregator too.
     //
-    // This isn't really a problem because only responses _from_ some leader will have the "rf" parameter, in which case
+    // This isn't really a problem because only responses _from_ some leader will have the "rf"
+    // parameter, in which case
     // we need to add the data to the rollup tracker.
     //
-    // In the case of a leaderTracker and rollupTracker both being present, then we need to take care when assembling
+    // In the case of a leaderTracker and rollupTracker both being present, then we need to take
+    // care when assembling
     // the final response to check both the rollup and leader trackers on the aggregator node.
-    public void trackRequestResult(org.eclipse.jetty.client.api.Response resp, InputStream respBody, boolean success) {
+    public void trackRequestResult(
+        org.eclipse.jetty.client.api.Response resp, InputStream respBody, boolean success) {
 
-      // Returning Integer.MAX_VALUE here means there was no "rf" on the response, therefore we just need to increment
+      // Returning Integer.MAX_VALUE here means there was no "rf" on the response, therefore we just
+      // need to increment
       // our achieved rf if we are a leader, i.e. have a leaderTracker.

Review comment:
      Fix this - this comment block now breaks mid-sentence in several places ("need to handle", "a leader happens", "care when assembling"); reflow it so each line ends at a natural break.

##########
File path: solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
##########
@@ -615,9 +715,11 @@ public void visit(QueryVisitor visitor) {
             docSets[currentPartition.get()].set(doc);
           }
           currentPartition.set((currentPartition.get() + 1) % numPieces);
-        } else  {
+        } else {
           int matchingRangesCount = 0;
-          for (int i=0; i < rangesArr.length; i++) {      // inner-loop: use array here for extra speed.
+          for (int i = 0;
+              i < rangesArr.length;
+              i++) { // inner-loop: use array here for extra speed.

Review comment:
       Fix this
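
Rather than letting the formatter split the loop header across three lines, one sketch of a fix (same behavior, comment relocated) would be:

```java
// inner-loop: use array here for extra speed.
for (int i = 0; i < rangesArr.length; i++) {
  // ...
}
```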

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -75,30 +71,37 @@
   Path tlog;
   FileChannel channel;
   OutputStream os;
-  protected FastOutputStream fos;    // all accesses to this stream should be synchronized on "this" (The TransactionLog)
+  protected FastOutputStream
+      fos; // all accesses to this stream should be synchronized on "this" (The TransactionLog)
   int numRecords;
   public boolean isBuffer;
 
-  protected volatile boolean deleteOnClose = true;  // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery)
+  protected volatile boolean deleteOnClose =
+      true; // we can delete old tlogs since they are currently only used for real-time-get (and in
+  // the future, recovery)

Review comment:
       Fix this
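
A sketch of one alternative (purely illustrative): put these comments above the field declarations so the declarations are not split:

```java
// all accesses to this stream should be synchronized on "this" (The TransactionLog)
protected FastOutputStream fos;

// we can delete old tlogs since they are currently only used for real-time-get
// (and in the future, recovery)
protected volatile boolean deleteOnClose = true;
```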

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -101,64 +101,96 @@
   // tracks when auto-commit should occur
   protected final CommitTracker commitTracker;
   protected final CommitTracker softCommitTracker;
-  
+
   protected boolean commitWithinSoftCommit;
   /**
    * package access for testing
+   *
    * @lucene.internal
    */
   void setCommitWithinSoftCommit(boolean value) {
     this.commitWithinSoftCommit = value;
   }
 
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
+
   public DirectUpdateHandler2(SolrCore core) {
     super(core);
-   
+
     solrCoreState = core.getSolrCoreState();
-    
-    UpdateHandlerInfo updateHandlerInfo = core.getSolrConfig()
-        .getUpdateHandlerInfo();
+
+    UpdateHandlerInfo updateHandlerInfo = core.getSolrConfig().getUpdateHandlerInfo();
     int docsUpperBound = updateHandlerInfo.autoCommmitMaxDocs;
     int timeUpperBound = updateHandlerInfo.autoCommmitMaxTime;
     long fileSizeUpperBound = updateHandlerInfo.autoCommitMaxSizeBytes;
-    commitTracker = new CommitTracker("Hard", core, docsUpperBound, 
timeUpperBound, fileSizeUpperBound, updateHandlerInfo.openSearcher, false);
-    
+    commitTracker =
+        new CommitTracker(
+            "Hard",
+            core,
+            docsUpperBound,
+            timeUpperBound,
+            fileSizeUpperBound,
+            updateHandlerInfo.openSearcher,
+            false);
+
     int softCommitDocsUpperBound = updateHandlerInfo.autoSoftCommmitMaxDocs;
     int softCommitTimeUpperBound = updateHandlerInfo.autoSoftCommmitMaxTime;
-    softCommitTracker = new CommitTracker("Soft", core, 
softCommitDocsUpperBound, softCommitTimeUpperBound, 
NO_FILE_SIZE_UPPER_BOUND_PLACEHOLDER, true, true);
-    
+    softCommitTracker =
+        new CommitTracker(
+            "Soft",
+            core,
+            softCommitDocsUpperBound,
+            softCommitTimeUpperBound,
+            NO_FILE_SIZE_UPPER_BOUND_PLACEHOLDER,
+            true,
+            true);
+
     commitWithinSoftCommit = updateHandlerInfo.commitWithinSoftCommit;
 
     ZkController zkController = core.getCoreContainer().getZkController();
-    if (zkController != null && core.getCoreDescriptor().getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
+    if (zkController != null
+        && core.getCoreDescriptor().getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
       commitWithinSoftCommit = false;
       commitTracker.setOpenSearcher(true);
     }
-
   }
-  
+
   public DirectUpdateHandler2(SolrCore core, UpdateHandler updateHandler) {
     super(core, updateHandler.getUpdateLog());
     solrCoreState = core.getSolrCoreState();
-    
-    UpdateHandlerInfo updateHandlerInfo = core.getSolrConfig()
-        .getUpdateHandlerInfo();
+
+    UpdateHandlerInfo updateHandlerInfo = core.getSolrConfig().getUpdateHandlerInfo();
     int docsUpperBound = updateHandlerInfo.autoCommmitMaxDocs;
     int timeUpperBound = updateHandlerInfo.autoCommmitMaxTime;
     long fileSizeUpperBound = updateHandlerInfo.autoCommitMaxSizeBytes;
-    commitTracker = new CommitTracker("Hard", core, docsUpperBound, 
timeUpperBound, fileSizeUpperBound, updateHandlerInfo.openSearcher, false);
-    
+    commitTracker =
+        new CommitTracker(
+            "Hard",
+            core,
+            docsUpperBound,
+            timeUpperBound,
+            fileSizeUpperBound,
+            updateHandlerInfo.openSearcher,
+            false);
+
     int softCommitDocsUpperBound = updateHandlerInfo.autoSoftCommmitMaxDocs;
     int softCommitTimeUpperBound = updateHandlerInfo.autoSoftCommmitMaxTime;
-    softCommitTracker = new CommitTracker("Soft", core, 
softCommitDocsUpperBound, softCommitTimeUpperBound, 
NO_FILE_SIZE_UPPER_BOUND_PLACEHOLDER, updateHandlerInfo.openSearcher, true);
-    
+    softCommitTracker =
+        new CommitTracker(
+            "Soft",
+            core,
+            softCommitDocsUpperBound,
+            softCommitTimeUpperBound,
+            NO_FILE_SIZE_UPPER_BOUND_PLACEHOLDER,
+            updateHandlerInfo.openSearcher,
+            true);
+
     commitWithinSoftCommit = updateHandlerInfo.commitWithinSoftCommit;
 
     UpdateLog existingLog = updateHandler.getUpdateLog();
     if (this.ulog != null && this.ulog == existingLog) {
-      // If we are reusing the existing update log, inform the log that its update handler has changed.
+      // If we are reusing the existing update log, inform the log that its update handler has
+      // changed.
       // We do this as late as possible.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -852,26 +877,31 @@ public Object next() throws IOException {
         // Position buffer so that this record is at the end.
        // For small records, this will cause subsequent calls to next() to be within the buffer.
        long seekPos = endOfThisRecord - fis.getBufferSize();
-        seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger then the block size.
+        seekPos =
+            Math.min(
+                seekPos,
+                prevPos); // seek to the start of the record if it's larger then the block size.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -792,22 +815,23 @@ public Object next() throws IOException, InterruptedException {
 
     @Override
     public abstract String toString();
-
   }
 
   public class FSReverseReader extends ReverseReader {
     ChannelFastInputStream fis;
-    private LogCodec codec = new LogCodec(resolver) {
-      @Override
-      public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) {
-        // Given that the SolrInputDocument is last in an add record, it's OK to just skip
-        // reading it completely.
-        return null;
-      }
-    };
+    private LogCodec codec =
+        new LogCodec(resolver) {
+          @Override
+          public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) {
+            // Given that the SolrInputDocument is last in an add record, it's OK to just skip
+            // reading it completely.
+            return null;
+          }
+        };
 
-    int nextLength;  // length of the next record (the next one closer to the start of the log file)
-    long prevPos;    // where we started reading from last time (so prevPos - nextLength == start of next record)
+    int nextLength; // length of the next record (the next one closer to the start of the log file)
+    long prevPos; // where we started reading from last time (so prevPos - nextLength == start of
+    // next record)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -186,48 +198,53 @@ public String toString() {
   protected TransactionLog tlog;
   protected TransactionLog prevTlog;
   protected TransactionLog prevTlogOnPrecommit;
-  protected final Deque<TransactionLog> logs = new LinkedList<>();  // list of recent logs, newest first
+  protected final Deque<TransactionLog> logs =
+      new LinkedList<>(); // list of recent logs, newest first
   protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<>();
-  protected int numOldRecords;  // number of records in the recent logs
+  protected int numOldRecords; // number of records in the recent logs
 
-  protected Map<BytesRef,LogPtr> map = new HashMap<>();
-  protected Map<BytesRef,LogPtr> prevMap;  // used while committing/reopening is happening
-  protected Map<BytesRef,LogPtr> prevMap2;  // used while committing/reopening is happening
-  protected TransactionLog prevMapLog;  // the transaction log used to look up entries found in prevMap
-  protected TransactionLog prevMapLog2;  // the transaction log used to look up entries found in prevMap2
+  protected Map<BytesRef, LogPtr> map = new HashMap<>();
+  protected Map<BytesRef, LogPtr> prevMap; // used while committing/reopening is happening
+  protected Map<BytesRef, LogPtr> prevMap2; // used while committing/reopening is happening
+  protected TransactionLog
+      prevMapLog; // the transaction log used to look up entries found in prevMap
+  protected TransactionLog
+      prevMapLog2; // the transaction log used to look up entries found in prevMap2
 
   protected final int numDeletesToKeep = 1000;
   protected final int numDeletesByQueryToKeep = 100;
   protected int numRecordsToKeep;
   protected int maxNumLogsToKeep;
-  protected int numVersionBuckets; // This should only be used to initialize VersionInfo... the actual number of buckets may be rounded up to a power of two.
+  protected int
+      numVersionBuckets; // This should only be used to initialize VersionInfo... the actual number
+  // of buckets may be rounded up to a power of two.
   protected Long maxVersionFromIndex = null;
   protected boolean existOldBufferLog = false;
 
   // keep track of deletes only... this is not updated on an add
-  protected LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<>(numDeletesToKeep) {
-    @Override
-    protected boolean removeEldestEntry(Map.Entry<BytesRef, LogPtr> eldest) {
-      return size() > numDeletesToKeep;
-    }
-  };
+  protected LinkedHashMap<BytesRef, LogPtr> oldDeletes =
+      new LinkedHashMap<>(numDeletesToKeep) {
+        @Override
+        protected boolean removeEldestEntry(Map.Entry<BytesRef, LogPtr> eldest) {
+          return size() > numDeletesToKeep;
+        }
+      };
 
-  /**
-   * Holds the query and the version for a DeleteByQuery command
-   */
+  /** Holds the query and the version for a DeleteByQuery command */
   public static class DBQ {
-    public String q;     // the query string
+    public String q; // the query string
     public long version; // positive version of the DBQ
 
     @Override
     public String toString() {
-      return "DBQ{version=" + version + ",q="+q+"}";
+      return "DBQ{version=" + version + ",q=" + q + "}";
     }
   }
 
   protected LinkedList<DBQ> deleteByQueries = new LinkedList<>();
 
-  protected String[] tlogFiles; // Needs to be String because hdfs.Path is incompatible with nio.Path
+  protected String[]
+      tlogFiles; // Needs to be String because hdfs.Path is incompatible with nio.Path

Review comment:
      Fix this - the trailing comments force these field declarations to be split across two lines; restructure the comments so each declaration stays readable.

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateCommand.java
##########
@@ -20,22 +20,22 @@
 import io.opentracing.util.GlobalTracer;
 import org.apache.solr.request.SolrQueryRequest;
 
-
-/** An index update command encapsulated in an object (Command pattern)
- *
- *
- */
+/** An index update command encapsulated in an object (Command pattern) */
 public abstract class UpdateCommand implements Cloneable {
   protected SolrQueryRequest req;
   protected long version;
   protected String route;
   protected int flags;
 
-  public static int BUFFERING = 0x00000001;    // update command is being buffered.
-  public static int REPLAY    = 0x00000002;    // update command is from replaying a log.
-  public static int PEER_SYNC    = 0x00000004; // update command is a missing update being provided by a peer.
-  public static int IGNORE_AUTOCOMMIT = 0x00000008; // this update should not count toward triggering of autocommits.
-  public static int CLEAR_CACHES = 0x00000010; // clear caches associated with the update log.  used when applying reordered DBQ updates when doing an add.
+  public static int BUFFERING = 0x00000001; // update command is being buffered.
+  public static int REPLAY = 0x00000002; // update command is from replaying a log.
+  public static int PEER_SYNC =
+      0x00000004; // update command is a missing update being provided by a peer.
+  public static int IGNORE_AUTOCOMMIT =
+      0x00000008; // this update should not count toward triggering of autocommits.
+  public static int CLEAR_CACHES =
+      0x00000010; // clear caches associated with the update log.  used when applying reordered DBQ
+  // updates when doing an add.

Review comment:
       Fix this
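
One way this could be tidied (a sketch only; the exact format is up to the author): give each flag its comment on the line above the constant so none of the declarations get split, e.g.

```java
// update command is a missing update being provided by a peer.
public static int PEER_SYNC = 0x00000004;

// this update should not count toward triggering of autocommits.
public static int IGNORE_AUTOCOMMIT = 0x00000008;

// clear caches associated with the update log. Used when applying reordered
// DBQ updates when doing an add.
public static int CLEAR_CACHES = 0x00000010;
```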

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -186,48 +198,53 @@ public String toString() {
   protected TransactionLog tlog;
   protected TransactionLog prevTlog;
   protected TransactionLog prevTlogOnPrecommit;
-  protected final Deque<TransactionLog> logs = new LinkedList<>();  // list of recent logs, newest first
+  protected final Deque<TransactionLog> logs =
+      new LinkedList<>(); // list of recent logs, newest first
   protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<>();
-  protected int numOldRecords;  // number of records in the recent logs
+  protected int numOldRecords; // number of records in the recent logs
 
-  protected Map<BytesRef,LogPtr> map = new HashMap<>();
-  protected Map<BytesRef,LogPtr> prevMap;  // used while committing/reopening is happening
-  protected Map<BytesRef,LogPtr> prevMap2;  // used while committing/reopening is happening
-  protected TransactionLog prevMapLog;  // the transaction log used to look up entries found in prevMap
-  protected TransactionLog prevMapLog2;  // the transaction log used to look up entries found in prevMap2
+  protected Map<BytesRef, LogPtr> map = new HashMap<>();
+  protected Map<BytesRef, LogPtr> prevMap; // used while committing/reopening is happening
+  protected Map<BytesRef, LogPtr> prevMap2; // used while committing/reopening is happening
+  protected TransactionLog
+      prevMapLog; // the transaction log used to look up entries found in prevMap
+  protected TransactionLog
+      prevMapLog2; // the transaction log used to look up entries found in prevMap2
 
   protected final int numDeletesToKeep = 1000;
   protected final int numDeletesByQueryToKeep = 100;
   protected int numRecordsToKeep;
   protected int maxNumLogsToKeep;
-  protected int numVersionBuckets; // This should only be used to initialize VersionInfo... the actual number of buckets may be rounded up to a power of two.
+  protected int
+      numVersionBuckets; // This should only be used to initialize VersionInfo... the actual number
+  // of buckets may be rounded up to a power of two.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -252,12 +269,14 @@ public String toString() {
   public static class LogPtr {
     final long pointer;
     final long version;
-    final long previousPointer; // used for entries that are in-place updates and need a pointer to a previous update command
+    final long
+        previousPointer; // used for entries that are in-place updates and need a pointer to a
+    // previous update command

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -1561,16 +1667,16 @@ private void update() {
             numUpdates++;
           }
 
-        } catch (IOException | AssertionError e) { // catch AssertionError to handle certain test failures correctly
+        } catch (IOException
+            | AssertionError e) { // catch AssertionError to handle certain test failures correctly
           // failure to read a log record isn't fatal

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -1762,7 +1868,8 @@ public void run() {
       params.set(DistributedUpdateProcessor.LOG_REPLAY, "true");
       req = new LocalSolrQueryRequest(uhandler.core, params);
       rsp = new SolrQueryResponse();
-      SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));    // setting request info will help logging
+      SolrRequestInfo.setRequestInfo(
+          new SolrRequestInfo(req, rsp)); // setting request info will help logging

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/DocumentBuilder.java
##########
@@ -316,7 +380,8 @@ private static void moveLargestFieldLast(Document doc) {
       if (largestIsLast && !field.name().equals(largestField)) {
         largestIsLast = false;
       }
-      if (field.numericValue() != null) { // just ignore these as non-competitive (avoid toString'ing their number)
+      if (field.numericValue()
+          != null) { // just ignore these as non-competitive (avoid toString'ing their number)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -453,9 +537,14 @@ private Query getQuery(DeleteUpdateCommand cmd) {
         bq.add(q, Occur.MUST);
         SchemaField sf = ulog.getVersionInfo().getVersionField();
         ValueSource vs = sf.getType().getValueSource(sf, null);
-        ValueSourceRangeFilter filt = new ValueSourceRangeFilter(vs, Long.toString(Math.abs(cmd.getVersion())), null, true, true);
+        ValueSourceRangeFilter filt =
+            new ValueSourceRangeFilter(
+                vs, Long.toString(Math.abs(cmd.getVersion())), null, true, 
true);
         FunctionRangeQuery range = new FunctionRangeQuery(filt);
-        bq.add(range, Occur.MUST_NOT);  // formulated in the "MUST_NOT" sense so we can delete docs w/o a version (some tests depend on this...)
+        bq.add(
+            range,
+            Occur.MUST_NOT); // formulated in the "MUST_NOT" sense so we can delete docs w/o a
+        // version (some tests depend on this...)

Review comment:
       Fix this
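
A sketch of one possible cleanup (behavior unchanged, comment just moved) so the call is not exploded across four lines:

```java
// formulated in the "MUST_NOT" sense so we can delete docs w/o a version
// (some tests depend on this...)
bq.add(range, Occur.MUST_NOT);
```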

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -914,31 +1008,36 @@ public void split(SplitIndexCommand cmd) throws IOException {
   }
 
   /**
-   * Calls either {@link IndexWriter#updateDocValues} or <code>IndexWriter#updateDocument</code>(s) as
-   * needed based on {@link AddUpdateCommand#isInPlaceUpdate}.
-   * <p>
-   * If the this is an UPDATE_INPLACE cmd, then all fields included in
-   * {@link AddUpdateCommand#makeLuceneDocForInPlaceUpdate} must either be the uniqueKey field, or be DocValue
-   * only fields.
-   * </p>
+   * Calls either {@link IndexWriter#updateDocValues} or <code>IndexWriter#updateDocument</code>(s)
+   * as needed based on {@link AddUpdateCommand#isInPlaceUpdate}.
+   *
+   * <p>If the this is an UPDATE_INPLACE cmd, then all fields included in {@link
+   * AddUpdateCommand#makeLuceneDocForInPlaceUpdate} must either be the uniqueKey field, or be
+   * DocValue only fields.
    *
    * @param cmd - cmd apply to IndexWriter
    * @param writer - IndexWriter to use
    */
   private void updateDocOrDocValues(AddUpdateCommand cmd, IndexWriter writer) throws IOException {
-    assert idField != null; // this code path requires an idField in order to potentially replace a doc
+    assert idField
+        != null; // this code path requires an idField in order to potentially replace a doc
     boolean hasUpdateTerm = cmd.updateTerm != null; // AKA dedupe
 
     if (cmd.isInPlaceUpdate()) {
       if (hasUpdateTerm) {
-        throw new IllegalStateException("cmd.updateTerm/dedupe is not 
compatible with in-place updates");
+        throw new IllegalStateException(
+            "cmd.updateTerm/dedupe is not compatible with in-place updates");
       }
-      // we don't support the solrInputDoc with nested child docs either but we'll throw an exception if attempted
+      // we don't support the solrInputDoc with nested child docs either but we'll throw an
+      // exception if attempted
 
      // can't use cmd.getIndexedId because it will be a root doc if this doc is a child
-      Term updateTerm = new Term(idField.getName(),
-          core.getLatestSchema().indexableUniqueKey(cmd.getSelfOrNestedDocIdStr()));
-      List<IndexableField> fields = cmd.makeLuceneDocForInPlaceUpdate().getFields(); // skips uniqueKey and _root_
+      Term updateTerm =
+          new Term(
+              idField.getName(),
+              core.getLatestSchema().indexableUniqueKey(cmd.getSelfOrNestedDocIdStr()));
+      List<IndexableField> fields =
+          cmd.makeLuceneDocForInPlaceUpdate().getFields(); // skips uniqueKey and _root_

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
##########
@@ -477,17 +535,18 @@ public void processAdd(AddUpdateCommand cmd) throws IOException {
       super.processAdd(cmd);
     }
 
-    /**
-     * Recursively find unknown fields in the given doc and its child documents, if any.
-     */
-    private void getUnknownFields
-    (FieldNameSelector selector, SolrInputDocument doc, Map<String,List<SolrInputField>> unknownFields) {
+    /** Recursively find unknown fields in the given doc and its child documents, if any. */
+    private void getUnknownFields(
+        FieldNameSelector selector,
+        SolrInputDocument doc,
+        Map<String, List<SolrInputField>> unknownFields) {
       for (final String fieldName : doc.getFieldNames()) {
-        //We do a assert and a null check because even after SOLR-12710 is addressed
-        //older SolrJ versions can send null values causing an NPE
+        // We do a assert and a null check because even after SOLR-12710 is addressed
+        // older SolrJ versions can send null values causing an NPE
         assert fieldName != null;
         if (fieldName != null) {
-          if (selector.shouldMutate(fieldName)) { // returns false if the field already exists in the current schema
+          if (selector.shouldMutate(
+              fieldName)) { // returns false if the field already exists in the current schema

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -951,11 +1050,14 @@ private void updateDocOrDocValues(AddUpdateCommand cmd, IndexWriter writer) thro
       log.debug("updateDocuments({})", cmd);
       writer.updateDocuments(updateTerm, nestedDocs);
 
-      // If hasUpdateTerm, then delete any existing documents with the same ID other than the one added above
+      // If hasUpdateTerm, then delete any existing documents with the same ID other than the one
+      // added above
       //   (used in near-duplicate replacement)
       if (hasUpdateTerm) { // rare
         BooleanQuery.Builder bq = new BooleanQuery.Builder();
-        bq.add(new TermQuery(updateTerm), Occur.MUST_NOT); //don't want the one we added above (will be unique)
+        bq.add(
+            new TermQuery(updateTerm),
+            Occur.MUST_NOT); // don't want the one we added above (will be unique)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
##########
@@ -377,7 +394,9 @@ public void cancelRecovery() {
   @Override
   public void recovered() {
     recoveryStrat = null;
-    recoveringAfterStartup = false;  // once we have successfully recovered, we no longer need to act as if we are recovering after startup
+    recoveringAfterStartup =
+        false; // once we have successfully recovered, we no longer need to act as if we are
+    // recovering after startup

Review comment:
       Fix this
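
A sketch of one way to avoid the awkward split (same semantics, comment relocated above the assignment):

```java
// once we have successfully recovered, we no longer need to act as if
// we are recovering after startup
recoveringAfterStartup = false;
```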

##########
File path: solr/core/src/java/org/apache/solr/update/PeerSync.java
##########
@@ -717,12 +770,14 @@ MissedUpdatesRequest handleVersionsWithRanges(List<Long> otherVersions, boolean
     }
 
     @VisibleForTesting
-    /**
-     * Implementation assumes the passed in lists are sorted and contain no duplicates.
-     */
-    static MissedUpdatesRequest handleVersionsWithRanges(List<Long> otherVersions, boolean completeList,
-        List<Long> ourUpdates, long ourLowThreshold) {
-      // we may endup asking for updates for too many versions, causing 2MB post payload limit. Construct a range of
+    /** Implementation assumes the passed in lists are sorted and contain no duplicates. */
+    static MissedUpdatesRequest handleVersionsWithRanges(
+        List<Long> otherVersions,
+        boolean completeList,
+        List<Long> ourUpdates,
+        long ourLowThreshold) {
+      // we may endup asking for updates for too many versions, causing 2MB post payload limit.
+      // Construct a range of
       // versions to request instead of asking individual versions

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
##########
@@ -299,28 +335,45 @@ public void doSplit() throws IOException {
         if (splitMethod == SplitMethod.LINK) {
           t = timings.sub("deleteDocuments");
           t.resume();
-          // apply deletions specific to this partition. As a side-effect on the first call this also populates
-          // a cache of docsets to delete per leaf reader per partition, which is reused for subsequent partitions.
-          iw.deleteDocuments(new SplittingQuery(partitionNumber, field, rangesArr, hashRouter, splitKey, docsToDeleteCache, currentPartition));
+          // apply deletions specific to this partition. As a side-effect on the first call this
+          // also populates
+          // a cache of docsets to delete per leaf reader per partition, which is reused for
+          // subsequent partitions.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
##########
@@ -392,18 +439,24 @@ public String toString() {
     }
 
     // Called whenever we get results back from a sub-request.
-    // The only ambiguity is if I have _both_ a rollup tracker and a leader tracker. In that case we need to handle
-    // both requests returning from leaders of other shards _and_ from my followers. This happens if a leader happens
+    // The only ambiguity is if I have _both_ a rollup tracker and a leader tracker. In that case we
+    // need to handle
+    // both requests returning from leaders of other shards _and_ from my followers. This happens if
+    // a leader happens

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/PeerSyncWithLeader.java
##########
@@ -281,26 +295,33 @@ private boolean handleUpdates(NamedList<Object> rsp, long numRequestedUpdates, I
     }
 
    // Leader will compute its fingerprint, then retrieve its recent updates versions.
-    // There are a case that some updates (gap) get into recent versions but do not exist in index (fingerprint).
-    // If the gap do not contains DBQ or DBI, it is safe to use leaderFingerprint.maxVersionEncountered as a cut point.
+    // There are a case that some updates (gap) get into recent versions but do not exist in index
+    // (fingerprint).
+    // If the gap do not contains DBQ or DBI, it is safe to use
+    // leaderFingerprint.maxVersionEncountered as a cut point.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -1720,24 +1827,23 @@ public State getState() {
 
   @Override
   public String toString() {
-    return "FSUpdateLog{state="+getState()+", tlog="+tlog+"}";
+    return "FSUpdateLog{state=" + getState() + ", tlog=" + tlog + "}";
   }
 
-
-  public static Runnable testing_logReplayHook;  // called before each log read
-  public static Runnable testing_logReplayFinishHook;  // called when log replay has finished
-
-
+  public static Runnable testing_logReplayHook; // called before each log read
+  public static Runnable testing_logReplayFinishHook; // called when log replay has finished
 
   protected RecoveryInfo recoveryInfo;
 
   class LogReplayer implements Runnable {
-    private Logger loglog = log;  // set to something different?
+    private Logger loglog = log; // set to something different?
 
     Deque<TransactionLog> translogs;
     TransactionLog.LogReader tlogReader;
     boolean activeLog;
-    boolean finishing = false;  // state where we lock out other updates and finish those updates that snuck in before we locked
+    boolean finishing =
+        false; // state where we lock out other updates and finish those updates that snuck in
+    // before we locked

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/StreamingSolrClients.java
##########
@@ -66,15 +65,18 @@ public synchronized SolrClient getSolrClient(final SolrCmdDistributor.Req req) {
     String url = getFullUrl(req.node.getUrl());
     ConcurrentUpdateHttp2SolrClient client = solrClients.get(url);
     if (client == null) {
-      // NOTE: increasing to more than 1 threadCount for the client could cause updates to be reordered
-      // on a greater scale since the current behavior is to only increase the number of connections/Runners when
+      // NOTE: increasing to more than 1 threadCount for the client could cause updates to be
+      // reordered
+      // on a greater scale since the current behavior is to only increase the number of
+      // connections/Runners when

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -75,30 +71,37 @@
   Path tlog;
   FileChannel channel;
   OutputStream os;
-  protected FastOutputStream fos;    // all accesses to this stream should be synchronized on "this" (The TransactionLog)
+  protected FastOutputStream
+      fos; // all accesses to this stream should be synchronized on "this" (The TransactionLog)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/PeerSync.java
##########
@@ -187,7 +194,8 @@ public PeerSyncResult sync() {
       timerContext = syncTime.time();
 
      // Fire off the requests before getting our own recent updates (for better concurrency)
-      // This also allows us to avoid getting updates we don't need... if we got our updates and then got their updates,
+      // This also allows us to avoid getting updates we don't need... if we got our updates and
+      // then got their updates,
      // they would
      // have newer stuff that we also had (assuming updates are going on and are being forwarded).

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -186,48 +198,53 @@ public String toString() {
   protected TransactionLog tlog;
   protected TransactionLog prevTlog;
   protected TransactionLog prevTlogOnPrecommit;
-  protected final Deque<TransactionLog> logs = new LinkedList<>();  // list of recent logs, newest first
+  protected final Deque<TransactionLog> logs =
+      new LinkedList<>(); // list of recent logs, newest first

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
##########
@@ -106,7 +105,8 @@ public String toLower() {
   final SplitIndexCommand cmd;
   final SolrIndexSearcher searcher;
   final SchemaField field;
-  final DocRouter.Range[] rangesArr; // same as ranges list, but an array for extra speed in inner loops
+  final DocRouter.Range[]
+      rangesArr; // same as ranges list, but an array for extra speed in inner loops

Review comment:
       Fix this
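
If the split declaration is the concern, one sketch (illustrative only) is to hoist the comment above the field:

```java
// same as ranges list, but an array for extra speed in inner loops
final DocRouter.Range[] rangesArr;
```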

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -852,26 +877,31 @@ public Object next() throws IOException {
         // Position buffer so that this record is at the end.
        // For small records, this will cause subsequent calls to next() to be within the buffer.
        long seekPos = endOfThisRecord - fis.getBufferSize();
-        seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger then the block size.
+        seekPos =
+            Math.min(
+                seekPos,
+                prevPos); // seek to the start of the record if it's larger then the block size.
         seekPos = Math.max(seekPos, 0);
         fis.seek(seekPos);
-        fis.peek();  // cause buffer to be filled
+        fis.peek(); // cause buffer to be filled
       }
 
       fis.seek(prevPos);
-      nextLength = fis.readInt();     // this is the length of the *next* record (i.e. closer to the beginning)
+      nextLength =
+          fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning)

Review comment:
       Fix this
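
One possible tidy-up (a sketch, not a required change): keep the read on one line and move the explanation above it:

```java
// this is the length of the *next* record (i.e. closer to the beginning)
nextLength = fis.readInt();
```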

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -186,48 +198,53 @@ public String toString() {
   protected TransactionLog tlog;
   protected TransactionLog prevTlog;
   protected TransactionLog prevTlogOnPrecommit;
-  protected final Deque<TransactionLog> logs = new LinkedList<>();  // list of recent logs, newest first
+  protected final Deque<TransactionLog> logs =
+      new LinkedList<>(); // list of recent logs, newest first
   protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<>();
-  protected int numOldRecords;  // number of records in the recent logs
+  protected int numOldRecords; // number of records in the recent logs
 
-  protected Map<BytesRef,LogPtr> map = new HashMap<>();
-  protected Map<BytesRef,LogPtr> prevMap;  // used while committing/reopening is happening
-  protected Map<BytesRef,LogPtr> prevMap2;  // used while committing/reopening is happening
-  protected TransactionLog prevMapLog;  // the transaction log used to look up entries found in prevMap
-  protected TransactionLog prevMapLog2;  // the transaction log used to look up entries found in prevMap2
+  protected Map<BytesRef, LogPtr> map = new HashMap<>();
+  protected Map<BytesRef, LogPtr> prevMap; // used while committing/reopening is happening
+  protected Map<BytesRef, LogPtr> prevMap2; // used while committing/reopening is happening
+  protected TransactionLog
+      prevMapLog; // the transaction log used to look up entries found in prevMap
+  protected TransactionLog
+      prevMapLog2; // the transaction log used to look up entries found in prevMap2

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -852,26 +877,31 @@ public Object next() throws IOException {
         // Position buffer so that this record is at the end.
        // For small records, this will cause subsequent calls to next() to be within the buffer.
        long seekPos = endOfThisRecord - fis.getBufferSize();
-        seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger then the block size.
+        seekPos =
+            Math.min(
+                seekPos,
+                prevPos); // seek to the start of the record if it's larger then the block size.
         seekPos = Math.max(seekPos, 0);
         fis.seek(seekPos);
-        fis.peek();  // cause buffer to be filled
+        fis.peek(); // cause buffer to be filled
       }
 
       fis.seek(prevPos);
-      nextLength = fis.readInt();     // this is the length of the *next* record (i.e. closer to the beginning)
+      nextLength =
+          fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning)
 
       // TODO: optionally skip document data
       Object o = codec.readVal(fis);
 
-      // assert fis.position() == prevPos + 4 + thisLength;  // this is only true if we read all the data (and we currently skip reading SolrInputDocument
+      // assert fis.position() == prevPos + 4 + thisLength;  // this is only true if we read all the
+      // data (and we currently skip reading SolrInputDocument

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -910,15 +945,17 @@ public void seek(long position) throws IOException {
         // seek within buffer
         pos = (int) (position - getBufferPos());
       } else {
-        // long currSize = ch.size();   // not needed - underlying read should handle (unless read never done)
-        // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " on file of size " + currSize + " file=" + ch);
+        // long currSize = ch.size();   // not needed - underlying read should handle (unless read
+        // never done)
+        // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position +
+        // " on file of size " + currSize + " file=" + ch);
+        // " on file of size " + currSize + " file=" + ch);

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateProcessorFactory.java
##########
@@ -170,11 +173,17 @@ private void processAddWithRetry(AddUpdateCommand cmd, int attempts, SolrInputDo
           // if lastVersion is null then we put -1 to assert that document must not exist
           lastVersion = lastVersion == null ? -1 : lastVersion;
 
-          // The AtomicUpdateDocumentMerger modifies the AddUpdateCommand.solrDoc to populate the real values of the
-          // modified fields. We don't want those absolute values because they are out-of-date due to the conflict
-          // so we restore the original document created in processAdd method and set the right version on it
+          // The AtomicUpdateDocumentMerger modifies the AddUpdateCommand.solrDoc to populate the
+          // real values of the
+          // modified fields. We don't want those absolute values because they are out-of-date due
+          // to the conflict
+          // so we restore the original document created in processAdd method and set the right
+          // version on it

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/ClassificationUpdateProcessorParams.java
##########
@@ -30,7 +30,8 @@
 
   private int maxPredictedClasses; // the max number of classes to assign
 
-  private ClassificationUpdateProcessorFactory.Algorithm algorithm; // the Classification Algorithm to use - currently 'knn' or 'bayes'
+  private ClassificationUpdateProcessorFactory.Algorithm
+      algorithm; // the Classification Algorithm to use - currently 'knn' or 'bayes'

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java
##########
@@ -330,83 +342,108 @@ private static String getRouteField(AddUpdateCommand cmd) {
     }
     return result;
   }
-  
+
   /**
-   *
-   * @param fullDoc the full doc to  be compared against
+   * @param fullDoc the full doc to be compared against
    * @param partialDoc the sub document to be tested
    * @return whether partialDoc is derived from fullDoc
    */
   public static boolean isDerivedFromDoc(SolrInputDocument fullDoc, SolrInputDocument partialDoc) {
-    for(SolrInputField subSif: partialDoc) {
+    for (SolrInputField subSif : partialDoc) {
       Collection<Object> fieldValues = fullDoc.getFieldValues(subSif.getName());
       if (fieldValues == null) return false;
       if (fieldValues.size() < subSif.getValueCount()) return false;
       Collection<Object> partialFieldValues = subSif.getValues();
-      // filter all derived child docs from partial field values since they fail List#containsAll check (uses SolrInputDocument#equals which fails).
-      // If a child doc exists in partialDoc but not in full doc, it will not be filtered, and therefore List#containsAll will return false
-      Stream<Object> nonChildDocElements = partialFieldValues.stream().filter(x -> !(isChildDoc(x) &&
-          (fieldValues.stream().anyMatch(y ->
-              (isChildDoc(x) &&
-                  isDerivedFromDoc((SolrInputDocument) y, (SolrInputDocument) 
x)
-              )
-          )
-          )));
+      // filter all derived child docs from partial field values since they fail List#containsAll
+      // check (uses SolrInputDocument#equals which fails).
+      // If a child doc exists in partialDoc but not in full doc, it will not be filtered, and
+      // therefore List#containsAll will return false
+      Stream<Object> nonChildDocElements =
+          partialFieldValues.stream()
+              .filter(
+                  x ->
+                      !(isChildDoc(x)
+                          && (fieldValues.stream()
+                              .anyMatch(
+                                  y ->
+                                      (isChildDoc(x)
+                                          && isDerivedFromDoc(
+                                              (SolrInputDocument) y, 
(SolrInputDocument) x))))));
       if (!nonChildDocElements.allMatch(fieldValues::contains)) return false;
     }
     return true;
   }
 
   /**
-   * Given an AddUpdateCommand containing update operations (e.g. set, inc), merge and resolve the operations into
-   * a partial document that can be used for indexing the in-place updates. The AddUpdateCommand is modified to contain
-   * the partial document (instead of the original document which contained the update operations) and also
-   * the prevVersion that this in-place update depends on.
-   * Note: updatedFields passed into the method can be changed, i.e. the version field can be added to the set.
-   * @return If in-place update cannot succeed, e.g. if the old document is deleted recently, then false is returned. A false
-   *        return indicates that this update can be re-tried as a full atomic update. Returns true if the in-place update
-   *        succeeds.
+   * Given an AddUpdateCommand containing update operations (e.g. set, inc), merge and resolve the
+   * operations into a partial document that can be used for indexing the in-place updates. The
+   * AddUpdateCommand is modified to contain the partial document (instead of the original document
+   * which contained the update operations) and also the prevVersion that this in-place update
+   * depends on. Note: updatedFields passed into the method can be changed, i.e. the version field
+   * can be added to the set.
+   *
+   * @return If in-place update cannot succeed, e.g. if the old document is deleted recently, then
+   *     false is returned. A false return indicates that this update can be re-tried as a full
+   *     atomic update. Returns true if the in-place update succeeds.
    */
-  public boolean doInPlaceUpdateMerge(AddUpdateCommand cmd, Set<String> updatedFields) throws IOException {
+  public boolean doInPlaceUpdateMerge(AddUpdateCommand cmd, Set<String> updatedFields)
+      throws IOException {
     SolrInputDocument inputDoc = cmd.getSolrInputDocument();
     BytesRef rootIdBytes = cmd.getIndexedId();
     BytesRef idBytes = schema.indexableUniqueKey(cmd.getSelfOrNestedDocIdStr());
 
-    updatedFields.add(CommonParams.VERSION_FIELD); // add the version field so that it is fetched too
-    SolrInputDocument oldDocument = RealTimeGetComponent.getInputDocument
-      (cmd.getReq().getCore(), idBytes, rootIdBytes,
-          null, // don't want the version to be returned
-          updatedFields, RealTimeGetComponent.Resolution.DOC);
+    updatedFields.add(
+        CommonParams.VERSION_FIELD); // add the version field so that it is fetched too
+    SolrInputDocument oldDocument =
+        RealTimeGetComponent.getInputDocument(
+            cmd.getReq().getCore(),
+            idBytes,
+            rootIdBytes,
+            null, // don't want the version to be returned
+            updatedFields,
+            RealTimeGetComponent.Resolution.DOC);
 
     if (oldDocument == RealTimeGetComponent.DELETED || oldDocument == null) {
-      // This doc was deleted recently. In-place update cannot work, hence a full atomic update should be tried.
+      // This doc was deleted recently. In-place update cannot work, hence a full atomic update
+      // should be tried.
       return false;
     }
 
     if (oldDocument.containsKey(CommonParams.VERSION_FIELD) == false) {
-      throw new SolrException (ErrorCode.INVALID_STATE, "There is no _version_ 
in previous document. id=" + 
-          cmd.getPrintableId());
+      throw new SolrException(
+          ErrorCode.INVALID_STATE,
+          "There is no _version_ in previous document. id=" + 
cmd.getPrintableId());
     }
     Long oldVersion = (Long) 
oldDocument.remove(CommonParams.VERSION_FIELD).getValue();
 
-    // If the oldDocument contains any other field apart from updatedFields 
(or id/version field), then remove them.
-    // This can happen, despite requesting for these fields in the call to 
RTGC.getInputDocument, if the document was
-    // fetched from the tlog and had all these fields (possibly because it was 
a full document ADD operation).
+    // If the oldDocument contains any other field apart from updatedFields 
(or id/version field),
+    // then remove them.
+    // This can happen, despite requesting for these fields in the call to 
RTGC.getInputDocument, if
+    // the document was
+    // fetched from the tlog and had all these fields (possibly because it was 
a full document ADD
+    // operation).
     if (updatedFields != null) {
       Collection<String> names = new HashSet<>(oldDocument.getFieldNames());
-      for (String fieldName: names) {
-        if (fieldName.equals(CommonParams.VERSION_FIELD)==false && 
fieldName.equals(ID)==false && updatedFields.contains(fieldName)==false) {
+      for (String fieldName : names) {
+        if (fieldName.equals(CommonParams.VERSION_FIELD) == false
+            && fieldName.equals(ID) == false
+            && updatedFields.contains(fieldName) == false) {
           oldDocument.remove(fieldName);
         }
       }
     }
     // Copy over all supported DVs from oldDocument to partialDoc
     //
-    // Assuming multiple updates to the same doc: field 'dv1' in one update, 
then field 'dv2' in a second
-    // update, and then again 'dv1' in a third update (without commits in 
between), the last update would
-    // fetch from the tlog the partial doc for the 2nd (dv2) update. If that 
doc doesn't copy over the
-    // previous updates to dv1 as well, then a full resolution (by following 
previous pointers) would
-    // need to be done to calculate the dv1 value -- so instead copy all the 
potentially affected DV fields.
+    // Assuming multiple updates to the same doc: field 'dv1' in one update, 
then field 'dv2' in a
+    // second
+    // update, and then again 'dv1' in a third update (without commits in 
between), the last update
+    // would
+    // fetch from the tlog the partial doc for the 2nd (dv2) update. If that 
doc doesn't copy over
+    // the
+    // previous updates to dv1 as well, then a full resolution (by following 
previous pointers)
+    // would
+    // need to be done to calculate the dv1 value -- so instead copy all the 
potentially affected DV
+    // fields.

Review comment:
       Fix this wrapping by hand: the formatter left orphan fragments ("second", "would", "the", "fields.") on their own comment lines, which makes the dv1/dv2 explanation hard to follow.
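       A possible re-flow (same wording, just rewrapped; a sketch assuming the formatter's 100-column limit):

           // Assuming multiple updates to the same doc: field 'dv1' in one update, then field 'dv2'
           // in a second update, and then again 'dv1' in a third update (without commits in between),
           // the last update would fetch from the tlog the partial doc for the 2nd (dv2) update. If
           // that doc doesn't copy over the previous updates to dv1 as well, then a full resolution
           // (by following previous pointers) would need to be done to calculate the dv1 value -- so
           // instead copy all the potentially affected DV fields.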

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateProcessorFactory.java
##########
@@ -170,11 +173,17 @@ private void processAddWithRetry(AddUpdateCommand cmd, 
int attempts, SolrInputDo
           // if lastVersion is null then we put -1 to assert that document 
must not exist
           lastVersion = lastVersion == null ? -1 : lastVersion;
 
-          // The AtomicUpdateDocumentMerger modifies the 
AddUpdateCommand.solrDoc to populate the real values of the
-          // modified fields. We don't want those absolute values because they 
are out-of-date due to the conflict
-          // so we restore the original document created in processAdd method 
and set the right version on it
+          // The AtomicUpdateDocumentMerger modifies the 
AddUpdateCommand.solrDoc to populate the
+          // real values of the
+          // modified fields. We don't want those absolute values because they 
are out-of-date due
+          // to the conflict
+          // so we restore the original document created in processAdd method 
and set the right
+          // version on it
           cmd.solrDoc = clonedOriginalDoc;
-          clonedOriginalDoc = clonedOriginalDoc.deepCopy(); // copy again 
because the old cloned ref will be modified during processAdd
+          clonedOriginalDoc =
+              clonedOriginalDoc
+                  .deepCopy(); // copy again because the old cloned ref will 
be modified during
+          // processAdd

Review comment:
       Fix this: the continuation "// processAdd" dangles on the line after the statement. Putting the whole comment above the assignment keeps both intact.
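       For example (sketch only; same code, comment hoisted):

           // copy again because the old cloned ref will be modified during processAdd
           clonedOriginalDoc = clonedOriginalDoc.deepCopy();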

##########
File path: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
##########
@@ -914,31 +1008,36 @@ public void split(SplitIndexCommand cmd) throws 
IOException {
   }
 
   /**
-   * Calls either {@link IndexWriter#updateDocValues} or 
<code>IndexWriter#updateDocument</code>(s) as
-   * needed based on {@link AddUpdateCommand#isInPlaceUpdate}.
-   * <p>
-   * If the this is an UPDATE_INPLACE cmd, then all fields included in 
-   * {@link AddUpdateCommand#makeLuceneDocForInPlaceUpdate} must either be the 
uniqueKey field, or be DocValue
-   * only fields.
-   * </p>
+   * Calls either {@link IndexWriter#updateDocValues} or 
<code>IndexWriter#updateDocument</code>(s)
+   * as needed based on {@link AddUpdateCommand#isInPlaceUpdate}.
+   *
+   * <p>If the this is an UPDATE_INPLACE cmd, then all fields included in 
{@link
+   * AddUpdateCommand#makeLuceneDocForInPlaceUpdate} must either be the 
uniqueKey field, or be
+   * DocValue only fields.
    *
    * @param cmd - cmd apply to IndexWriter
    * @param writer - IndexWriter to use
    */
   private void updateDocOrDocValues(AddUpdateCommand cmd, IndexWriter writer) 
throws IOException {
-    assert idField != null; // this code path requires an idField in order to 
potentially replace a doc
+    assert idField
+        != null; // this code path requires an idField in order to potentially 
replace a doc

Review comment:
       Fix this: splitting the assert across two lines just to fit the trailing comment reads badly; move the comment onto its own line above the assert. (Separately, the javadoc above has a doubled word: "If the this is".)
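       e.g. something along these lines (a sketch, not required wording):

           // this code path requires an idField in order to potentially replace a doc
           assert idField != null;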

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -289,15 +295,17 @@ protected boolean versionAdd(AddUpdateCommand cmd) throws 
IOException {
 
     if (vinfo == null) {
       if (AtomicUpdateDocumentMerger.isAtomicUpdate(cmd)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
             "Atomic document updates are not supported unless <updateLog/> is 
configured");
       } else {
         super.processAdd(cmd);
         return false;
       }
     }
 
-    // This is only the hash for the bucket, and must be based only on the 
uniqueKey (i.e. do not use a pluggable hash
+    // This is only the hash for the bucket, and must be based only on the 
uniqueKey (i.e. do not
+    // use a pluggable hash
     // here)

Review comment:
       Fix this: "here)" is stranded on its own comment line; rewrap so the parenthetical stays together.
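       One option (sketch):

           // This is only the hash for the bucket, and must be based only on the uniqueKey
           // (i.e. do not use a pluggable hash here)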

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
##########
@@ -145,140 +139,173 @@
  *     &lt;str name="replacement"&gt;key_feat$1&lt;/str&gt;
  *   &lt;/str&gt;
  * &lt;/processor&gt;
- * 
+ *
  * &lt;!-- syntactic sugar syntax --&gt;
  * &lt;processor class="solr.processor.CloneFieldUpdateProcessorFactory"&gt;
  *   &lt;str name="pattern"&gt;^feat(.*)s$&lt;/str&gt;
  *   &lt;str name="replacement"&gt;key_feat$1&lt;/str&gt;
  * &lt;/processor&gt;
  * </pre>
  *
- * <p>
- * When cloning multiple fields (or a single multivalued field) into a single 
valued field, one of the 
- * {@link FieldValueSubsetUpdateProcessorFactory} implementations configured 
after the 
- * <code>CloneFieldUpdateProcessorFactory</code> can be useful to reduce the 
list of values down to a 
+ * <p>When cloning multiple fields (or a single multivalued field) into a 
single valued field, one
+ * of the {@link FieldValueSubsetUpdateProcessorFactory} implementations 
configured after the <code>
+ * CloneFieldUpdateProcessorFactory</code> can be useful to reduce the list of 
values down to a
  * single value.
- * </p>
- * 
+ *
  * @see FieldValueSubsetUpdateProcessorFactory
  * @since 4.0.0
  */
-public class CloneFieldUpdateProcessorFactory 
-  extends UpdateRequestProcessorFactory implements SolrCoreAware {
+public class CloneFieldUpdateProcessorFactory extends 
UpdateRequestProcessorFactory
+    implements SolrCoreAware {
 
   private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
+
   public static final String SOURCE_PARAM = "source";
   public static final String DEST_PARAM = "dest";
   public static final String PATTERN_PARAM = "pattern";
   public static final String REPLACEMENT_PARAM = "replacement";
 
   private SelectorParams srcInclusions = new SelectorParams();
-  private Collection<SelectorParams> srcExclusions 
-    = new ArrayList<>();
+  private Collection<SelectorParams> srcExclusions = new ArrayList<>();
 
   private FieldNameSelector srcSelector = null;
 
-  /** 
-   * If pattern is null, this this is a literal field name.  If pattern is 
non-null then this
-   * is a replacement string that may contain meta-characters (ie: capture 
group identifiers)
+  /**
+   * If pattern is null, this this is a literal field name. If pattern is 
non-null then this is a
+   * replacement string that may contain meta-characters (ie: capture group 
identifiers)
+   *
    * @see #pattern
    */
   private String dest = null;
-  /** @see #dest */
+  /**
+   * @see #dest
+   */
   private Pattern pattern = null;
 
   @SuppressWarnings("WeakerAccess")
   protected final FieldNameSelector getSourceSelector() {
     if (null != srcSelector) return srcSelector;
 
-    throw new SolrException(SERVER_ERROR, "selector was never initialized, "+
-                            " inform(SolrCore) never called???");
+    throw new SolrException(
+        SERVER_ERROR, "selector was never initialized, " + " inform(SolrCore) 
never called???");
   }
 
   @Override
   public void init(NamedList<?> args) {
 
     // high level (loose) check for which type of config we have.
-    // 
+    //
     // individual init methods do more strict syntax checking
-    if (0 <= args.indexOf(SOURCE_PARAM, 0) && 0 <= args.indexOf(DEST_PARAM, 0) 
) {
+    if (0 <= args.indexOf(SOURCE_PARAM, 0) && 0 <= args.indexOf(DEST_PARAM, 
0)) {
       initSourceSelectorSyntax(args);
     } else if (0 <= args.indexOf(PATTERN_PARAM, 0) && 0 <= 
args.indexOf(REPLACEMENT_PARAM, 0)) {
       initSimpleRegexReplacement(args);
     } else {
-      throw new SolrException(SERVER_ERROR, "A combination of either '" + 
SOURCE_PARAM + "' + '"+
-                              DEST_PARAM + "', or '" + REPLACEMENT_PARAM + "' 
+ '" +
-                              PATTERN_PARAM + "' init params are mandatory");
+      throw new SolrException(
+          SERVER_ERROR,
+          "A combination of either '"
+              + SOURCE_PARAM
+              + "' + '"
+              + DEST_PARAM
+              + "', or '"
+              + REPLACEMENT_PARAM
+              + "' + '"
+              + PATTERN_PARAM
+              + "' init params are mandatory");
     }
-    
+
     if (0 < args.size()) {
-      throw new SolrException(SERVER_ERROR,
-          "Unexpected init param(s): '" +
-              args.getName(0) + "'");
+      throw new SolrException(SERVER_ERROR, "Unexpected init param(s): '" + 
args.getName(0) + "'");
     }
 
     super.init(args);
   }
 
   /**
-   * init helper method that should only be called when we know for certain 
that both the 
-   * "source" and "dest" init params do <em>not</em> exist.
+   * init helper method that should only be called when we know for certain 
that both the "source"
+   * and "dest" init params do <em>not</em> exist.
    */
   private void initSimpleRegexReplacement(NamedList<?> args) {
-    // The syntactic sugar for the case where there is only one regex pattern 
for source and the same pattern
+    // The syntactic sugar for the case where there is only one regex pattern 
for source and the
+    // same pattern
     // is used for the destination pattern...

Review comment:
       Fix this: "same pattern" is orphaned between the two halves of the sentence; rewrap the comment manually. (The javadoc for dest in this class also has a doubled "this this".)
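       Possible re-flow (wording unchanged):

           // The syntactic sugar for the case where there is only one regex pattern for source
           // and the same pattern is used for the destination pattern...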

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
##########
@@ -145,140 +139,173 @@
  *     &lt;str name="replacement"&gt;key_feat$1&lt;/str&gt;
  *   &lt;/str&gt;
  * &lt;/processor&gt;
- * 
+ *
  * &lt;!-- syntactic sugar syntax --&gt;
  * &lt;processor class="solr.processor.CloneFieldUpdateProcessorFactory"&gt;
  *   &lt;str name="pattern"&gt;^feat(.*)s$&lt;/str&gt;
  *   &lt;str name="replacement"&gt;key_feat$1&lt;/str&gt;
  * &lt;/processor&gt;
  * </pre>
  *
- * <p>
- * When cloning multiple fields (or a single multivalued field) into a single 
valued field, one of the 
- * {@link FieldValueSubsetUpdateProcessorFactory} implementations configured 
after the 
- * <code>CloneFieldUpdateProcessorFactory</code> can be useful to reduce the 
list of values down to a 
+ * <p>When cloning multiple fields (or a single multivalued field) into a 
single valued field, one
+ * of the {@link FieldValueSubsetUpdateProcessorFactory} implementations 
configured after the <code>
+ * CloneFieldUpdateProcessorFactory</code> can be useful to reduce the list of 
values down to a
  * single value.
- * </p>
- * 
+ *
  * @see FieldValueSubsetUpdateProcessorFactory
  * @since 4.0.0
  */
-public class CloneFieldUpdateProcessorFactory 
-  extends UpdateRequestProcessorFactory implements SolrCoreAware {
+public class CloneFieldUpdateProcessorFactory extends 
UpdateRequestProcessorFactory
+    implements SolrCoreAware {
 
   private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
+
   public static final String SOURCE_PARAM = "source";
   public static final String DEST_PARAM = "dest";
   public static final String PATTERN_PARAM = "pattern";
   public static final String REPLACEMENT_PARAM = "replacement";
 
   private SelectorParams srcInclusions = new SelectorParams();
-  private Collection<SelectorParams> srcExclusions 
-    = new ArrayList<>();
+  private Collection<SelectorParams> srcExclusions = new ArrayList<>();
 
   private FieldNameSelector srcSelector = null;
 
-  /** 
-   * If pattern is null, this this is a literal field name.  If pattern is 
non-null then this
-   * is a replacement string that may contain meta-characters (ie: capture 
group identifiers)
+  /**
+   * If pattern is null, this this is a literal field name. If pattern is 
non-null then this is a
+   * replacement string that may contain meta-characters (ie: capture group 
identifiers)
+   *
    * @see #pattern
    */
   private String dest = null;
-  /** @see #dest */
+  /**
+   * @see #dest
+   */
   private Pattern pattern = null;
 
   @SuppressWarnings("WeakerAccess")
   protected final FieldNameSelector getSourceSelector() {
     if (null != srcSelector) return srcSelector;
 
-    throw new SolrException(SERVER_ERROR, "selector was never initialized, "+
-                            " inform(SolrCore) never called???");
+    throw new SolrException(
+        SERVER_ERROR, "selector was never initialized, " + " inform(SolrCore) 
never called???");
   }
 
   @Override
   public void init(NamedList<?> args) {
 
     // high level (loose) check for which type of config we have.
-    // 
+    //
     // individual init methods do more strict syntax checking
-    if (0 <= args.indexOf(SOURCE_PARAM, 0) && 0 <= args.indexOf(DEST_PARAM, 0) 
) {
+    if (0 <= args.indexOf(SOURCE_PARAM, 0) && 0 <= args.indexOf(DEST_PARAM, 
0)) {
       initSourceSelectorSyntax(args);
     } else if (0 <= args.indexOf(PATTERN_PARAM, 0) && 0 <= 
args.indexOf(REPLACEMENT_PARAM, 0)) {
       initSimpleRegexReplacement(args);
     } else {
-      throw new SolrException(SERVER_ERROR, "A combination of either '" + 
SOURCE_PARAM + "' + '"+
-                              DEST_PARAM + "', or '" + REPLACEMENT_PARAM + "' 
+ '" +
-                              PATTERN_PARAM + "' init params are mandatory");
+      throw new SolrException(
+          SERVER_ERROR,
+          "A combination of either '"
+              + SOURCE_PARAM
+              + "' + '"
+              + DEST_PARAM
+              + "', or '"
+              + REPLACEMENT_PARAM
+              + "' + '"
+              + PATTERN_PARAM
+              + "' init params are mandatory");
     }
-    
+
     if (0 < args.size()) {
-      throw new SolrException(SERVER_ERROR,
-          "Unexpected init param(s): '" +
-              args.getName(0) + "'");
+      throw new SolrException(SERVER_ERROR, "Unexpected init param(s): '" + 
args.getName(0) + "'");
     }
 
     super.init(args);
   }
 
   /**
-   * init helper method that should only be called when we know for certain 
that both the 
-   * "source" and "dest" init params do <em>not</em> exist.
+   * init helper method that should only be called when we know for certain 
that both the "source"
+   * and "dest" init params do <em>not</em> exist.
    */
   private void initSimpleRegexReplacement(NamedList<?> args) {
-    // The syntactic sugar for the case where there is only one regex pattern 
for source and the same pattern
+    // The syntactic sugar for the case where there is only one regex pattern 
for source and the
+    // same pattern
     // is used for the destination pattern...
     //
     //  pattern != null && replacement != null
-    //    
+    //
     // ...as top level elements, with no other config options specified
-    
-    // if we got here we know we had pattern and replacement, now check for 
the other two  so that we can give a better
+
+    // if we got here we know we had pattern and replacement, now check for 
the other two  so that
+    // we can give a better
     // message than "unexpected"

Review comment:
       Fix this: the sentence breaks after "the other two  so that" (note also the stray double space) and again after "give a better"; rewrap into full lines.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
##########
@@ -288,16 +315,25 @@ private void initSourceSelectorSyntax(NamedList<?> args) {
     //
     //   source != null && dest != null
 
-    // if we got here we know we had source and dest, now check for the other 
two so that we can give a better
+    // if we got here we know we had source and dest, now check for the other 
two so that we can
+    // give a better
     // message than "unexpected"

Review comment:
       Fix this: identical wrapping problem to the one in initSimpleRegexReplacement above; please rewrap this copy as well.

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -373,27 +401,35 @@ public void init(UpdateHandler uhandler, SolrCore core) {
       throw new SolrException(ErrorCode.SERVER_ERROR, "Could not set up 
tlogs", e);
     }
     tlogFiles = getLogList(tlogDir.toFile());
-    id = getLastLogId() + 1;   // add 1 since we will create a new log for the 
next update
+    id = getLastLogId() + 1; // add 1 since we will create a new log for the 
next update
 
     if (debug) {
-      log.debug("UpdateHandler init: tlogDir={}, existing tlogs={}, next 
id={}", tlogDir, Arrays.asList(tlogFiles), id);
+      log.debug(
+          "UpdateHandler init: tlogDir={}, existing tlogs={}, next id={}",
+          tlogDir,
+          Arrays.asList(tlogFiles),
+          id);
     }
 
     final String prefix = BUFFER_TLOG_NAME + '.';
     try (Stream<Path> bufferedTLogs = Files.walk(tlogDir, 1)) {
-      existOldBufferLog = bufferedTLogs.anyMatch(path -> 
path.getFileName().toString().startsWith(prefix));
+      existOldBufferLog =
+          bufferedTLogs.anyMatch(path -> 
path.getFileName().toString().startsWith(prefix));
     } catch (IOException e) {
-      // Existance of buffered t-logs indicates previous recovery failed and 
lets us skip peer sync as an optimization
+      // Existance of buffered t-logs indicates previous recovery failed and 
lets us skip peer sync
+      // as an optimization
       // Failing to read them is non-fatal and almost not even worth logging 
about
-      log.debug("Could not read {} directory searching for buffered 
transaction log files.", tlogDir, e);
+      log.debug(
+          "Could not read {} directory searching for buffered transaction log 
files.", tlogDir, e);
       existOldBufferLog = false;
     }
     TransactionLog oldLog = null;
     for (String oldLogName : tlogFiles) {
       Path path = tlogDir.resolve(oldLogName);
       try {
         oldLog = newTransactionLog(path, null, true);
-        addOldLog(oldLog, false);  // don't remove old logs on startup since 
more than one may be uncapped.
+        addOldLog(
+            oldLog, false); // don't remove old logs on startup since more 
than one may be uncapped.

Review comment:
       Fix this: the call is now split across two lines just to make room for the trailing comment. Moving the comment above addOldLog(oldLog, false) keeps the call on one line.

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -875,47 +927,66 @@ public void postSoftCommit(CommitUpdateCommand cmd) {
       // record what old maps were created and only remove those.
 
       if (debug) {
-        SolrCore.verbose("TLOG: postSoftCommit: disposing of prevMap="+ 
System.identityHashCode(prevMap) + ", prevMap2=" + 
System.identityHashCode(prevMap2));
+        SolrCore.verbose(
+            "TLOG: postSoftCommit: disposing of prevMap="
+                + System.identityHashCode(prevMap)
+                + ", prevMap2="
+                + System.identityHashCode(prevMap2));
       }
       clearOldMaps();
-
     }
   }
 
   /**
-   * Goes over backwards, following the prevPointer, to merge all partial 
updates into the passed doc. Stops at either a full
-   * document, or if there are no previous entries to follow in the update log.
+   * Goes over backwards, following the prevPointer, to merge all partial 
updates into the passed
+   * doc. Stops at either a full document, or if there are no previous entries 
to follow in the
+   * update log.
    *
-   * @param id          Binary representation of the unique key field
-   * @param prevPointer Pointer to the previous entry in the ulog, based on 
which the current in-place update was made.
-   * @param prevVersion Version of the previous entry in the ulog, based on 
which the current in-place update was made.
-   * @param onlyTheseFields When a non-null set of field names is passed in, 
the resolve process only attempts to populate
-   *        the given fields in this set. When this set is null, it resolves 
all fields.
-   * @param latestPartialDoc   Partial document that is to be populated
-   * @return Returns 0 if a full document was found in the log, -1 if no full 
document was found. If full document was supposed
-   * to be found in the tlogs, but couldn't be found (because the logs were 
rotated) then the prevPointer is returned.
+   * @param id Binary representation of the unique key field
+   * @param prevPointer Pointer to the previous entry in the ulog, based on 
which the current
+   *     in-place update was made.
+   * @param prevVersion Version of the previous entry in the ulog, based on 
which the current
+   *     in-place update was made.
+   * @param onlyTheseFields When a non-null set of field names is passed in, 
the resolve process
+   *     only attempts to populate the given fields in this set. When this set 
is null, it resolves
+   *     all fields.
+   * @param latestPartialDoc Partial document that is to be populated
+   * @return Returns 0 if a full document was found in the log, -1 if no full 
document was found. If
+   *     full document was supposed to be found in the tlogs, but couldn't be 
found (because the
+   *     logs were rotated) then the prevPointer is returned.
    */
   @SuppressWarnings({"unchecked"})
-  synchronized public long applyPartialUpdates(BytesRef id, long prevPointer, 
long prevVersion,
-      Set<String> onlyTheseFields, SolrDocumentBase<?, ?> latestPartialDoc) {
-    
+  public synchronized long applyPartialUpdates(
+      BytesRef id,
+      long prevPointer,
+      long prevVersion,
+      Set<String> onlyTheseFields,
+      SolrDocumentBase<?, ?> latestPartialDoc) {
+
     SolrInputDocument partialUpdateDoc = null;
 
     List<TransactionLog> lookupLogs = Arrays.asList(tlog, prevMapLog, 
prevMapLog2);
     while (prevPointer >= 0) {
-      //go through each partial update and apply it on the incoming doc one 
after another
+      // go through each partial update and apply it on the incoming doc one 
after another
       List<?> entry;
       entry = getEntryFromTLog(prevPointer, prevVersion, lookupLogs);
       if (entry == null) {
-        return prevPointer; // a previous update was supposed to be found, but 
wasn't found (due to log rotation)
+        return prevPointer; // a previous update was supposed to be found, but 
wasn't found (due to
+        // log rotation)

Review comment:
       Fix this: the continuation "// log rotation)" dangles inside the if block and reads like a separate comment. Hoist the comment above the return.
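       Roughly (sketch):

           // a previous update was supposed to be found, but wasn't found (due to log rotation)
           return prevPointer;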

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -373,27 +401,35 @@ public void init(UpdateHandler uhandler, SolrCore core) {
       throw new SolrException(ErrorCode.SERVER_ERROR, "Could not set up 
tlogs", e);
     }
     tlogFiles = getLogList(tlogDir.toFile());
-    id = getLastLogId() + 1;   // add 1 since we will create a new log for the 
next update
+    id = getLastLogId() + 1; // add 1 since we will create a new log for the 
next update
 
     if (debug) {
-      log.debug("UpdateHandler init: tlogDir={}, existing tlogs={}, next 
id={}", tlogDir, Arrays.asList(tlogFiles), id);
+      log.debug(
+          "UpdateHandler init: tlogDir={}, existing tlogs={}, next id={}",
+          tlogDir,
+          Arrays.asList(tlogFiles),
+          id);
     }
 
     final String prefix = BUFFER_TLOG_NAME + '.';
     try (Stream<Path> bufferedTLogs = Files.walk(tlogDir, 1)) {
-      existOldBufferLog = bufferedTLogs.anyMatch(path -> 
path.getFileName().toString().startsWith(prefix));
+      existOldBufferLog =
+          bufferedTLogs.anyMatch(path -> 
path.getFileName().toString().startsWith(prefix));
     } catch (IOException e) {
-      // Existance of buffered t-logs indicates previous recovery failed and 
lets us skip peer sync as an optimization
+      // Existance of buffered t-logs indicates previous recovery failed and 
lets us skip peer sync
+      // as an optimization
       // Failing to read them is non-fatal and almost not even worth logging 
about

Review comment:
       Fix this: rewrap so "as an optimization" is not stranded, and while touching it, "Existance" should be "Existence".
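       Possible re-flow with the typo fixed (otherwise the same text):

           // Existence of buffered t-logs indicates previous recovery failed and lets us skip
           // peer sync as an optimization.
           // Failing to read them is non-fatal and almost not even worth logging about.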

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -875,47 +927,66 @@ public void postSoftCommit(CommitUpdateCommand cmd) {
       // record what old maps were created and only remove those.
 
       if (debug) {
-        SolrCore.verbose("TLOG: postSoftCommit: disposing of prevMap="+ 
System.identityHashCode(prevMap) + ", prevMap2=" + 
System.identityHashCode(prevMap2));
+        SolrCore.verbose(
+            "TLOG: postSoftCommit: disposing of prevMap="
+                + System.identityHashCode(prevMap)
+                + ", prevMap2="
+                + System.identityHashCode(prevMap2));
       }
       clearOldMaps();
-
     }
   }
 
   /**
-   * Goes over backwards, following the prevPointer, to merge all partial 
updates into the passed doc. Stops at either a full
-   * document, or if there are no previous entries to follow in the update log.
+   * Goes over backwards, following the prevPointer, to merge all partial 
updates into the passed
+   * doc. Stops at either a full document, or if there are no previous entries 
to follow in the
+   * update log.
    *
-   * @param id          Binary representation of the unique key field
-   * @param prevPointer Pointer to the previous entry in the ulog, based on 
which the current in-place update was made.
-   * @param prevVersion Version of the previous entry in the ulog, based on 
which the current in-place update was made.
-   * @param onlyTheseFields When a non-null set of field names is passed in, 
the resolve process only attempts to populate
-   *        the given fields in this set. When this set is null, it resolves 
all fields.
-   * @param latestPartialDoc   Partial document that is to be populated
-   * @return Returns 0 if a full document was found in the log, -1 if no full 
document was found. If full document was supposed
-   * to be found in the tlogs, but couldn't be found (because the logs were 
rotated) then the prevPointer is returned.
+   * @param id Binary representation of the unique key field
+   * @param prevPointer Pointer to the previous entry in the ulog, based on 
which the current
+   *     in-place update was made.
+   * @param prevVersion Version of the previous entry in the ulog, based on 
which the current
+   *     in-place update was made.
+   * @param onlyTheseFields When a non-null set of field names is passed in, 
the resolve process
+   *     only attempts to populate the given fields in this set. When this set 
is null, it resolves
+   *     all fields.
+   * @param latestPartialDoc Partial document that is to be populated
+   * @return Returns 0 if a full document was found in the log, -1 if no full 
document was found. If
+   *     full document was supposed to be found in the tlogs, but couldn't be 
found (because the
+   *     logs were rotated) then the prevPointer is returned.
    */
   @SuppressWarnings({"unchecked"})
-  synchronized public long applyPartialUpdates(BytesRef id, long prevPointer, 
long prevVersion,
-      Set<String> onlyTheseFields, SolrDocumentBase<?, ?> latestPartialDoc) {
-    
+  public synchronized long applyPartialUpdates(
+      BytesRef id,
+      long prevPointer,
+      long prevVersion,
+      Set<String> onlyTheseFields,
+      SolrDocumentBase<?, ?> latestPartialDoc) {
+
     SolrInputDocument partialUpdateDoc = null;
 
     List<TransactionLog> lookupLogs = Arrays.asList(tlog, prevMapLog, 
prevMapLog2);
     while (prevPointer >= 0) {
-      //go through each partial update and apply it on the incoming doc one 
after another
+      // go through each partial update and apply it on the incoming doc one 
after another
       List<?> entry;
       entry = getEntryFromTLog(prevPointer, prevVersion, lookupLogs);
       if (entry == null) {
-        return prevPointer; // a previous update was supposed to be found, but 
wasn't found (due to log rotation)
+        return prevPointer; // a previous update was supposed to be found, but 
wasn't found (due to
+        // log rotation)
       }
       int flags = (int) entry.get(UpdateLog.FLAGS_IDX);
-      
-      // since updates can depend only upon ADD updates or other 
UPDATE_INPLACE updates, we assert that we aren't
+
+      // since updates can depend only upon ADD updates or other 
UPDATE_INPLACE updates, we assert
+      // that we aren't
       // getting something else

Review comment:
       Fix this: "that we aren't" / "getting something else" is spread over three comment lines; rewrap the sentence.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java
##########
@@ -330,83 +342,108 @@ private static String getRouteField(AddUpdateCommand 
cmd) {
     }
     return result;
   }
-  
+
   /**
-   *
-   * @param fullDoc the full doc to  be compared against
+   * @param fullDoc the full doc to be compared against
    * @param partialDoc the sub document to be tested
    * @return whether partialDoc is derived from fullDoc
    */
   public static boolean isDerivedFromDoc(SolrInputDocument fullDoc, 
SolrInputDocument partialDoc) {
-    for(SolrInputField subSif: partialDoc) {
+    for (SolrInputField subSif : partialDoc) {
       Collection<Object> fieldValues = 
fullDoc.getFieldValues(subSif.getName());
       if (fieldValues == null) return false;
       if (fieldValues.size() < subSif.getValueCount()) return false;
       Collection<Object> partialFieldValues = subSif.getValues();
-      // filter all derived child docs from partial field values since they 
fail List#containsAll check (uses SolrInputDocument#equals which fails).
-      // If a child doc exists in partialDoc but not in full doc, it will not 
be filtered, and therefore List#containsAll will return false
-      Stream<Object> nonChildDocElements = 
partialFieldValues.stream().filter(x -> !(isChildDoc(x) &&
-          (fieldValues.stream().anyMatch(y ->
-              (isChildDoc(x) &&
-                  isDerivedFromDoc((SolrInputDocument) y, (SolrInputDocument) 
x)
-              )
-          )
-          )));
+      // filter all derived child docs from partial field values since they 
fail List#containsAll
+      // check (uses SolrInputDocument#equals which fails).
+      // If a child doc exists in partialDoc but not in full doc, it will not 
be filtered, and
+      // therefore List#containsAll will return false

Review comment:
       Fix this: the List#containsAll explanation is chopped mid-clause ("containsAll" / "check", "and" / "therefore"); rewrap so each line ends at a sensible phrase boundary.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java
##########
@@ -330,83 +342,108 @@ private static String getRouteField(AddUpdateCommand 
cmd) {
     }
     return result;
   }
-  
+
   /**
-   *
-   * @param fullDoc the full doc to  be compared against
+   * @param fullDoc the full doc to be compared against
    * @param partialDoc the sub document to be tested
    * @return whether partialDoc is derived from fullDoc
    */
   public static boolean isDerivedFromDoc(SolrInputDocument fullDoc, 
SolrInputDocument partialDoc) {
-    for(SolrInputField subSif: partialDoc) {
+    for (SolrInputField subSif : partialDoc) {
       Collection<Object> fieldValues = 
fullDoc.getFieldValues(subSif.getName());
       if (fieldValues == null) return false;
       if (fieldValues.size() < subSif.getValueCount()) return false;
       Collection<Object> partialFieldValues = subSif.getValues();
-      // filter all derived child docs from partial field values since they 
fail List#containsAll check (uses SolrInputDocument#equals which fails).
-      // If a child doc exists in partialDoc but not in full doc, it will not 
be filtered, and therefore List#containsAll will return false
-      Stream<Object> nonChildDocElements = 
partialFieldValues.stream().filter(x -> !(isChildDoc(x) &&
-          (fieldValues.stream().anyMatch(y ->
-              (isChildDoc(x) &&
-                  isDerivedFromDoc((SolrInputDocument) y, (SolrInputDocument) 
x)
-              )
-          )
-          )));
+      // filter all derived child docs from partial field values since they 
fail List#containsAll
+      // check (uses SolrInputDocument#equals which fails).
+      // If a child doc exists in partialDoc but not in full doc, it will not 
be filtered, and
+      // therefore List#containsAll will return false
+      Stream<Object> nonChildDocElements =
+          partialFieldValues.stream()
+              .filter(
+                  x ->
+                      !(isChildDoc(x)
+                          && (fieldValues.stream()
+                              .anyMatch(
+                                  y ->
+                                      (isChildDoc(x)
+                                          && isDerivedFromDoc(
+                                              (SolrInputDocument) y, 
(SolrInputDocument) x))))));
       if (!nonChildDocElements.allMatch(fieldValues::contains)) return false;
     }
     return true;
   }
 
   /**
-   * Given an AddUpdateCommand containing update operations (e.g. set, inc), 
merge and resolve the operations into
-   * a partial document that can be used for indexing the in-place updates. 
The AddUpdateCommand is modified to contain
-   * the partial document (instead of the original document which contained 
the update operations) and also
-   * the prevVersion that this in-place update depends on.
-   * Note: updatedFields passed into the method can be changed, i.e. the 
version field can be added to the set.
-   * @return If in-place update cannot succeed, e.g. if the old document is 
deleted recently, then false is returned. A false
-   *        return indicates that this update can be re-tried as a full atomic 
update. Returns true if the in-place update
-   *        succeeds.
+   * Given an AddUpdateCommand containing update operations (e.g. set, inc), 
merge and resolve the
+   * operations into a partial document that can be used for indexing the 
in-place updates. The
+   * AddUpdateCommand is modified to contain the partial document (instead of 
the original document
+   * which contained the update operations) and also the prevVersion that this 
in-place update
+   * depends on. Note: updatedFields passed into the method can be changed, 
i.e. the version field
+   * can be added to the set.
+   *
+   * @return If in-place update cannot succeed, e.g. if the old document is 
deleted recently, then
+   *     false is returned. A false return indicates that this update can be 
re-tried as a full
+   *     atomic update. Returns true if the in-place update succeeds.
    */
-  public boolean doInPlaceUpdateMerge(AddUpdateCommand cmd, Set<String> 
updatedFields) throws IOException {
+  public boolean doInPlaceUpdateMerge(AddUpdateCommand cmd, Set<String> 
updatedFields)
+      throws IOException {
     SolrInputDocument inputDoc = cmd.getSolrInputDocument();
     BytesRef rootIdBytes = cmd.getIndexedId();
     BytesRef idBytes = 
schema.indexableUniqueKey(cmd.getSelfOrNestedDocIdStr());
 
-    updatedFields.add(CommonParams.VERSION_FIELD); // add the version field so 
that it is fetched too
-    SolrInputDocument oldDocument = RealTimeGetComponent.getInputDocument
-      (cmd.getReq().getCore(), idBytes, rootIdBytes,
-          null, // don't want the version to be returned
-          updatedFields, RealTimeGetComponent.Resolution.DOC);
+    updatedFields.add(
+        CommonParams.VERSION_FIELD); // add the version field so that it is 
fetched too
+    SolrInputDocument oldDocument =
+        RealTimeGetComponent.getInputDocument(
+            cmd.getReq().getCore(),
+            idBytes,
+            rootIdBytes,
+            null, // don't want the version to be returned
+            updatedFields,
+            RealTimeGetComponent.Resolution.DOC);
 
     if (oldDocument == RealTimeGetComponent.DELETED || oldDocument == null) {
-      // This doc was deleted recently. In-place update cannot work, hence a 
full atomic update should be tried.
+      // This doc was deleted recently. In-place update cannot work, hence a 
full atomic update
+      // should be tried.
       return false;
     }
 
     if (oldDocument.containsKey(CommonParams.VERSION_FIELD) == false) {
-      throw new SolrException (ErrorCode.INVALID_STATE, "There is no _version_ 
in previous document. id=" + 
-          cmd.getPrintableId());
+      throw new SolrException(
+          ErrorCode.INVALID_STATE,
+          "There is no _version_ in previous document. id=" + 
cmd.getPrintableId());
     }
     Long oldVersion = (Long) 
oldDocument.remove(CommonParams.VERSION_FIELD).getValue();
 
-    // If the oldDocument contains any other field apart from updatedFields 
(or id/version field), then remove them.
-    // This can happen, despite requesting for these fields in the call to 
RTGC.getInputDocument, if the document was
-    // fetched from the tlog and had all these fields (possibly because it was 
a full document ADD operation).
+    // If the oldDocument contains any other field apart from updatedFields 
(or id/version field),
+    // then remove them.
+    // This can happen, despite requesting for these fields in the call to 
RTGC.getInputDocument, if
+    // the document was
+    // fetched from the tlog and had all these fields (possibly because it was 
a full document ADD
+    // operation).

Review comment:
       Fix this: the comment now alternates long lines with one- or two-word fragments ("then remove them.", "the document was", "operation)."); rewrap it manually.
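       One way it could read (sentences joined, wording otherwise unchanged):

           // If the oldDocument contains any other field apart from updatedFields (or id/version
           // field), then remove them. This can happen, despite requesting for these fields in the
           // call to RTGC.getInputDocument, if the document was fetched from the tlog and had all
           // these fields (possibly because it was a full document ADD operation).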

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -1982,7 +2106,9 @@ public void doReplay(TransactionLog translog) {
         cmd.setFlags(UpdateCommand.REPLAY);
         try {
           if (debug) log.debug("commit {}", cmd);
-          uhandler.commit(cmd);          // this should cause a commit to be 
added to the incomplete log and avoid it being replayed again after a restart.
+          uhandler.commit(
+              cmd); // this should cause a commit to be added to the 
incomplete log and avoid it
+          // being replayed again after a restart.

Review comment:
       Fix this: the continuation "// being replayed again after a restart." lands on the line after the call. Put the whole comment above uhandler.commit(cmd).
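       For instance (sketch):

           // this should cause a commit to be added to the incomplete log and avoid it being
           // replayed again after a restart.
           uhandler.commit(cmd);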

##########
File path: solr/core/src/java/org/apache/solr/update/UpdateLog.java
##########
@@ -1612,10 +1718,10 @@ public RecentUpdates getRecentUpdates() {
       }
     }
 
-    // TODO: what if I hand out a list of updates, then do an update, then 
hand out another list (and
+    // TODO: what if I hand out a list of updates, then do an update, then 
hand out another list
+    // (and
     // one of the updates I originally handed out fell off the list).  
Over-request?

Review comment:
       Fix this: "(and" is left hanging at the end of the TODO's first line; rewrap so the parenthetical isn't split.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -312,25 +320,29 @@ protected boolean versionAdd(AddUpdateCommand cmd) throws 
IOException {
       SolrInputField versionField = 
cmd.getSolrInputDocument().getField(CommonParams.VERSION_FIELD);
       if (versionField != null) {
         Object o = versionField.getValue();
-        versionOnUpdate = o instanceof Number ? ((Number) o).longValue() : 
Long.parseLong(o.toString());
+        versionOnUpdate =
+            o instanceof Number ? ((Number) o).longValue() : 
Long.parseLong(o.toString());
       } else {
         // Find the version
         String versionOnUpdateS = 
req.getParams().get(CommonParams.VERSION_FIELD);
         versionOnUpdate = versionOnUpdateS == null ? 0 : 
Long.parseLong(versionOnUpdateS);
       }
     }
 
-    boolean isReplayOrPeersync = (cmd.getFlags() & (UpdateCommand.REPLAY | 
UpdateCommand.PEER_SYNC)) != 0;
+    boolean isReplayOrPeersync =
+        (cmd.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 
0;
     boolean leaderLogic = isLeader && !isReplayOrPeersync;
     boolean forwardedFromCollection = 
cmd.getReq().getParams().get(DISTRIB_FROM_COLLECTION) != null;
 
     VersionBucket bucket = vinfo.bucket(bucketHash);
 
     long dependentVersionFound = -1;
-    // if this is an in-place update, check and wait if we should be waiting 
for a previous update (on which
+    // if this is an in-place update, check and wait if we should be waiting 
for a previous update
+    // (on which
     // this update depends), before entering the synchronized block

Review comment:
       Fix this: "(on which" is orphaned; rewrap so "(on which this update depends)" stays on one line.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -394,17 +424,26 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
           if (versionOnUpdate != 0) {
             Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
             long foundVersion = lastVersion == null ? -1 : lastVersion;
-            if (versionOnUpdate == foundVersion || (versionOnUpdate < 0 && 
foundVersion < 0)
+            if (versionOnUpdate == foundVersion
+                || (versionOnUpdate < 0 && foundVersion < 0)
                 || (versionOnUpdate == 1 && foundVersion > 0)) {
-              // we're ok if versions match, or if both are negative (all 
missing docs are equal), or if cmd
+              // we're ok if versions match, or if both are negative (all 
missing docs are equal),
+              // or if cmd
               // specified it must exist (versionOnUpdate==1) and it does.

Review comment:
       Fix this: "or if cmd" is stranded; rewrap so the three conditions read as one sentence.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -427,30 +466,37 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
             long prev = cmd.prevVersion;
             Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
             if (lastVersion == null || Math.abs(lastVersion) < prev) {
-              // this was checked for (in waitForDependentUpdates()) before 
entering the synchronized block.
+              // this was checked for (in waitForDependentUpdates()) before 
entering the
+              // synchronized block.
               // So we shouldn't be here, unless what must've happened is:
-              // by the time synchronization block was entered, the prev 
update was deleted by DBQ. Since
-              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a version
+              // by the time synchronization block was entered, the prev 
update was deleted by DBQ.
+              // Since
+              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a
+              // version
               // from the deleted list (which might be older than the prev 
update!)
               UpdateCommand fetchedFromLeader = fetchFullUpdateFromLeader(cmd, 
versionOnUpdate);
 
               if (fetchedFromLeader instanceof DeleteUpdateCommand) {
                 if (log.isInfoEnabled()) {
-                  log.info("In-place update of {} failed to find valid 
lastVersion to apply to, and the document was deleted at the leader 
subsequently."
-                      , idBytes.utf8ToString());
+                  log.info(
+                      "In-place update of {} failed to find valid lastVersion 
to apply to, and the document was deleted at the leader subsequently.",
+                      idBytes.utf8ToString());
                 }
                 versionDelete((DeleteUpdateCommand) fetchedFromLeader);
                 return true;
               } else {
                 assert fetchedFromLeader instanceof AddUpdateCommand;
-                // Newer document was fetched from the leader. Apply that 
document instead of this current in-place
+                // Newer document was fetched from the leader. Apply that 
document instead of this
+                // current in-place
                 // update.
                 if (log.isInfoEnabled()) {
                   log.info(
                       "In-place update of {} failed to find valid lastVersion 
to apply to, forced to fetch full doc from leader: {}",
-                      idBytes.utf8ToString(), fetchedFromLeader);
+                      idBytes.utf8ToString(),
+                      fetchedFromLeader);
                 }
-                // Make this update to become a non-inplace update containing 
the full document obtained from the
+                // Make this update to become a non-inplace update containing 
the full document
+                // obtained from the
                 // leader

Review comment:
       Fix this: "obtained from the" / "leader" are fragments; rewrap (and "Make this update to become a non-inplace update" could read simply "Make this a non-inplace update").

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -427,30 +466,37 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
             long prev = cmd.prevVersion;
             Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
             if (lastVersion == null || Math.abs(lastVersion) < prev) {
-              // this was checked for (in waitForDependentUpdates()) before 
entering the synchronized block.
+              // this was checked for (in waitForDependentUpdates()) before 
entering the
+              // synchronized block.
               // So we shouldn't be here, unless what must've happened is:
-              // by the time synchronization block was entered, the prev 
update was deleted by DBQ. Since
-              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a version
+              // by the time synchronization block was entered, the prev 
update was deleted by DBQ.
+              // Since
+              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a
+              // version

Review comment:
       Fix this: "Since" and "version" are now lines of their own, which makes the DBQ explanation hard to follow; rewrap the whole comment.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -427,30 +466,37 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
             long prev = cmd.prevVersion;
             Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
             if (lastVersion == null || Math.abs(lastVersion) < prev) {
-              // this was checked for (in waitForDependentUpdates()) before 
entering the synchronized block.
+              // this was checked for (in waitForDependentUpdates()) before 
entering the
+              // synchronized block.
               // So we shouldn't be here, unless what must've happened is:
-              // by the time synchronization block was entered, the prev 
update was deleted by DBQ. Since
-              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a version
+              // by the time synchronization block was entered, the prev 
update was deleted by DBQ.
+              // Since
+              // now that update is not in index, the vinfo.lookupVersion() is 
possibly giving us a
+              // version
               // from the deleted list (which might be older than the prev 
update!)
               UpdateCommand fetchedFromLeader = fetchFullUpdateFromLeader(cmd, 
versionOnUpdate);
 
               if (fetchedFromLeader instanceof DeleteUpdateCommand) {
                 if (log.isInfoEnabled()) {
-                  log.info("In-place update of {} failed to find valid 
lastVersion to apply to, and the document was deleted at the leader 
subsequently."
-                      , idBytes.utf8ToString());
+                  log.info(
+                      "In-place update of {} failed to find valid lastVersion 
to apply to, and the document was deleted at the leader subsequently.",
+                      idBytes.utf8ToString());
                 }
                 versionDelete((DeleteUpdateCommand) fetchedFromLeader);
                 return true;
               } else {
                 assert fetchedFromLeader instanceof AddUpdateCommand;
-                // Newer document was fetched from the leader. Apply that 
document instead of this current in-place
+                // Newer document was fetched from the leader. Apply that 
document instead of this
+                // current in-place
                 // update.

Review comment:
       Fix this: "current in-place" / "update." is split; rewrap so the sentence stays together.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -511,52 +562,66 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
   }
 
   /**
-   *
    * @return whether cmd doc should be cloned before localAdd
    */
   protected boolean shouldCloneCmdDoc() {
     return false;
   }
 
   @VisibleForTesting
-  boolean shouldBufferUpdate(AddUpdateCommand cmd, boolean isReplayOrPeersync, 
UpdateLog.State state) {
+  boolean shouldBufferUpdate(
+      AddUpdateCommand cmd, boolean isReplayOrPeersync, UpdateLog.State state) 
{
     if (state == UpdateLog.State.APPLYING_BUFFERED
         && !isReplayOrPeersync
         && !cmd.isInPlaceUpdate()) {
-      // this a new update sent from the leader, it contains whole document 
therefore it won't depend on other updates
+      // this a new update sent from the leader, it contains whole document 
therefore it won't
+      // depend on other updates
       return false;
     }
 
     return state != UpdateLog.State.ACTIVE && isReplayOrPeersync == false;
   }
 
   /**
-   * This method checks the update/transaction logs and index to find out if 
the update ("previous update") that the current update
-   * depends on (in the case that this current update is an in-place update) 
has already been completed. If not,
-   * this method will wait for the missing update until it has arrived. If it 
doesn't arrive within a timeout threshold,
-   * then this actively fetches from the leader.
-   * 
-   * @return -1 if the current in-place should be dropped, or last found 
version if previous update has been indexed.
+   * This method checks the update/transaction logs and index to find out if 
the update ("previous
+   * update") that the current update depends on (in the case that this 
current update is an
+   * in-place update) has already been completed. If not, this method will 
wait for the missing
+   * update until it has arrived. If it doesn't arrive within a timeout 
threshold, then this
+   * actively fetches from the leader.
+   *
+   * @return -1 if the current in-place should be dropped, or last found 
version if previous update
+   *     has been indexed.
    */
-  private long waitForDependentUpdates(AddUpdateCommand cmd, long 
versionOnUpdate,
-                               boolean isReplayOrPeersync, VersionBucket 
bucket) throws IOException {
+  private long waitForDependentUpdates(
+      AddUpdateCommand cmd, long versionOnUpdate, boolean isReplayOrPeersync, 
VersionBucket bucket)
+      throws IOException {
     long lastFoundVersion = 0;
     TimeOut waitTimeout = new TimeOut(5, TimeUnit.SECONDS, 
TimeSource.NANO_TIME);
 
     vinfo.lockForUpdate();
     try {
-      lastFoundVersion = 
bucket.runWithLock(vinfo.getVersionBucketLockTimeoutMs(), () -> 
doWaitForDependentUpdates(cmd, versionOnUpdate, isReplayOrPeersync, bucket, 
waitTimeout));
+      lastFoundVersion =
+          bucket.runWithLock(
+              vinfo.getVersionBucketLockTimeoutMs(),
+              () ->
+                  doWaitForDependentUpdates(
+                      cmd, versionOnUpdate, isReplayOrPeersync, bucket, 
waitTimeout));
     } finally {
       vinfo.unlockForUpdate();
     }
 
     if (Math.abs(lastFoundVersion) > cmd.prevVersion) {
-      // This must've been the case due to a higher version full update 
succeeding concurrently, while we were waiting or
-      // trying to index this partial update. Since a full update more recent 
than this partial update has succeeded,
+      // This must've been the case due to a higher version full update 
succeeding concurrently,
+      // while we were waiting or
+      // trying to index this partial update. Since a full update more recent 
than this partial
+      // update has succeeded,
       // we can drop the current update.

Review comment:
       Fix this: "while we were waiting or" and "update has succeeded," end up as fragments; rewrap the explanation into complete lines.
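       Could read something like (same wording, re-flowed):

           // This must've been the case due to a higher version full update succeeding concurrently,
           // while we were waiting or trying to index this partial update. Since a full update more
           // recent than this partial update has succeeded, we can drop the current update.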

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -459,10 +505,13 @@ private boolean doVersionAdd(AddUpdateCommand cmd, long 
versionOnUpdate, boolean
               }
             } else {
               if (lastVersion != null && Math.abs(lastVersion) > prev) {
-                // this means we got a newer full doc update and in that case 
it makes no sense to apply the older
+                // this means we got a newer full doc update and in that case 
it makes no sense to
+                // apply the older
                 // inplace update. Drop this update

Review comment:
       Fix this: "apply the older" / "inplace update. Drop this update" is split; rewrap into a single readable comment.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -949,7 +1067,8 @@ protected boolean versionDelete(DeleteUpdateCommand cmd) 
throws IOException {
       return false;
     }
 
-    // This is only the hash for the bucket, and must be based only on the 
uniqueKey (i.e. do not use a pluggable hash
+    // This is only the hash for the bucket, and must be based only on the 
uniqueKey (i.e. do not
+    // use a pluggable hash
     // here)

Review comment:
       Fix this: same stranded "here)" as in versionAdd above; rewrap this copy of the comment too.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -744,17 +839,20 @@ public void processDelete(DeleteUpdateCommand cmd) throws 
IOException {
     }
   }
 
-  // Implementing min_rf here was a bit tricky. When a request comes in for a 
delete by id to a replica that does _not_
-  // have any documents specified by those IDs, the request is not forwarded 
to any other replicas on that shard. Thus
-  // we have to spoof the replicationTracker and set the achieved rf to the 
number of active replicas.
+  // Implementing min_rf here was a bit tricky. When a request comes in for a 
delete by id to a
+  // replica that does _not_
+  // have any documents specified by those IDs, the request is not forwarded 
to any other replicas
+  // on that shard. Thus
+  // we have to spoof the replicationTracker and set the achieved rf to the 
number of active
+  // replicas.

Review comment:
       Fix this: every sentence of the min_rf note now ends in a dangling fragment ("replica that does _not_", "on that shard. Thus", "replicas."); rewrap the whole three-sentence comment.

##########
File path: 
solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -1201,35 +1352,43 @@ private static String buildMsg(List<Error> errors) {
     }
   }
 
-
   //    Keeps track of the replication factor achieved for a distributed update request
-  //    originated in this distributed update processor. A RollupReplicationTracker is the only tracker that will
+  //    originated in this distributed update processor. A RollupReplicationTracker is the only
+  // tracker that will
   //    persist across sub-requests.
   //
-  //   Note that the replica that receives the original request has the only RollupReplicationTracker that exists for the
-  //   lifetime of the batch. The leader for each shard keeps track of its own achieved replication for its shard
-  //   and attaches that to the response to the originating node (i.e. the one with the RollupReplicationTracker).
-  //   Followers in general do not need a tracker of any sort with the sole exception of the RollupReplicationTracker
+  //   Note that the replica that receives the original request has the only
+  // RollupReplicationTracker that exists for the
+  //   lifetime of the batch. The leader for each shard keeps track of its own achieved replication
+  // for its shard
+  //   and attaches that to the response to the originating node (i.e. the one with the

Review comment:
       Fix this
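       If the concern is the orphaned "tracker that will" fragment, the first block could be reflowed as, e.g.:

           //    Keeps track of the replication factor achieved for a distributed update request
           //    originated in this distributed update processor. A RollupReplicationTracker is the
           //    only tracker that will persist across sub-requests.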

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -877,7 +991,8 @@ protected void versionDeleteByQuery(DeleteUpdateCommand cmd) throws IOException
       doLocalDeleteByQuery(cmd, versionOnUpdate, isReplayOrPeersync);
 
       // since we don't know which documents were deleted, the easiest thing to do is to invalidate
-      // all real-time caches (i.e. UpdateLog) which involves also getting a new version of the IndexReader
+      // all real-time caches (i.e. UpdateLog) which involves also getting a new version of the
+      // IndexReader
       // (so cache misses will see up-to-date data)

Review comment:
       Fix this
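       e.g. rebalanced so "IndexReader" is not stranded on its own line:

           // since we don't know which documents were deleted, the easiest thing to do is to
           // invalidate all real-time caches (i.e. UpdateLog) which involves also getting a new
           // version of the IndexReader (so cache misses will see up-to-date data)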

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -1019,13 +1156,21 @@ private boolean doVersionDelete(DeleteUpdateCommand cmd, long versionOnUpdate, l
           if (signedVersionOnUpdate != 0) {
             Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
             long foundVersion = lastVersion == null ? -1 : lastVersion;
-            if ((signedVersionOnUpdate == foundVersion) || (signedVersionOnUpdate < 0 && foundVersion < 0)
+            if ((signedVersionOnUpdate == foundVersion)
+                || (signedVersionOnUpdate < 0 && foundVersion < 0)
                 || (signedVersionOnUpdate == 1 && foundVersion > 0)) {
-              // we're ok if versions match, or if both are negative (all missing docs are equal), or if cmd
+              // we're ok if versions match, or if both are negative (all missing docs are equal),
+              // or if cmd
               // specified it must exist (versionOnUpdate==1) and it does.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -769,28 +867,34 @@ protected void doDeleteById(DeleteUpdateCommand cmd) throws IOException {
     if (returnVersions && rsp != null && cmd.getIndexedId() != null && idField != null) {
       if (deleteResponse == null) {
         deleteResponse = new NamedList<>(1);
-        rsp.add("deletes",deleteResponse);
+        rsp.add("deletes", deleteResponse);
       }
       if (scratch == null) scratch = new CharsRefBuilder();
       idField.getType().indexedToReadable(cmd.getIndexedId(), scratch);
-      deleteResponse.add(scratch.toString(), cmd.getVersion());  // we're returning the version of the delete.. not the version of the doc we deleted.
+      deleteResponse.add(
+          scratch.toString(),
+          cmd.getVersion()); // we're returning the version of the delete.. not the version of the
+      // doc we deleted.

Review comment:
       Fix this
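       Rather than letting the trailing comment straddle the wrapped call, it could be hoisted above the statement, e.g.:

           // we're returning the version of the delete.. not the version of the doc we deleted.
           deleteResponse.add(scratch.toString(), cmd.getVersion());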

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -86,14 +85,18 @@
   private boolean readOnlyCollection = false;
   private boolean broadcastDeleteById = false;
 
-  // The cached immutable clusterState for the update... usually refreshed for each individual update.
-  // Different parts of this class used to request current clusterState views, which lead to subtle bugs and race conditions
-  // such as SOLR-13815 (live split data loss.)  Most likely, the only valid reasons for updating clusterState should be on
+  // The cached immutable clusterState for the update... usually refreshed for each individual
+  // update.
+  // Different parts of this class used to request current clusterState views, which lead to subtle
+  // bugs and race conditions
+  // such as SOLR-13815 (live split data loss.)  Most likely, the only valid reasons for updating

Review comment:
       Fix this
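       One possible rewrap of the first two sentences (wording unchanged):

           // The cached immutable clusterState for the update... usually refreshed for each
           // individual update. Different parts of this class used to request current clusterState
           // views, which lead to subtle bugs and race conditions such as SOLR-13815 (live split
           // data loss.)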

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -1244,25 +1403,29 @@ public void testAndSetAchievedRf(int rf) {
     }
 
     public String toString() {
-      StringBuilder sb = new StringBuilder("RollupRequestReplicationTracker")
-          .append(" achievedRf: ")
-          .append(achievedRf);
+      StringBuilder sb =
+          new StringBuilder("RollupRequestReplicationTracker")
+              .append(" achievedRf: ")
+              .append(achievedRf);
       return sb.toString();
     }
   }
 
-
-  // Allocate a LeaderRequestReplicatinTracker if (and only if) we're a leader. If the request comes in to the leader
+  // Allocate a LeaderRequestReplicatinTracker if (and only if) we're a leader. If the request comes
+  // in to the leader
   // at first, allocate both one of these and a RollupRequestReplicationTracker.
   //
-  // Since these are leader-only, all they really have to do is track the individual update request for this shard
-  // and return it to be added to the rollup tracker. Which is kind of simple since we get an onSuccess method in
+  // Since these are leader-only, all they really have to do is track the individual update request
+  // for this shard
+  // and return it to be added to the rollup tracker. Which is kind of simple since we get an
+  // onSuccess method in

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
##########
@@ -1244,25 +1403,29 @@ public void testAndSetAchievedRf(int rf) {
     }
 
     public String toString() {
-      StringBuilder sb = new StringBuilder("RollupRequestReplicationTracker")
-          .append(" achievedRf: ")
-          .append(achievedRf);
+      StringBuilder sb =
+          new StringBuilder("RollupRequestReplicationTracker")
+              .append(" achievedRf: ")
+              .append(achievedRf);
       return sb.toString();
     }
   }
 
-
-  // Allocate a LeaderRequestReplicatinTracker if (and only if) we're a leader. If the request comes in to the leader
+  // Allocate a LeaderRequestReplicatinTracker if (and only if) we're a leader. If the request comes
+  // in to the leader
   // at first, allocate both one of these and a RollupRequestReplicationTracker.
   //
-  // Since these are leader-only, all they really have to do is track the individual update request for this shard
-  // and return it to be added to the rollup tracker. Which is kind of simple since we get an onSuccess method in
+  // Since these are leader-only, all they really have to do is track the individual update request
+  // for this shard
+  // and return it to be added to the rollup tracker. Which is kind of simple since we get an
+  // onSuccess method in
   // SolrCmdDistributor
 
   public static class LeaderRequestReplicationTracker {
     private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-    // Since we only allocate one of these on the leader and, by definition, the leader has been found and is running,
+    // Since we only allocate one of these on the leader and, by definition, the leader has been
+    // found and is running,
     // we have a replication factor of one by default.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -203,17 +217,20 @@ public void processCommit(CommitUpdateCommand cmd) throws IOException {
         params.set(DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
         params.set(COMMIT_END_POINT, "leaders");
         if (useNodes != null) {
-          params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
-              zkController.getBaseUrl(), req.getCore().getName()));
+          params.set(
+              DISTRIB_FROM,
+              ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
           cmdDistrib.distribCommit(cmd, useNodes, params);
           issuedDistribCommit = true;
         }
       }
 
       if (isLeader) {
         if (issuedDistribCommit) {
-          // defensive copy of params, which was passed into distribCommit(...) above; will unconditionally replace
-          // DISTRIB_UPDATE_PARAM, COMMIT_END_POINT, and DISTRIB_FROM if the new `params` val will actually be used
+          // defensive copy of params, which was passed into distribCommit(...) above; will
+          // unconditionally replace
+          // DISTRIB_UPDATE_PARAM, COMMIT_END_POINT, and DISTRIB_FROM if the new `params` val will
+          // actually be used

Review comment:
       Fix this
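       For instance, the comment could be rebalanced so "unconditionally replace" stays with its sentence:

           // defensive copy of params, which was passed into distribCommit(...) above; will
           // unconditionally replace DISTRIB_UPDATE_PARAM, COMMIT_END_POINT, and DISTRIB_FROM
           // if the new `params` val will actually be used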

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -223,22 +240,29 @@ public void processCommit(CommitUpdateCommand cmd) throws IOException {
         useNodes = getReplicaNodesForLeader(cloudDesc.getShardId(), leaderReplica);
 
         if (useNodes != null) {
-          params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
-              zkController.getBaseUrl(), req.getCore().getName()));
+          params.set(
+              DISTRIB_FROM,
+              ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), req.getCore().getName()));
 
-          // NOTE: distribCommit(...) internally calls `blockAndDoRetries()`, flushing any TOLEADER distrib commits
+          // NOTE: distribCommit(...) internally calls `blockAndDoRetries()`, flushing any TOLEADER
+          // distrib commits
           cmdDistrib.distribCommit(cmd, useNodes, params);
           issuedDistribCommit = true;
         }
 
         doLocalCommit(cmd);
       }
       if (issuedDistribCommit) {
-        // TODO: according to discussion on SOLR-15045, this call (and all tracking of `issuedDistribCommit`) may
-        //  well be superfluous, and can probably simply be removed. It is left in place for now, intentionally
-        //  punting on the question of whether this internal `blockAndDoRetries()` is necessary. At worst, its
-        //  presence is misleading; but it should be harmless, and allows the change fixing SOLR-15045 to be as
-        //  tightly scoped as possible, leaving the behavior of the code otherwise functionally equivalent (for
+        // TODO: according to discussion on SOLR-15045, this call (and all tracking of
+        // `issuedDistribCommit`) may
+        //  well be superfluous, and can probably simply be removed. It is left in place for now,
+        // intentionally
+        //  punting on the question of whether this internal `blockAndDoRetries()` is necessary. At
+        // worst, its
+        //  presence is misleading; but it should be harmless, and allows the change fixing
+        // SOLR-15045 to be as
+        //  tightly scoped as possible, leaving the behavior of the code otherwise functionally
+        // equivalent (for
         //  better or worse!)

Review comment:
       Fix this
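       A possible rewrap of the TODO block (wording unchanged):

           // TODO: according to discussion on SOLR-15045, this call (and all tracking of
           //  `issuedDistribCommit`) may well be superfluous, and can probably simply be removed.
           //  It is left in place for now, intentionally punting on the question of whether this
           //  internal `blockAndDoRetries()` is necessary. At worst, its presence is misleading;
           //  but it should be harmless, and allows the change fixing SOLR-15045 to be as tightly
           //  scoped as possible, leaving the behavior of the code otherwise functionally
           //  equivalent (for better or worse!)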

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -257,7 +281,8 @@ public void processAdd(AddUpdateCommand cmd) throws IOException {
 
     setupRequest(cmd);
 
-    // check if client has requested minimum replication factor information. will set replicationTracker to null if
+    // check if client has requested minimum replication factor information. will set
+    // replicationTracker to null if
     // we aren't the leader or subShardLeader

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -425,37 +473,43 @@ protected void doDeleteByQuery(DeleteUpdateCommand cmd) throws IOException {
       isLeader = false;
     }
 
-    // check if client has requested minimum replication factor information. will set replicationTracker to null if
+    // check if client has requested minimum replication factor information. will set
+    // replicationTracker to null if
     // we aren't the leader or subShardLeader

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -331,15 +366,20 @@ public void processDelete(DeleteUpdateCommand cmd) throws IOException {
   protected void doDeleteById(DeleteUpdateCommand cmd) throws IOException {
     setupRequest(cmd);
 
-    if (broadcastDeleteById && DistribPhase.NONE == DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM))) {
+    if (broadcastDeleteById
+        && DistribPhase.NONE
+            == DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM))) {
       DocCollection coll = clusterState.getCollection(collection);
       if (log.isDebugEnabled()) {
-        log.debug("The deleteById command for doc {} is missing the required route, broadcasting to leaders of other shards", cmd.getId());
+        log.debug(
+            "The deleteById command for doc {} is missing the required route, broadcasting to leaders of other shards",
+            cmd.getId());
       }
       forwardDelete(coll, cmd);
     }
 
-    // check if client has requested minimum replication factor information. will set replicationTracker to null if
+    // check if client has requested minimum replication factor information. will set
+    // replicationTracker to null if
     // we aren't the leader or subShardLeader

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -754,37 +880,41 @@ protected boolean shouldCloneCmdDoc() {
   // Sets replicationTracker = null if we aren't the leader
   // We have two possibilities here:
   //
-  // 1> we are a leader: Allocate a LeaderTracker and, if we're getting the original request, a RollupTracker
+  // 1> we are a leader: Allocate a LeaderTracker and, if we're getting the original request, a
+  // RollupTracker
   // 2> we're a follower: allocat a RollupTracker
   //
   private void checkReplicationTracker(UpdateCommand cmd) {
 
     SolrParams rp = cmd.getReq().getParams();
     String distribUpdate = rp.get(DISTRIB_UPDATE_PARAM);
-    // Ok,we're receiving the original request, we need a rollup tracker, but only one so we accumulate over the
+    // Ok,we're receiving the original request, we need a rollup tracker, but only one so we
+    // accumulate over the
     // course of a batch.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessor.java
##########
@@ -64,20 +64,22 @@
   private final SolrCore core;
   private final NamedList<Object> tombstoneConfig;
 
-  private final DistributedUpdateProcessor distribProc;  // the distributed update processor following us
+  private final DistributedUpdateProcessor
+      distribProc; // the distributed update processor following us

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -655,7 +768,9 @@ void setupRequest(UpdateCommand cmd) {
 
         assert TestInjection.injectFailReplicaRequests();
 
-        isLeader = false;     // we actually might be the leader, but we don't want leader-logic for these types of updates anyway.
+        isLeader =
+            false; // we actually might be the leader, but we don't want leader-logic for these
+        // types of updates anyway.

Review comment:
       Fix this
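       Hoisting the comment above the assignment would avoid splitting it around the wrapped statement, e.g.:

           // we actually might be the leader, but we don't want leader-logic
           // for these types of updates anyway.
           isLeader = false;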

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -602,25 +702,34 @@ void setupRequest(UpdateCommand cmd) {
     updateCommand = cmd;
     zkCheck();
     if (cmd instanceof AddUpdateCommand) {
-      AddUpdateCommand acmd = (AddUpdateCommand)cmd;
+      AddUpdateCommand acmd = (AddUpdateCommand) cmd;
       nodes = setupRequest(acmd.getIndexedIdStr(), acmd.getSolrInputDocument());
     } else if (cmd instanceof DeleteUpdateCommand) {
-      DeleteUpdateCommand dcmd = (DeleteUpdateCommand)cmd;
-      nodes = setupRequest(dcmd.getId(), null, (null != dcmd.getRoute() ? dcmd.getRoute() : req.getParams().get(ShardParams._ROUTE_)) );
+      DeleteUpdateCommand dcmd = (DeleteUpdateCommand) cmd;
+      nodes =
+          setupRequest(
+              dcmd.getId(),
+              null,
+              (null != dcmd.getRoute()
+                  ? dcmd.getRoute()
+                  : req.getParams().get(ShardParams._ROUTE_)));
     }
   }
 
   protected List<SolrCmdDistributor.Node> setupRequest(String id, SolrInputDocument doc) {
     return setupRequest(id, doc, null);
   }
 
-  protected List<SolrCmdDistributor.Node> setupRequest(String id, SolrInputDocument doc, String route) {
+  protected List<SolrCmdDistributor.Node> setupRequest(
+      String id, SolrInputDocument doc, String route) {
     // if we are in zk mode...
 
     assert TestInjection.injectUpdateRandomPause();
 
     if ((updateCommand.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 0) {
-      isLeader = false;     // we actually might be the leader, but we don't want leader-logic for these types of updates anyway.
+      isLeader =
+          false; // we actually might be the leader, but we don't want leader-logic for these types
+      // of updates anyway.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -576,24 +666,34 @@ protected void doDistribDeleteByQuery(DeleteUpdateCommand cmd, List<SolrCmdDistr
   protected String getLeaderUrl(String id) {
     // try get leader from req params, fallback to zk lookup if not found.
     String distribFrom = req.getParams().get(DISTRIB_FROM);
-    if(distribFrom != null) {
+    if (distribFrom != null) {
       return distribFrom;
     }
     return getLeaderUrlZk(id);
   }
 
   private String getLeaderUrlZk(String id) {
-    // An update we're dependent upon didn't arrive! This is unexpected. Perhaps likely our leader is
-    // down or partitioned from us for some reason. Lets force refresh cluster state, and request the
+    // An update we're dependent upon didn't arrive! This is unexpected. Perhaps likely our leader
+    // is
+    // down or partitioned from us for some reason. Lets force refresh cluster state, and request
+    // the
     // leader for the update.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -754,37 +880,41 @@ protected boolean shouldCloneCmdDoc() {
   // Sets replicationTracker = null if we aren't the leader
   // We have two possibilities here:
   //
-  // 1> we are a leader: Allocate a LeaderTracker and, if we're getting the original request, a RollupTracker
+  // 1> we are a leader: Allocate a LeaderTracker and, if we're getting the original request, a
+  // RollupTracker
   // 2> we're a follower: allocat a RollupTracker
   //
   private void checkReplicationTracker(UpdateCommand cmd) {
 
     SolrParams rp = cmd.getReq().getParams();
     String distribUpdate = rp.get(DISTRIB_UPDATE_PARAM);
-    // Ok,we're receiving the original request, we need a rollup tracker, but only one so we accumulate over the
+    // Ok,we're receiving the original request, we need a rollup tracker, but only one so we
+    // accumulate over the
     // course of a batch.
-    if ((distribUpdate == null || DistribPhase.NONE.toString().equals(distribUpdate)) &&
-        rollupReplicationTracker == null) {
+    if ((distribUpdate == null || DistribPhase.NONE.toString().equals(distribUpdate))
+        && rollupReplicationTracker == null) {
       rollupReplicationTracker = new RollupRequestReplicationTracker();
     }
-    // If we're a leader, we need a leader replication tracker, so let's do that. If there are multiple docs in
+    // If we're a leader, we need a leader replication tracker, so let's do that. If there are
+    // multiple docs in
     // a batch we need to use the _same_ leader replication tracker.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -1125,23 +1302,31 @@ protected void doDistribFinish() {
       // recover - the doc was already added locally, so it should have been
       // legit
 
-      DistribPhase phase = DistribPhase.parseParam(error.req.uReq.getParams().get(DISTRIB_UPDATE_PARAM));
+      DistribPhase phase =
+          DistribPhase.parseParam(error.req.uReq.getParams().get(DISTRIB_UPDATE_PARAM));
       if (phase != DistribPhase.FROMLEADER)
         continue; // don't have non-leaders try to recovery other nodes
 
-      // commits are special -- they can run on any node irrespective of whether it is a leader or not
+      // commits are special -- they can run on any node irrespective of whether it is a leader or
+      // not
       // we don't want to run recovery on a node which missed a commit command

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java
##########
@@ -164,22 +154,26 @@ public void processAdd(AddUpdateCommand cmd) throws IOException {
 
   @Override
   public void processDelete(DeleteUpdateCommand cmd) throws IOException {
-    
+
     try {
 
       super.processDelete(cmd);
 
     } catch (Throwable t) {
       firstErrTracker.caught(t);
 
-      ToleratedUpdateError err = new ToleratedUpdateError(cmd.isDeleteById() ? CmdType.DELID : CmdType.DELQ,
-                                                          cmd.isDeleteById() ? cmd.id : cmd.query,
-                                                          t.getMessage());
+      ToleratedUpdateError err =
+          new ToleratedUpdateError(
+              cmd.isDeleteById() ? CmdType.DELID : CmdType.DELQ,
+              cmd.isDeleteById() ? cmd.id : cmd.query,
+              t.getMessage());
       knownErrors.add(err);
 
       // NOTE: we're not using this to dedup before adding to knownErrors.
-      // if we're lucky enough to get an immediate local failure (ie: we're a leader, or some other processor
-      // failed) then recording the multiple failures is a good thing -- helps us with an accurate fail
+      // if we're lucky enough to get an immediate local failure (ie: we're a leader, or some other
+      // processor
+      // failed) then recording the multiple failures is a good thing -- helps us with an accurate
+      // fail

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/RoutedAliasUpdateProcessor.java
##########
@@ -72,7 +72,8 @@
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   // make sure we don't request collection properties any more frequently than once a minute during
-  // slow continuous indexing, and even less frequently during bulk indexing. (cache is updated by zk
+  // slow continuous indexing, and even less frequently during bulk indexing. (cache is updated by
+  // zk
   // watch instead of re-requested until indexing has been stopped for the duration specified here)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessor.java
##########
@@ -397,22 +427,23 @@ public void processAdd(AddUpdateCommand cmd) throws IOException {
       }
 
       try {
-        cmd.setVersion(oldSolrVersion);  // use optimistic concurrency to ensure that the doc has not changed in the meantime
+        cmd.setVersion(
+            oldSolrVersion); // use optimistic concurrency to ensure that the doc has not changed in
+        // the meantime

Review comment:
       Fix this
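       Moving the comment onto its own line keeps it in one piece and lets the call stay unwrapped, e.g.:

           // use optimistic concurrency to ensure that the doc has not changed in the meantime
           cmd.setVersion(oldSolrVersion);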

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessor.java
##########
@@ -464,42 +499,58 @@ public void processDelete(DeleteUpdateCommand cmd) throws IOException {
         // drop the delete, and instead propagate an AddDoc that
         // replaces the doc with a new "empty" one that records the deleted version
 
-        SolrInputDocument newDoc = createTombstoneDocument(core.getLatestSchema(), cmd.getId(), versionFieldNames, deleteParamValues, this.tombstoneConfig);
+        SolrInputDocument newDoc =
+            createTombstoneDocument(
+                core.getLatestSchema(),
+                cmd.getId(),
+                versionFieldNames,
+                deleteParamValues,
+                this.tombstoneConfig);
 
         AddUpdateCommand newCmd = new AddUpdateCommand(cmd.getReq());
         newCmd.solrDoc = newDoc;
         newCmd.commitWithin = cmd.commitWithin;
 
-        newCmd.setVersion(oldSolrVersion);  // use optimistic concurrency to ensure that the doc has not changed in the meantime
+        newCmd.setVersion(
+            oldSolrVersion); // use optimistic concurrency to ensure that the doc has not changed in
+        // the meantime

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java
##########
@@ -231,23 +225,25 @@ public void processRollback(RollbackUpdateCommand cmd) throws IOException {
   public void finish() throws IOException {
 
     // even if processAdd threw an error, this.finish() is still called and we might have additional
-    // errors from other remote leaders that we need to check for from the finish method of downstream processors
+    // errors from other remote leaders that we need to check for from the finish method of
+    // downstream processors
     // (like DUP)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java
##########
@@ -292,19 +289,21 @@ public void finish() throws IOException {
     // decide if we have hit a situation where we know an error needs to be thrown.
 
     if ((DistribPhase.TOLEADER.equals(distribPhase) ? 0 : maxErrors) < knownErrors.size()) {
-      // NOTE: even if maxErrors wasn't exceeded, we need to throw an error when we have any errors if we're
-      // a leader that was forwarded to by another node so that the forwarding node knows we encountered some
+      // NOTE: even if maxErrors wasn't exceeded, we need to throw an error when we have any errors
+      // if we're
+      // a leader that was forwarded to by another node so that the forwarding node knows we
+      // encountered some
       // problems and can aggregate the results

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java
##########
@@ -231,23 +225,25 @@ public void processRollback(RollbackUpdateCommand cmd) throws IOException {
   public void finish() throws IOException {
 
     // even if processAdd threw an error, this.finish() is still called and we might have additional
-    // errors from other remote leaders that we need to check for from the finish method of downstream processors
+    // errors from other remote leaders that we need to check for from the finish method of
+    // downstream processors
     // (like DUP)
 
     try {
       super.finish();
     } catch (DistributedUpdateProcessor.DistributedUpdatesAsyncException duae) {
       firstErrTracker.caught(duae);
 
-
       // adjust our stats based on each of the distributed errors
       for (Error error : duae.errors) {
-        // we can't trust the req info from the Error, because multiple original requests might have been
+        // we can't trust the req info from the Error, because multiple original requests might have
+        // been
         // lumped together

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java
##########
@@ -366,23 +416,26 @@ private void markAndSweepByIdleTime() {
   }
 
   /*
-    Must be called after acquiring markAndSweepLock
-   */
+   Must be called after acquiring markAndSweepLock
+  */
   private void markAndSweepByRamSize() {
     assert markAndSweepLock.isHeldByCurrentThread() : "markAndSweepLock held by another thread";
     List<CacheEntry<K, V>> entriesInAccessOrder = new ArrayList<>(map.size());
-    map.forEach((o, kvCacheEntry) -> {
-      kvCacheEntry.lastAccessedCopy = kvCacheEntry.lastAccessed; // important because we want to avoid volatile read during comparisons
-      entriesInAccessOrder.add(kvCacheEntry);
-    });
+    map.forEach(
+        (o, kvCacheEntry) -> {
+          kvCacheEntry.lastAccessedCopy =
+              kvCacheEntry.lastAccessed; // important because we want to avoid volatile read during
+          // comparisons

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/util/FileUtils.java
##########
@@ -94,17 +87,20 @@ public static boolean fileExists(String filePathString) {
   }
 
   // Files.createDirectories has odd behavior if the path is a symlink and it already exists
-  // _even if it's a symlink to a directory_. 
-  // 
-  // oddly, if the path to be created just contains a symlink in intermediate levels, Files.createDirectories
+  // _even if it's a symlink to a directory_.
+  //
+  // oddly, if the path to be created just contains a symlink in intermediate levels,
+  // Files.createDirectories
   // works just fine.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/DistributedZkUpdateProcessor.java
##########
@@ -630,22 +739,26 @@ void setupRequest(UpdateCommand cmd) {
     Slice slice = coll.getRouter().getTargetSlice(id, doc, route, req.getParams(), coll);
 
     if (slice == null) {
-      // No slice found.  Most strict routers will have already thrown an exception, so a null return is
+      // No slice found.  Most strict routers will have already thrown an exception, so a null
+      // return is
       // a signal to use the slice of this core.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/update/processor/TolerantUpdateProcessor.java
##########
@@ -381,12 +382,14 @@ public void annotate(List<ToleratedUpdateError> errors) {
         firstErrMetadata = new NamedList<String>();
         first.setMetadata(firstErrMetadata);
       } else {
-        // any existing metadata representing ToleratedUpdateErrors in this single exception needs removed
+        // any existing metadata representing ToleratedUpdateErrors in this single exception needs
+        // removed
         // so we can add *all* of the known ToleratedUpdateErrors (from this and other exceptions)

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/util/LongSet.java
##########
@@ -69,7 +73,8 @@ public boolean add(long val) {
     // and bit 52 for double precision.
     // Many values will only have significant bits just to the right of that.
 
-    // For now, lets just settle to get first 8 significant mantissa bits of double or float in the lowest bits of our hash
+    // For now, lets just settle to get first 8 significant mantissa bits of double or float in the
+    // lowest bits of our hash
     // The upper bits of our hash will be irrelevant.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/util/PackageTool.java
##########
@@ -234,92 +283,92 @@ protected void runImpl(CommandLine cli) throws Exception {
       log.info("Finished: {}", cmd);
 
     } catch (Exception ex) {
-      ex.printStackTrace(); // We need to print this since SolrCLI drops the stack trace in favour of brevity. Package tool should surely print full stacktraces!
+      ex.printStackTrace(); // We need to print this since SolrCLI drops the stack trace in favour
+      // of brevity. Package tool should surely print full stacktraces!

Review comment:
       Fix this
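       Likewise, the explanation could precede the call instead of trailing it, e.g.:

           // We need to print this since SolrCLI drops the stack trace in favour of brevity.
           // Package tool should surely print full stacktraces!
           ex.printStackTrace();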

##########
File path: solr/core/src/java/org/apache/solr/util/SolrCLI.java
##########
@@ -3092,30 +3328,43 @@ protected void waitToSeeLiveNodes(int maxWaitSecs, String zkHost, int numNodes)
 
       // don't display a huge path for solr home if it is relative to the cwd
       if (!isWindows && cwdPath.length() > 1 && solrHome.startsWith(cwdPath))
-        solrHome = solrHome.substring(cwdPath.length()+1);
+        solrHome = solrHome.substring(cwdPath.length() + 1);
 
       String startCmd =
-          String.format(Locale.ROOT, "\"%s\" start %s -p %d -s \"%s\" %s %s %s %s %s %s",
-              callScript, cloudModeArg, port, solrHome, hostArg, zkHostArg, memArg, forceArg, extraArgs, addlOptsArg);
+          String.format(
+              Locale.ROOT,
+              "\"%s\" start %s -p %d -s \"%s\" %s %s %s %s %s %s",
+              callScript,
+              cloudModeArg,
+              port,
+              solrHome,
+              hostArg,
+              zkHostArg,
+              memArg,
+              forceArg,
+              extraArgs,
+              addlOptsArg);
       startCmd = startCmd.replaceAll("\\s+", " ").trim(); // for pretty printing
 
       echo("\nStarting up Solr on port " + port + " using command:");
       echo(startCmd + "\n");
 
       String solrUrl =
-          String.format(Locale.ROOT, "%s://%s:%d/solr", urlScheme, (host != null ? host : "localhost"), port);
+          String.format(
+              Locale.ROOT, "%s://%s:%d/solr", urlScheme, (host != null ? host : "localhost"), port);
 
-      Map<String,Object> nodeStatus = checkPortConflict(solrUrl, solrHomeDir, port, cli);
+      Map<String, Object> nodeStatus = checkPortConflict(solrUrl, solrHomeDir, port, cli);
       if (nodeStatus != null)
         return nodeStatus; // the server they are trying to start is already running
 
       int code = 0;
       if (isWindows) {
         // On Windows, the execution doesn't return, so we have to execute async
-        // and when calling the script, it seems to be inheriting the environment that launched this app
+        // and when calling the script, it seems to be inheriting the environment that launched this
+        // app
         // so we have to prune out env vars that may cause issues

Review comment:
       Fix this




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@solr.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


