On Fri, Jan 23, 2009 at 2:12 PM, Jaco <jdevr...@gmail.com> wrote:

> Hi,
>
> I applied the patch and did some more tests - also adding some LOG.info()
> calls in delTree to see if it actually gets invoked (LOG.info("START:
> delTree: "+dir.getName()); at the start of that method). I don't see any
> entries of this showing up in the log file at all, so it looks like delTree
> doesn't get invoked at all.
>
> To be sure, explaining the issue to prevent misunderstanding:
> - The number of files in the index directory on the slave keeps increasing
> (in my very small test core, there are now 128 files in the slave's index
> directory, and only 73 files in the master's index directory)
> - The directories index.xxxxx are still there after replication, but they
> are empty
>
> Are there any other things I can do to check, or more info that I can provide
> to help fix this?
>

The problem is that when we do a commit on the slave after replication is
done, the commit does not re-open the IndexWriter. Therefore, the deletion
policy does not take effect and older files are left as is. This can keep on
building up. The only solution is to re-open the index writer.

I think the attached patch can solve this problem. Can you try this and let
us know? Thank you for your patience.

-- 
Regards,
Shalin Shekhar Mangar.
Index: src/java/org/apache/solr/handler/SnapPuller.java
===================================================================
--- src/java/org/apache/solr/handler/SnapPuller.java	(revision 736746)
+++ src/java/org/apache/solr/handler/SnapPuller.java	Fri Jan 23 16:47:41 IST 2009
@@ -27,6 +27,7 @@
 import static org.apache.solr.handler.ReplicationHandler.*;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.update.CommitUpdateCommand;
+import org.apache.solr.update.DirectUpdateHandler2;
 import org.apache.solr.util.RefCounted;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -281,14 +282,14 @@
         replicationStartTime = 0;
         return successfulInstall;
       } catch (ReplicationHandlerException e) {
-        delTree(tmpIndexDir);
         LOG.error("User aborted Replication");
       } catch (SolrException e) {
-        delTree(tmpIndexDir);
         throw e;
       } catch (Exception e) {
         delTree(tmpIndexDir);
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Snappull failed : ", e);
+      } finally {
+        delTree(tmpIndexDir);
       }
       return successfulInstall;
     } finally {
@@ -349,7 +350,15 @@
     cmd.waitFlush = true;
     cmd.waitSearcher = true;
     solrCore.getUpdateHandler().commit(cmd);
+    if (solrCore.getUpdateHandler() instanceof DirectUpdateHandler2) {
+      LOG.info("Re-opening index writer to make sure older index files get deleted");
+      DirectUpdateHandler2 handler = (DirectUpdateHandler2) solrCore.getUpdateHandler();
+      handler.reOpenWriter();
+    } else  {
+      LOG.warn("The update handler is not an instance or sub-class of DirectUpdateHandler2. " +
+              "ReplicationHandler may not be able to cleanup un-used index files.");
-  }
+    }
+  }
 
 
   /**
Index: src/java/org/apache/solr/update/DirectUpdateHandler2.java
===================================================================
--- src/java/org/apache/solr/update/DirectUpdateHandler2.java	(revision 736614)
+++ src/java/org/apache/solr/update/DirectUpdateHandler2.java	Fri Jan 23 16:23:36 IST 2009
@@ -187,7 +187,7 @@
     addCommands.incrementAndGet();
     addCommandsCumulative.incrementAndGet();
     int rc=-1;
-    
+
     // if there is no ID field, use allowDups
     if( idField == null ) {
       cmd.allowDups = true;
@@ -259,7 +259,7 @@
     } finally {
       iwCommit.unlock();
     }
-    
+
     if( tracker.timeUpperBound > 0 ) {
       tracker.scheduleCommitWithin( tracker.timeUpperBound );
     }
@@ -294,7 +294,7 @@
          deleteAll();
        } else {
         openWriter();
-        writer.deleteDocuments(q);         
+        writer.deleteDocuments(q);
        }
      } finally {
        iwCommit.unlock();
@@ -313,8 +313,15 @@
     }
   }
 
+  public void reOpenWriter() throws IOException  {
+    iwCommit.lock();
+    try {
+      openWriter();
+    } finally {
+      iwCommit.unlock();
+    }
+  }
 
-
   public void commit(CommitUpdateCommand cmd) throws IOException {
 
     if (cmd.optimize) {
@@ -419,14 +426,14 @@
         tracker.pending.cancel( true );
         tracker.pending = null;
       }
-      tracker.scheduler.shutdown(); 
+      tracker.scheduler.shutdown();
       closeWriter();
     } finally {
       iwCommit.unlock();
     }
     log.info("closed " + this);
   }
-    
+
   /** Helper class for tracking autoCommit state.
    *
    * Note: This is purely an implementation detail of autoCommit and will
@@ -435,8 +442,8 @@
    *
    * Note: all access must be synchronized.
    */
-  class CommitTracker implements Runnable 
-  {  
+  class CommitTracker implements Runnable
+  {
     // scheduler delay for maxDoc-triggered autocommits
     public final int DOC_COMMIT_DELAY_MS = 250;
 
@@ -447,12 +454,12 @@
     private final ScheduledExecutorService scheduler =
        Executors.newScheduledThreadPool(1);
     private ScheduledFuture pending;
-    
+
     // state
-    long docsSinceCommit;    
+    long docsSinceCommit;
     int autoCommitCount = 0;
     long lastAddedTime = -1;
-    
+
     public CommitTracker() {
       docsSinceCommit = 0;
       pending = null;
@@ -464,27 +471,27 @@
     }
 
     /** schedule individual commits */
-    public synchronized void scheduleCommitWithin(long commitMaxTime) 
+    public synchronized void scheduleCommitWithin(long commitMaxTime)
     {
       _scheduleCommitWithin( commitMaxTime );
     }
-    
-    private void _scheduleCommitWithin(long commitMaxTime) 
+
+    private void _scheduleCommitWithin(long commitMaxTime)
     {
       // Check if there is a commit already scheduled for longer then this time
-      if( pending != null && 
-          pending.getDelay(TimeUnit.MILLISECONDS) >= commitMaxTime ) 
+      if( pending != null &&
+          pending.getDelay(TimeUnit.MILLISECONDS) >= commitMaxTime )
       {
         pending.cancel(false);
         pending = null;
       }
-      
+
       // schedule a new commit
       if( pending == null ) {
         pending = scheduler.schedule( this, commitMaxTime, TimeUnit.MILLISECONDS );
       }
     }
-    
+
     /** Indicate that documents have been added
      */
     public void addedDocument( int commitWithin ) {
@@ -494,7 +501,7 @@
       if( docsUpperBound > 0 && (docsSinceCommit > docsUpperBound) ) {
         _scheduleCommitWithin( DOC_COMMIT_DELAY_MS );
       }
-      
+
       // maxTime-triggered autoCommit
       long ctime = (commitWithin>0) ? commitWithin : timeUpperBound;
       if( ctime > 0 ) {
@@ -530,7 +537,7 @@
         //no need for command.maxOptimizeSegments = 1;  since it is not optimizing
         commit( command );
         autoCommitCount++;
-      } 
+      }
       catch (Exception e) {
         log.error( "auto commit error..." );
         e.printStackTrace();
@@ -555,7 +562,7 @@
 
     public String toString() {
       if(timeUpperBound > 0 || docsUpperBound > 0) {
-        return 
+        return
           (timeUpperBound > 0 ? ("if uncommited for " + timeUpperBound + "ms; ") : "") +
           (docsUpperBound > 0 ? ("if " + docsUpperBound + " uncommited docs ") : "");
 
@@ -564,8 +571,8 @@
       }
     }
   }
-      
-  
+
+
   /////////////////////////////////////////////////////////////////////
   // SolrInfoMBean stuff: Statistics and Module Info
   /////////////////////////////////////////////////////////////////////

Reply via email to