Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 65bd45523 -> d8eec1cd6


Fixed checkAvailableDiskSpace to properly recalculate the expected disk usage
of the compaction task before reducing scope

patch by Jon Haddad; reviewed by Nate McCall for CASSANDRA-12979


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d8eec1cd
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d8eec1cd
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d8eec1cd

Branch: refs/heads/cassandra-3.0
Commit: d8eec1cd6d095961b3754b0de41ffc9061cba370
Parents: 65bd455
Author: Jon Haddad <j...@jonhaddad.com>
Authored: Wed Jan 4 11:52:54 2017 -0800
Committer: Nate McCall <zznat...@gmail.com>
Committed: Tue Jan 10 15:24:36 2017 +1300

----------------------------------------------------------------------
 CHANGES.txt                                     |  2 +
 NEWS.txt                                        |  3 ++
 .../cassandra/db/compaction/CompactionTask.java | 40 +++++++++++++++-----
 3 files changed, 36 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
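
In short: before this patch, expectedWriteSize was computed once, ahead of the
retry loop, so every time reduceScopeForLimitedSpace() dropped an sstable the
disk space check kept testing the original, stale estimate. The fix recomputes
the estimate on each iteration. A minimal sketch of the corrected shape, where
estimateWriteSize() and hasSpace() are hypothetical stand-ins for the real
cfs.getExpectedCompactedFileSize() and Directories.hasAvailableDiskSpace()
calls:

    protected void checkAvailableDiskSpace()
    {
        while (true)
        {
            // Recomputed on every pass, so it reflects any sstables already
            // dropped by reduceScopeForLimitedSpace().
            long expectedWriteSize = estimateWriteSize(transaction.originals()); // stand-in
            long estimatedSSTables = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());

            if (hasSpace(estimatedSSTables, expectedWriteSize)) // stand-in
                break; // the (possibly reduced) compaction now fits

            if (!reduceScopeForLimitedSpace(expectedWriteSize))
                throw new RuntimeException("Not enough space for compaction");
        }
    }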


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8eec1cd/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 11b0482..a533e6f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -18,6 +18,7 @@
 * AnticompactionRequestSerializer serializedSize is incorrect (CASSANDRA-12934)
  * Prevent reloading of logback.xml from UDF sandbox (CASSANDRA-12535)
  * Reenable HeapPool (CASSANDRA-12900)
+ * CompactionTask now correctly drops sstables out of compaction when not enough disk space is available (CASSANDRA-12979)
 Merged from 2.2:
  * Remove support for non-JavaScript UDFs (CASSANDRA-12883)
 * Fix DynamicEndpointSnitch noop in multi-datacenter situations (CASSANDRA-13074)
@@ -34,6 +35,7 @@ Merged from 2.1:
  * cqlsh copy-from: sort user type fields in csv (CASSANDRA-12959)
 
 
+
 3.0.10
  * Disallow offheap_buffers memtable allocation (CASSANDRA-11039)
  * Fix CommitLogSegmentManagerTest (CASSANDRA-12283)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8eec1cd/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index b4e6551..3b8a333 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -26,6 +26,9 @@ Upgrading
    - Only Java and JavaScript are now supported UDF languages.
     The sandbox in 3.0 already prevented the use of script languages except Java
     and JavaScript.
+   - Compaction now correctly drops sstables out of CompactionTask when there
+     isn't enough disk space to perform the full compaction.  This should reduce
+     pending compaction tasks on systems with little remaining disk space.
 
 3.0.10
 =====

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8eec1cd/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index f0a1f47..1636ab9 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -90,12 +90,14 @@ public class CompactionTask extends AbstractCompactionTask
         return transaction.originals().size();
     }
 
-    public boolean reduceScopeForLimitedSpace()
+    public boolean reduceScopeForLimitedSpace(long expectedSize)
     {
         if (partialCompactionsAcceptable() && transaction.originals().size() > 1)
         {
             // Try again w/o the largest one.
-            logger.warn("insufficient space to compact all requested files 
{}", StringUtils.join(transaction.originals(), ", "));
+            logger.warn("insufficient space to compact all requested files. 
{}MB required, {}",
+                        (float) expectedSize / 1024 / 1024,
+                        StringUtils.join(transaction.originals(), ", "));
             // Note that we have removed files that are still marked as compacting.
             // This is suboptimal but ok since the caller will unmark all the sstables at the end.
             SSTableReader removedSSTable = cfs.getMaxSizeFile(transaction.originals());
@@ -128,9 +130,8 @@ public class CompactionTask extends AbstractCompactionTask
 
         // note that we need to do a rough estimate early if we can fit the compaction on disk - this is pessimistic, but
         // since we might remove sstables from the compaction in checkAvailableDiskSpace it needs to be done here
-        long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
-        long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
-        checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);
+
+        checkAvailableDiskSpace();
 
         // sanity check: all sstables must belong to the same cfs
         assert !Iterables.any(transaction.originals(), new Predicate<SSTableReader>()
@@ -283,7 +284,12 @@ public class CompactionTask extends AbstractCompactionTask
         return minRepairedAt;
     }
 
-    protected void checkAvailableDiskSpace(long estimatedSSTables, long expectedWriteSize)
+    /*
+    Checks if we have enough disk space to execute the compaction.  Drops the largest sstable out of the Task until
+    there's enough space (in theory) to handle the compaction.  Does not take into account space that will be taken by
+    other compactions.
+     */
+    protected void checkAvailableDiskSpace()
     {
         if(!cfs.isCompactionDiskSpaceCheckEnabled() && compactionType == OperationType.COMPACTION)
         {
@@ -291,10 +297,26 @@ public class CompactionTask extends AbstractCompactionTask
             return;
         }
 
-        while (!getDirectories().hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+        CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
+
+        while(true)
         {
-            if (!reduceScopeForLimitedSpace())
-                throw new RuntimeException(String.format("Not enough space for compaction, estimated sstables = %d, expected write size = %d", estimatedSSTables, expectedWriteSize));
+            long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
+            long estimatedSSTables = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
+
+            if(cfs.getDirectories().hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+                break;
+
+            if (!reduceScopeForLimitedSpace(expectedWriteSize))
+            {
+                // we end up here if we can't take any more sstables out of the compaction.
+                // usually means we've run out of disk space
+                String msg = String.format("Not enough space for compaction, estimated sstables = %d, expected write size = %d", estimatedSSTables, expectedWriteSize);
+                logger.warn(msg);
+                throw new RuntimeException(msg);
+            }
+            logger.warn("Not enough space for compaction, {}MB estimated.  
Reducing scope.",
+                            (float) expectedWriteSize / 1024 / 1024);
         }
     }
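
To watch the reduction loop converge, here is a self-contained toy run of the
same logic. All sizes are invented, and a plain sum stands in for the
pessimistic estimate that cfs.getExpectedCompactedFileSize() produces:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class ReduceScopeExample
    {
        public static void main(String[] args)
        {
            // Invented per-sstable sizes in MB; 120000MB free on disk.
            List<Long> sizes = new ArrayList<>(Arrays.asList(100_000L, 60_000L, 40_000L));
            long availableMb = 120_000L;

            while (true)
            {
                // Recompute the estimate from the current, possibly reduced set.
                long expected = sizes.stream().mapToLong(Long::longValue).sum();
                if (expected <= availableMb)
                {
                    System.out.printf("compacting %d sstables, %dMB expected%n", sizes.size(), expected);
                    break;
                }
                if (sizes.size() <= 1)
                    throw new RuntimeException("Not enough space for compaction");
                // Mirror reduceScopeForLimitedSpace(): drop the largest sstable and retry.
                System.out.printf("insufficient space, %dMB required, dropping largest sstable%n", expected);
                sizes.remove(Collections.max(sizes));
            }
        }
    }

With 120000MB free this prints one reduction (200000MB required) and then
compacts the remaining two sstables at an estimated 100000MB, instead of
failing the whole task against the stale 200000MB figure.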
 
