Updated Branches:
  refs/heads/cassandra-1.1 36f8a5d81 -> 7d36c1e02
  refs/heads/cassandra-1.2 83ff1da82 -> 813a937b2
  refs/heads/trunk 56e59fefc -> d11ba8ab8


Use allocator information to improve memtable memory usage estimate
patch by jbellis; reviewed by jasobrown for CASSANDRA-5497


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7d36c1e0
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7d36c1e0
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7d36c1e0

Branch: refs/heads/cassandra-1.1
Commit: 7d36c1e025abbcdd4ed50c580bbd990fd262bbed
Parents: 36f8a5d
Author: Jonathan Ellis <jbel...@apache.org>
Authored: Thu Apr 18 22:35:21 2013 -0500
Committer: Jonathan Ellis <jbel...@apache.org>
Committed: Thu Apr 18 22:36:10 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                        |    3 ++
 src/java/org/apache/cassandra/db/Memtable.java     |   11 ++++++-
 .../org/apache/cassandra/utils/SlabAllocator.java  |   23 ++++++++++++++-
 3 files changed, 35 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d36c1e0/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 203a00d..30a09c9 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,5 +1,8 @@
 1.1.12
 * Add retry mechanism to OTC for non-droppable verbs (CASSANDRA-5393)
+ * Use allocator information to improve memtable memory usage estimate 
+   (CASSANDRA-5497)
+
 
 1.1.11
  * Fix trying to load deleted row into row cache on startup (CASSANDRA-4463)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d36c1e0/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index 347b997..a713cac 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -120,9 +120,18 @@ public class Memtable
 
     public long getLiveSize()
     {
+
         // 25% fudge factor on the base throughput * liveRatio calculation.  (Based on observed
         // pre-slabbing behavior -- not sure what accounts for this. May have changed with introduction of slabbing.)
-        return (long) (currentSize.get() * cfs.liveRatio * 1.25);
+        long estimatedSize = (long) (currentSize.get() * cfs.liveRatio * 1.25);
+
+        // cap the estimate at both ends by what the allocator can tell us
+        if (estimatedSize < allocator.getMinimumSize())
+            return allocator.getMinimumSize();
+        if (estimatedSize > allocator.getMaximumSize())
+            return allocator.getMaximumSize();
+
+        return estimatedSize;
     }
 
     public long getSerializedSize()

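For readers following along: the change turns the raw heuristic into a two-sided clamp. A minimal standalone sketch of the same logic using Math.min/Math.max (the method and parameter names below are illustrative, not Cassandra's API):

    // Equivalent formulation of the patched getLiveSize(): the
    // throughput * liveRatio * 1.25 heuristic, clamped into the
    // [minimum, maximum] range the allocator can actually guarantee.
    static long clampedLiveSize(long currentSize, double liveRatio,
                                long allocatorMin, long allocatorMax)
    {
        long estimated = (long) (currentSize * liveRatio * 1.25);
        return Math.max(allocatorMin, Math.min(allocatorMax, estimated));
    }
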
http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d36c1e0/src/java/org/apache/cassandra/utils/SlabAllocator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/SlabAllocator.java b/src/java/org/apache/cassandra/utils/SlabAllocator.java
index 6e477fe..24a2f65 100644
--- a/src/java/org/apache/cassandra/utils/SlabAllocator.java
+++ b/src/java/org/apache/cassandra/utils/SlabAllocator.java
@@ -21,6 +21,7 @@ package org.apache.cassandra.utils;
 
 import java.nio.ByteBuffer;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
 import com.google.common.base.Preconditions;
@@ -49,7 +50,8 @@ public class SlabAllocator extends Allocator
     private final static int MAX_CLONED_SIZE = 128 * 1024; // bigger than this don't go in the region
 
     private final AtomicReference<Region> currentRegion = new AtomicReference<Region>();
-    private volatile int regionCount;
+    private volatile int regionCount = 0;
+    private AtomicLong unslabbed = new AtomicLong(0);
 
     public ByteBuffer allocate(int size)
     {
@@ -60,7 +62,10 @@ public class SlabAllocator extends Allocator
         // satisfy large allocations directly from JVM since they don't cause fragmentation
         // as badly, and fill up our regions quickly
         if (size > MAX_CLONED_SIZE)
+        {
+            unslabbed.addAndGet(size);
             return ByteBuffer.allocate(size);
+        }
 
         while (true)
         {
@@ -106,6 +111,22 @@ public class SlabAllocator extends Allocator
     }
 
     /**
+     * @return a lower bound on how much space has been allocated
+     */
+    public long getMinimumSize()
+    {
+        return unslabbed.get() + (regionCount - 1) * REGION_SIZE;
+    }
+
+    /**
+     * @return an upper bound on how much space has been allocated
+     */
+    public long getMaximumSize()
+    {
+        return unslabbed.get() + regionCount * REGION_SIZE;
+    }
+
+    /**
      * A region of memory out of which allocations are sliced.
      *
      * This serves two purposes:

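The asymmetry between the two bounds comes from the current region: every region is REGION_SIZE bytes, but the newest one may be anywhere between empty and full, so only the regionCount - 1 older regions are known to be fully consumed. A quick worked example, assuming Cassandra's 1 MB REGION_SIZE (the constant itself is not shown in this diff, so treat the value as an assumption):

    long REGION_SIZE = 1024 * 1024;  // assumed 1 MB slab region size
    long unslabbed   = 256 * 1024;   // large allocations that bypassed the slabs
    int  regionCount = 3;            // regions created so far

    // The newest region may be empty..full, so it only counts toward the maximum:
    long min = unslabbed + (regionCount - 1) * REGION_SIZE; // 2,359,296 bytes
    long max = unslabbed + regionCount * REGION_SIZE;       // 3,407,872 bytes
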