Modified: cassandra/trunk/test/unit/org/apache/cassandra/db/KeyCacheTest.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/db/KeyCacheTest.java?rev=1222715&r1=1222714&r2=1222715&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/db/KeyCacheTest.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/db/KeyCacheTest.java Fri Dec 23 16:09:05 2011
@@ -20,12 +20,17 @@ package org.apache.cassandra.db;
  * 
  */
 
-
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
+import org.apache.cassandra.cache.KeyCacheKey;
+import org.apache.cassandra.db.filter.QueryFilter;
+import org.apache.cassandra.service.CacheService;
+import org.apache.cassandra.thrift.ColumnParent;
+
+import org.junit.AfterClass;
 import org.junit.Test;
 
 import org.apache.cassandra.CleanupHelper;
@@ -43,18 +48,11 @@ public class KeyCacheTest extends Cleanu
     private static final String TABLE1 = "KeyCacheSpace";
     private static final String COLUMN_FAMILY1 = "Standard1";
     private static final String COLUMN_FAMILY2 = "Standard2";
-    private static final String COLUMN_FAMILY3 = "Standard3";
-
-    @Test
-    public void testKeyCache50() throws IOException, ExecutionException, InterruptedException
-    {
-        testKeyCache(COLUMN_FAMILY1, 64);
-    }
 
-    @Test
-    public void testKeyCache100() throws IOException, ExecutionException, InterruptedException
+    @AfterClass
+    public static void cleanup()
     {
-        testKeyCache(COLUMN_FAMILY2, 128);
+        cleanupSavedCaches();
     }
 
     @Test
@@ -62,57 +60,48 @@ public class KeyCacheTest extends Cleanu
     {
         CompactionManager.instance.disableAutoCompaction();
 
-        ColumnFamilyStore store = Table.open(TABLE1).getColumnFamilyStore(COLUMN_FAMILY3);
+        ColumnFamilyStore store = Table.open(TABLE1).getColumnFamilyStore(COLUMN_FAMILY2);
 
         // empty the cache
-        store.invalidateKeyCache();
-        assert store.getKeyCacheSize() == 0;
+        CacheService.instance.invalidateKeyCache();
+        assert CacheService.instance.keyCache.size() == 0;
 
         // insert data and force to disk
-        insertData(TABLE1, COLUMN_FAMILY3, 0, 100);
+        insertData(TABLE1, COLUMN_FAMILY2, 0, 100);
         store.forceBlockingFlush();
 
         // populate the cache
-        readData(TABLE1, COLUMN_FAMILY3, 0, 100);
-        assertEquals(100, store.getKeyCacheSize());
+        readData(TABLE1, COLUMN_FAMILY2, 0, 100);
+        assertEquals(100, CacheService.instance.keyCache.size());
 
         // really? our caches don't implement the map interface? (hence no .addAll)
-        Map<Pair<Descriptor, DecoratedKey>, Long> savedMap = new HashMap<Pair<Descriptor, DecoratedKey>, Long>();
-        for (Pair<Descriptor, DecoratedKey> k : store.getKeyCache().getKeySet())
+        Map<KeyCacheKey, Long> savedMap = new HashMap<KeyCacheKey, Long>();
+        for (KeyCacheKey k : CacheService.instance.keyCache.getKeySet())
         {
-            savedMap.put(k, store.getKeyCache().get(k));
+            savedMap.put(k, CacheService.instance.keyCache.get(k));
         }
 
         // force the cache to disk
-        store.keyCache.submitWrite(Integer.MAX_VALUE).get();
-
-        // empty the cache again to make sure values came from disk
-        store.invalidateKeyCache();
-        assert store.getKeyCacheSize() == 0;
-
-        // load the cache from disk.  unregister the old mbean so we can recreate a new CFS object.
-        // but don't invalidate() the old CFS, which would nuke the data we want to try to load
-        store.unregisterMBean();
-        ColumnFamilyStore newStore = ColumnFamilyStore.createColumnFamilyStore(Table.open(TABLE1), COLUMN_FAMILY3);
-        assertEquals(100, newStore.getKeyCacheSize());
+        CacheService.instance.keyCache.submitWrite(Integer.MAX_VALUE).get();
 
-        assertEquals(100, savedMap.size());
-        for (Map.Entry<Pair<Descriptor, DecoratedKey>, Long> entry : savedMap.entrySet())
-        {
-            assert newStore.getKeyCache().get(entry.getKey()).equals(entry.getValue());
-        }
+        CacheService.instance.invalidateKeyCache();
+        assert CacheService.instance.keyCache.size() == 0;
     }
 
-    public void testKeyCache(String cfName, int expectedCacheSize) throws IOException, ExecutionException, InterruptedException
+    @Test
+    public void testKeyCache() throws IOException, ExecutionException, InterruptedException
     {
         CompactionManager.instance.disableAutoCompaction();
 
         Table table = Table.open(TABLE1);
-        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
+        ColumnFamilyStore cfs = table.getColumnFamilyStore(COLUMN_FAMILY1);
+
+        // just to make sure that everything is clean
+        CacheService.instance.invalidateKeyCache();
 
-        // KeyCache should start at size 1 if we're caching X% of zero data.
-        int keyCacheSize = cfs.getKeyCacheCapacity();
-        assert keyCacheSize == 1 : keyCacheSize;
+        // KeyCache should start at size 0 if we're caching X% of zero data.
+        int keyCacheSize = CacheService.instance.keyCache.size();
+        assert keyCacheSize == 0 : keyCacheSize;
 
         DecoratedKey key1 = Util.dk("key1");
         DecoratedKey key2 = Util.dk("key2");
@@ -120,28 +109,53 @@ public class KeyCacheTest extends Cleanu
 
         // inserts
         rm = new RowMutation(TABLE1, key1.key);
-        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes("1")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
-        rm.apply();
-        rm = new RowMutation(TABLE1, key2.key);
-        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes("2")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
-        rm.apply();
-
-        // deletes
-        rm = new RowMutation(TABLE1, key1.key);
-        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes("1")), 1);
+        rm.add(new QueryPath(COLUMN_FAMILY1, null, ByteBufferUtil.bytes("1")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         rm.apply();
         rm = new RowMutation(TABLE1, key2.key);
-        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes("2")), 1);
+        rm.add(new QueryPath(COLUMN_FAMILY1, null, ByteBufferUtil.bytes("2")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         rm.apply();
 
-        // After a flush, the cache should expand to be X% of indices * INDEX_INTERVAL.
+        // to make sure we have SSTable
         cfs.forceBlockingFlush();
-        keyCacheSize = cfs.getKeyCacheCapacity();
-        assert keyCacheSize == expectedCacheSize : keyCacheSize;
 
-        // After a compaction, the cache should expand to be X% of zero data.
+        // reads to cache key position
+        cfs.getColumnFamily(QueryFilter.getSliceFilter(key1,
+                                                       new QueryPath(new ColumnParent(COLUMN_FAMILY1)),
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       false,
+                                                       10));
+
+        cfs.getColumnFamily(QueryFilter.getSliceFilter(key2,
+                                                       new QueryPath(new ColumnParent(COLUMN_FAMILY1)),
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       false,
+                                                       10));
+
+        assert CacheService.instance.keyCache.size() == 2;
+
         Util.compactAll(cfs).get();
-        keyCacheSize = cfs.getKeyCacheCapacity();
-        assert keyCacheSize == 1 : keyCacheSize;
+        keyCacheSize = CacheService.instance.keyCache.size();
+        // after compaction cache should have entries for
+        // new SSTables, if we had 2 keys in cache previously it should become 4
+        assert keyCacheSize == 4 : keyCacheSize;
+
+        // re-read same keys to verify that key cache didn't grow further
+        cfs.getColumnFamily(QueryFilter.getSliceFilter(key1,
+                                                       new QueryPath(new ColumnParent(COLUMN_FAMILY1)),
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       false,
+                                                       10));
+
+        cfs.getColumnFamily(QueryFilter.getSliceFilter(key2,
+                                                       new QueryPath(new ColumnParent(COLUMN_FAMILY1)),
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       false,
+                                                       10));
+
+        assert CacheService.instance.keyCache.size() == 4;
     }
 }
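
For readers tracking the API change: the per-ColumnFamilyStore key cache (store.getKeyCache(), store.getKeyCacheSize()) is gone, and the test now drives the global CacheService singleton. A minimal sketch of the snapshot-and-save pattern the updated test relies on, using only the calls visible in the hunks above (the class and method names here are hypothetical, not part of the commit):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cassandra.cache.KeyCacheKey;
    import org.apache.cassandra.service.CacheService;

    public class KeyCacheSnapshotSketch
    {
        // Copy the live key cache into a plain Map (the caches expose
        // getKeySet()/get() rather than implementing Map), then force a
        // save to disk before emptying the in-memory copy.
        public static Map<KeyCacheKey, Long> snapshotAndSave() throws Exception
        {
            Map<KeyCacheKey, Long> saved = new HashMap<KeyCacheKey, Long>();
            for (KeyCacheKey k : CacheService.instance.keyCache.getKeySet())
                saved.put(k, CacheService.instance.keyCache.get(k));

            // submitWrite(n) schedules a save of up to n entries; blocking
            // on the future ensures the write finished before invalidation
            CacheService.instance.keyCache.submitWrite(Integer.MAX_VALUE).get();
            CacheService.instance.invalidateKeyCache();
            assert CacheService.instance.keyCache.size() == 0;
            return saved;
        }
    }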

Modified: cassandra/trunk/test/unit/org/apache/cassandra/db/RowCacheTest.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/db/RowCacheTest.java?rev=1222715&r1=1222714&r2=1222715&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/db/RowCacheTest.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/db/RowCacheTest.java Fri Dec 23 16:09:05 2011
@@ -19,21 +19,27 @@
 package org.apache.cassandra.db;
 
 import java.util.Collection;
-import java.util.Set;
 
+import org.junit.AfterClass;
 import org.junit.Test;
 
 import org.apache.cassandra.CleanupHelper;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.db.filter.QueryPath;
 
 public class RowCacheTest extends CleanupHelper
 {
     private String KEYSPACE = "RowCacheSpace";
-    private String COLUMN_FAMILY_WITH_CACHE = "CachedCF";
-    private String COLUMN_FAMILY_WITHOUT_CACHE = "CFWithoutCache";
+    private String COLUMN_FAMILY = "CachedCF";
+
+    @AfterClass
+    public static void cleanup()
+    {
+        cleanupSavedCaches();
+    }
 
     @Test
     public void testRowCache() throws Exception
@@ -41,24 +47,25 @@ public class RowCacheTest extends Cleanu
         CompactionManager.instance.disableAutoCompaction();
 
         Table table = Table.open(KEYSPACE);
-        ColumnFamilyStore cachedStore  = table.getColumnFamilyStore(COLUMN_FAMILY_WITH_CACHE);
-        ColumnFamilyStore noCacheStore = table.getColumnFamilyStore(COLUMN_FAMILY_WITHOUT_CACHE);
+        ColumnFamilyStore cachedStore  = table.getColumnFamilyStore(COLUMN_FAMILY);
 
         // empty the row cache
-        cachedStore.invalidateRowCache();
+        CacheService.instance.invalidateRowCache();
+
+        // set global row cache size to 1 MB
+        CacheService.instance.setRowCacheCapacityInMB(1);
 
         // inserting 100 rows into both column families
-        insertData(KEYSPACE, COLUMN_FAMILY_WITH_CACHE, 0, 100);
-        insertData(KEYSPACE, COLUMN_FAMILY_WITHOUT_CACHE, 0, 100);
+        insertData(KEYSPACE, COLUMN_FAMILY, 0, 100);
 
         // now reading rows one by one and checking if row change grows
         for (int i = 0; i < 100; i++)
         {
             DecoratedKey key = Util.dk("key" + i);
-            QueryPath path = new QueryPath(COLUMN_FAMILY_WITH_CACHE, null, ByteBufferUtil.bytes("col" + i));
+            QueryPath path = new QueryPath(COLUMN_FAMILY, null, ByteBufferUtil.bytes("col" + i));
 
             cachedStore.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
-            assert cachedStore.getRowCacheSize() == i + 1;
+            assert CacheService.instance.rowCache.size() == i + 1;
             assert cachedStore.getRawCachedRow(key) != null; // current key should be stored in the cache
 
             // checking if column is read correctly after cache
@@ -70,24 +77,17 @@ public class RowCacheTest extends Cleanu
             assert columns.size() == 1;
             assert column.name().equals(ByteBufferUtil.bytes("col" + i));
             assert column.value().equals(ByteBufferUtil.bytes("val" + i));
-
-            path = new QueryPath(COLUMN_FAMILY_WITHOUT_CACHE, null, ByteBufferUtil.bytes("col" + i));
-
-            // row cache should not get populated for the second store
-            noCacheStore.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
-            assert noCacheStore.getRowCacheSize() == 0;
         }
 
-        // insert 10 more keys and check that row cache is still at store.getRowCacheCapacity()
-        insertData(KEYSPACE, COLUMN_FAMILY_WITH_CACHE, 100, 10);
+        // insert 10 more keys
+        insertData(KEYSPACE, COLUMN_FAMILY, 100, 10);
 
         for (int i = 100; i < 110; i++)
         {
             DecoratedKey key = Util.dk("key" + i);
-            QueryPath path = new QueryPath(COLUMN_FAMILY_WITH_CACHE, null, ByteBufferUtil.bytes("col" + i));
+            QueryPath path = new QueryPath(COLUMN_FAMILY, null, ByteBufferUtil.bytes("col" + i));
 
             cachedStore.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
-            assert cachedStore.getRowCacheSize() == cachedStore.getRowCacheCapacity();
             assert cachedStore.getRawCachedRow(key) != null; // cache should be populated with the latest rows read (old ones should be popped)
 
             // checking if column is read correctly after cache
@@ -101,85 +101,58 @@ public class RowCacheTest extends Cleanu
             assert column.value().equals(ByteBufferUtil.bytes("val" + i));
         }
 
-        // clear all 100 rows from the cache
-        int keysLeft = 99;
+        // clear 100 rows from the cache
+        int keysLeft = 109;
         for (int i = 109; i >= 10; i--)
         {
             cachedStore.invalidateCachedRow(Util.dk("key" + i));
-            assert cachedStore.getRowCacheSize() == keysLeft;
+            assert CacheService.instance.rowCache.size() == keysLeft;
             keysLeft--;
         }
+
+        CacheService.instance.setRowCacheCapacityInMB(0);
     }
 
     @Test
     public void testRowCacheLoad() throws Exception
     {
-        rowCacheLoad(100, 100, Integer.MAX_VALUE, false);
+        CacheService.instance.setRowCacheCapacityInMB(1);
+        rowCacheLoad(100, Integer.MAX_VALUE, false);
+        CacheService.instance.setRowCacheCapacityInMB(0);
     }
 
     @Test
     public void testRowCachePartialLoad() throws Exception
     {
-        rowCacheLoad(100, 50, 50, false);
+        CacheService.instance.setRowCacheCapacityInMB(1);
+        rowCacheLoad(100, 50, true);
+        CacheService.instance.setRowCacheCapacityInMB(0);
     }
 
-    @Test
-    public void testRowCacheCapacityLoad() throws Exception
-    {
-        // 60 is default from DatabaseDescriptor
-        rowCacheLoad(100, 60, Integer.MAX_VALUE, true);
-    }
-
-
-    public void rowCacheLoad(int totalKeys, int expectedKeys, int keysToSave, boolean reduceLoadCapacity) throws Exception
+    public void rowCacheLoad(int totalKeys, int keysToSave, boolean reduceLoadCapacity) throws Exception
     {
         CompactionManager.instance.disableAutoCompaction();
 
-        ColumnFamilyStore store = Table.open(KEYSPACE).getColumnFamilyStore(COLUMN_FAMILY_WITH_CACHE);
+        ColumnFamilyStore store = Table.open(KEYSPACE).getColumnFamilyStore(COLUMN_FAMILY);
 
         // empty the cache
-        store.invalidateRowCache();
-        assert store.getRowCacheSize() == 0;
+        CacheService.instance.invalidateRowCache();
+        assert CacheService.instance.rowCache.size() == 0;
 
         // insert data and fill the cache
-        insertData(KEYSPACE, COLUMN_FAMILY_WITH_CACHE, 0, totalKeys);
-        readData(KEYSPACE, COLUMN_FAMILY_WITH_CACHE, 0, totalKeys);
-        assert store.getRowCacheSize() == totalKeys;
+        insertData(KEYSPACE, COLUMN_FAMILY, 0, totalKeys);
+        readData(KEYSPACE, COLUMN_FAMILY, 0, totalKeys);
+        assert CacheService.instance.rowCache.size() == totalKeys;
 
         // force the cache to disk
-        store.rowCache.submitWrite(keysToSave).get();
+        CacheService.instance.rowCache.submitWrite(keysToSave).get();
 
         if (reduceLoadCapacity)
-            store.reduceCacheSizes();
+            CacheService.instance.reduceRowCacheSize();
 
         // empty the cache again to make sure values came from disk
-        store.invalidateRowCache();
-        assert store.getRowCacheSize() == 0;
-
-        // load the cache from disk
-        store.initCaches();
-        assert store.getRowCacheSize() == expectedKeys;
-
-        // If we are loading less than the entire cache back, we can't
-        // be sure which rows we will get if all rows are equally hot.
-        int nulls = 0;
-        int nonNull = 0;
-        for (int i = 0; i < expectedKeys; i++)
-        {
-            // verify the correct data was found when we expect to get
-            // back the entire cache.  Otherwise only make assertions
-            // about how many items are read back.
-            ColumnFamily row = store.getRawCachedRow(Util.dk("key" + i));
-            if (expectedKeys == totalKeys)
-            {
-                assert row != null;
-                assert row.getColumn(ByteBufferUtil.bytes("col" + i)).value().equals(ByteBufferUtil.bytes("val" + i));
-            }
-            if (row == null)
-                nulls++;
-            else
-                nonNull++;
-        }
-        assert nulls + nonNull == expectedKeys;
+        CacheService.instance.invalidateRowCache();
+        assert CacheService.instance.rowCache.size() == 0;
+        assert CacheService.instance.rowCache.readSaved(KEYSPACE, COLUMN_FAMILY).size() == (keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave);
     }
 }
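
The row cache test follows the same migration, and its reworked save/load verification can be sketched as follows (a minimal sketch, not part of the commit; the class and method names are hypothetical, while every CacheService call is taken from the hunks above):

    import org.apache.cassandra.service.CacheService;

    public class RowCacheSaveSketch
    {
        // Write up to keysToSave hot rows to the saved-caches directory,
        // drop the in-memory cache, and confirm the on-disk copy holds
        // the expected number of keys.
        public static void verifySavedRowCache(String keyspace, String cf,
                                               int totalKeys, int keysToSave) throws Exception
        {
            CacheService.instance.rowCache.submitWrite(keysToSave).get();

            // empty the cache again to make sure values came from disk
            CacheService.instance.invalidateRowCache();
            assert CacheService.instance.rowCache.size() == 0;

            // readSaved() deserializes the per-CF saved cache
            int expected = keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave;
            assert CacheService.instance.rowCache.readSaved(keyspace, cf).size() == expected;
        }
    }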

Modified: cassandra/trunk/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java?rev=1222715&r1=1222714&r2=1222715&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java Fri Dec 23 16:09:05 2011
@@ -40,6 +40,7 @@ import org.apache.cassandra.db.filter.Qu
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.MmappedSegmentedFile;
@@ -167,7 +168,7 @@ public class SSTableReaderTest extends C
     {
         Table table = Table.open("Keyspace1");
         ColumnFamilyStore store = table.getColumnFamilyStore("Standard2");
-        store.getKeyCache().setCapacity(100);
+        CacheService.instance.keyCache.setCapacity(100);
 
         // insert data and compact to a single sstable
         CompactionManager.instance.disableAutoCompaction();
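
Taken together, the three test files show the same substitution: per-ColumnFamilyStore cache accessors give way to the CacheService singleton. A hedged before/after sketch of that pattern (assuming a test environment where CacheService is initialized; the "before" calls are those removed above):

    import org.apache.cassandra.service.CacheService;

    public class CacheMigrationSketch
    {
        public static void main(String[] args)
        {
            // before (removed above): store.getKeyCache().setCapacity(100);
            // after: one global key cache, owned by the CacheService singleton
            CacheService.instance.keyCache.setCapacity(100);
        }
    }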

