Author: jbellis Date: Tue Jul 27 04:02:04 2010 New Revision: 979516 URL: http://svn.apache.org/viewvc?rev=979516&view=rev Log: fix commitlog tests post-1135. patch by mdennis; reviewed by jbellis for CASSANDRA-1318
Modified: cassandra/trunk/test/unit/org/apache/cassandra/db/CommitLogTest.java cassandra/trunk/test/unit/org/apache/cassandra/db/RecoveryManager2Test.java Modified: cassandra/trunk/test/unit/org/apache/cassandra/db/CommitLogTest.java URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/db/CommitLogTest.java?rev=979516&r1=979515&r2=979516&view=diff ============================================================================== --- cassandra/trunk/test/unit/org/apache/cassandra/db/CommitLogTest.java (original) +++ cassandra/trunk/test/unit/org/apache/cassandra/db/CommitLogTest.java Tue Jul 27 04:02:04 2010 @@ -20,7 +20,6 @@ package org.apache.cassandra.db; import java.io.*; -import java.util.concurrent.ExecutionException; import java.util.zip.CRC32; import java.util.zip.Checksum; @@ -28,7 +27,6 @@ import org.junit.Test; import org.apache.cassandra.CleanupHelper; import org.apache.cassandra.db.commitlog.CommitLog; - import org.apache.cassandra.db.commitlog.CommitLogHeader; import org.apache.cassandra.db.filter.QueryPath; import org.apache.cassandra.utils.Pair; @@ -36,18 +34,21 @@ import org.apache.cassandra.utils.Pair; public class CommitLogTest extends CleanupHelper { @Test - public void testCleanup() throws IOException, ExecutionException, InterruptedException + public void testCleanup() throws Exception { - assert CommitLog.instance().getSegmentCount() == 1; - CommitLog.setSegmentSize(1000); + int segmentCount = CommitLog.instance().getSegmentCount(); + assert segmentCount == 1 : segmentCount + " != 1"; + + //must be large enough to hold persistent_stats + CommitLog.setSegmentSize(10000); Table table = Table.open("Keyspace1"); ColumnFamilyStore store1 = table.getColumnFamilyStore("Standard1"); ColumnFamilyStore store2 = table.getColumnFamilyStore("Standard2"); RowMutation rm; - byte[] value = new byte[501]; + byte[] value = new byte[5001]; - // add data. 
use relatively large values to force quick segment creation since we have a low flush threshold in the test config. + // add data, one each of Standard1/Standard2 per segment for (int i = 0; i < 10; i++) { rm = new RowMutation("Keyspace1", "key1".getBytes()); @@ -59,11 +60,13 @@ public class CommitLogTest extends Clean // nothing should get removed after flushing just Standard1 store1.forceBlockingFlush(); - assert CommitLog.instance().getSegmentCount() > 1; + segmentCount = CommitLog.instance().getSegmentCount(); + assert segmentCount > 1 : segmentCount + " !> 1"; // after flushing Standard2 we should be able to clean out all segments store2.forceBlockingFlush(); - assert CommitLog.instance().getSegmentCount() == 1; + segmentCount = CommitLog.instance().getSegmentCount(); + assert segmentCount == 1 : segmentCount + " != 1"; } @Test Modified: cassandra/trunk/test/unit/org/apache/cassandra/db/RecoveryManager2Test.java URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/db/RecoveryManager2Test.java?rev=979516&r1=979515&r2=979516&view=diff ============================================================================== --- cassandra/trunk/test/unit/org/apache/cassandra/db/RecoveryManager2Test.java (original) +++ cassandra/trunk/test/unit/org/apache/cassandra/db/RecoveryManager2Test.java Tue Jul 27 04:02:04 2010 @@ -22,21 +22,19 @@ package org.apache.cassandra.db; import java.io.IOException; -import java.util.concurrent.ExecutionException; - -import org.apache.cassandra.Util; import org.junit.Test; +import static org.apache.cassandra.Util.column; import org.apache.cassandra.CleanupHelper; +import org.apache.cassandra.Util; import org.apache.cassandra.db.commitlog.CommitLog; -import static org.apache.cassandra.Util.column; - public class RecoveryManager2Test extends CleanupHelper { @Test - public void testWithFlush() throws IOException, ExecutionException, InterruptedException + /* test that commit logs do not replay flushed data */ + public 
void testWithFlush() throws Exception { CompactionManager.instance.disableAutoCompaction(); @@ -50,9 +48,19 @@ public class RecoveryManager2Test extend ColumnFamilyStore cfs = table1.getColumnFamilyStore("Standard1"); cfs.forceBlockingFlush(); + // forceBlockingFlush above adds persistent stats to the current commit log segment + // it ends up in the same segment as key99 meaning that segment still has unwritten data + // thus the commit log replays it when recover is called below + Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(StatisticsTable.STATISTICS_CF).forceBlockingFlush(); + + // remove all SSTable/MemTables cfs.clearUnsafe(); - CommitLog.recover(); // this is a no-op. is testing this useful? + // replay the commit log (nothing should be replayed since everything was flushed) + CommitLog.recover(); + + // since everything that was flushed was removed (i.e. clearUnsafe) + // and the commit log shouldn't have replayed anything, there should be no data assert Util.getRangeSlice(cfs).isEmpty(); }