This is an automated email from the ASF dual-hosted git repository.

jolynch pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 1fce84f9833bd62227dbf8f5d063935457dbc18e
Merge: cd73c14 1911a88
Author: Joseph Lynch <joe.e.ly...@gmail.com>
AuthorDate: Thu Dec 9 10:24:19 2021 -0500

    Merge branch 'cassandra-3.0' into cassandra-3.11

 CHANGES.txt                                        |   1 +
 conf/cassandra.yaml                                |   7 +-
 .../apache/cassandra/cache/AutoSavingCache.java    |   8 +-
 src/java/org/apache/cassandra/config/Config.java   |   2 +
 .../cassandra/config/DatabaseDescriptor.java       |  11 ++
 .../org/apache/cassandra/db/lifecycle/View.java    |   2 +-
 .../org/apache/cassandra/service/CacheService.java |  40 ++++--
 .../test/microbench/CacheLoaderBench.java          | 137 ++++++++++++++++++++
 .../unit/org/apache/cassandra/db/KeyCacheTest.java | 138 ++++++++++++++++++++-
 9 files changed, 332 insertions(+), 14 deletions(-)

diff --cc CHANGES.txt
index 6bda91e,085e1ff..6c70235
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,16 -1,5 +1,17 @@@
 -3.0.26:
 +3.11.12
 + * Upgrade snakeyaml to 1.26 in 3.11 (CASSANDRA-17028)
 + * Add key validation to sstablescrub (CASSANDRA-16969)
 + * Update Jackson from 2.9.10 to 2.12.5 (CASSANDRA-16851)
 + * Include SASI components to snapshots (CASSANDRA-15134)
 + * Make assassinate more resilient to missing tokens (CASSANDRA-16847)
 + * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies 
(CASSANDRA-16854)
 + * Validate SASI tokenizer options before adding index to schema 
(CASSANDRA-15135)
 + * Fixup scrub output when no data post-scrub and clear up old use of row, 
which really means partition (CASSANDRA-16835)
 + * Fix ant-junit dependency issue (CASSANDRA-16827)
 + * Reduce thread contention in CommitLogSegment and HintsBuffer 
(CASSANDRA-16072)
 + * Avoid sending CDC column if not enabled (CASSANDRA-16770)
 +Merged from 3.0:
+  * Fix slow keycache load which blocks startup for tables with many sstables 
(CASSANDRA-14898)
   * Fix rare NPE caused by batchlog replay / node decommission races 
(CASSANDRA-17049)
   * Allow users to view permissions of the roles they created (CASSANDRA-16902)
   * Fix failure handling in inter-node communication (CASSANDRA-16334)
diff --cc test/unit/org/apache/cassandra/db/KeyCacheTest.java
index ada6b5b,9cb06b9..f31df18
--- a/test/unit/org/apache/cassandra/db/KeyCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyCacheTest.java
@@@ -17,33 -17,49 +17,49 @@@
   */
  package org.apache.cassandra.db;
  
 +import java.io.IOException;
+ import java.util.ArrayList;
  import java.util.Collection;
+ import java.util.Collections;
  import java.util.HashMap;
  import java.util.Iterator;
+ import java.util.List;
  import java.util.Map;
  import java.util.Set;
  import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.TimeUnit;
  
  import com.google.common.collect.ImmutableList;
 -import com.google.common.util.concurrent.Uninterruptibles;
  import org.junit.AfterClass;
  import org.junit.BeforeClass;
  import org.junit.Test;
  
  import org.apache.cassandra.SchemaLoader;
  import org.apache.cassandra.Util;
+ import org.apache.cassandra.cache.AutoSavingCache;
+ import org.apache.cassandra.cache.ICache;
  import org.apache.cassandra.cache.KeyCacheKey;
 -import org.apache.cassandra.config.CFMetaData;
  import org.apache.cassandra.config.DatabaseDescriptor;
 -import org.apache.cassandra.config.Schema;
  import org.apache.cassandra.db.compaction.OperationType;
  import org.apache.cassandra.db.compaction.CompactionManager;
  import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
  import org.apache.cassandra.exceptions.ConfigurationException;
  import org.apache.cassandra.io.sstable.format.SSTableReader;
+ import org.apache.cassandra.io.util.DataInputPlus;
  import org.apache.cassandra.schema.KeyspaceParams;
  import org.apache.cassandra.service.CacheService;
+ import org.apache.cassandra.utils.Pair;
  import org.apache.cassandra.utils.concurrent.Refs;
++import org.hamcrest.Matchers;
+ import org.mockito.Mockito;
+ import org.mockito.internal.stubbing.answers.AnswersWithDelay;
  
  import static org.junit.Assert.assertEquals;
 -import static org.junit.Assert.assertTrue;
++import static org.junit.Assert.assertNotEquals;
++import static org.junit.Assert.assertThat;
+ import static org.mockito.ArgumentMatchers.any;
+ import static org.mockito.Mockito.doAnswer;
+ import static org.mockito.Mockito.mock;
  
  public class KeyCacheTest
  {
@@@ -51,9 -68,11 +68,14 @@@
      private static final String COLUMN_FAMILY1 = "Standard1";
      private static final String COLUMN_FAMILY2 = "Standard2";
      private static final String COLUMN_FAMILY3 = "Standard3";
 +    private static final String COLUMN_FAMILY4 = "Standard4";
 +    private static final String COLUMN_FAMILY5 = "Standard5";
 +    private static final String COLUMN_FAMILY6 = "Standard6";
+     private static final String COLUMN_FAMILY7 = "Standard7";
+     private static final String COLUMN_FAMILY8 = "Standard8";
+     private static final String COLUMN_FAMILY9 = "Standard9";
+ 
+     private static final String COLUMN_FAMILY_K2_1 = "Standard1";
  
  
      @BeforeClass
@@@ -65,9 -84,14 +87,17 @@@
                                      SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY1),
                                      SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY2),
                                      SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY3),
 +                                    SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY4),
 +                                    SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY5),
-                                     SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY6));
++                                    SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY6),
+                                     SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY7),
+                                     SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY8),
+                                     SchemaLoader.standardCFMD(KEYSPACE1, 
COLUMN_FAMILY9));
+ 
+         SchemaLoader.createKeyspace(KEYSPACE2,
+                                     KeyspaceParams.simple(1),
+                                     SchemaLoader.standardCFMD(KEYSPACE2, 
COLUMN_FAMILY_K2_1));
+ 
      }
  
      @AfterClass
@@@ -305,9 -255,115 +335,115 @@@
          Util.getAll(Util.cmd(cfs, "key1").build());
          Util.getAll(Util.cmd(cfs, "key2").build());
  
 -        assertKeyCacheSize(noEarlyOpen ? 4 : 2, KEYSPACE1, COLUMN_FAMILY1);
 +        assertKeyCacheSize(noEarlyOpen ? 4 : 2, KEYSPACE1, cf);
      }
  
+     @Test
+     public void testKeyCacheLoadNegativeCacheLoadTime() throws Exception
+     {
+         DatabaseDescriptor.setCacheLoadTimeout(-1);
+         String cf = COLUMN_FAMILY7;
+ 
+         
createAndInvalidateCache(Collections.singletonList(Pair.create(KEYSPACE1, cf)), 
100);
+ 
+         CacheService.instance.keyCache.loadSaved();
+ 
+         // Here max time to load cache is negative which means no time left 
to load cache. So the keyCache size should
+         // be zero after loadSaved().
+         assertKeyCacheSize(0, KEYSPACE1, cf);
+         assertEquals(0, CacheService.instance.keyCache.size());
+     }
+ 
+     @Test
+     public void testKeyCacheLoadTwoTablesTime() throws Exception
+     {
+         DatabaseDescriptor.setCacheLoadTimeout(60);
+         String columnFamily1 = COLUMN_FAMILY8;
+         String columnFamily2 = COLUMN_FAMILY_K2_1;
+         int numberOfRows = 100;
+         List<Pair<String, String>> tables = new ArrayList<>(2);
+         tables.add(Pair.create(KEYSPACE1, columnFamily1));
+         tables.add(Pair.create(KEYSPACE2, columnFamily2));
+ 
+         createAndInvalidateCache(tables, numberOfRows);
+ 
+         CacheService.instance.keyCache.loadSaved();
+ 
+         // Here the max time to load the cache is 60 seconds, which is ample, so both
+         // tables' keys should be fully loaded after loadSaved().
+         assertKeyCacheSize(numberOfRows, KEYSPACE1, columnFamily1);
+         assertKeyCacheSize(numberOfRows, KEYSPACE2, columnFamily2);
+         assertEquals(numberOfRows * tables.size(), 
CacheService.instance.keyCache.size());
+     }
+ 
+     @SuppressWarnings({ "unchecked", "rawtypes" })
+     @Test
+     public void testKeyCacheLoadCacheLoadTimeExceedingLimit() throws Exception
+     {
+         DatabaseDescriptor.setCacheLoadTimeout(2);
+         int delayMillis = 1000;
+         int numberOfRows = 100;
+ 
+         String cf = COLUMN_FAMILY9;
+ 
+         
createAndInvalidateCache(Collections.singletonList(Pair.create(KEYSPACE1, cf)), 
numberOfRows);
+ 
+         // Testing cache load. Here using custom built AutoSavingCache 
instance as simulating delay is not possible with
+         // 'CacheService.instance.keyCache'. 'AutoSavingCache.loadSaved()' is 
returning no.of entries loaded so we don't need
+         // to instantiate ICache.class.
+         CacheService.KeyCacheSerializer keyCacheSerializer = new 
CacheService.KeyCacheSerializer();
+         CacheService.KeyCacheSerializer keyCacheSerializerSpy = 
Mockito.spy(keyCacheSerializer);
+         AutoSavingCache autoSavingCache = new 
AutoSavingCache(mock(ICache.class),
+                                                               
CacheService.CacheType.KEY_CACHE,
+                                                               
keyCacheSerializerSpy);
+ 
+         doAnswer(new AnswersWithDelay(delayMillis, answer -> 
keyCacheSerializer.deserialize(answer.getArgument(0),
+                                                                               
              answer.getArgument(1)) ))
+                
.when(keyCacheSerializerSpy).deserialize(any(DataInputPlus.class), 
any(ColumnFamilyStore.class));
+ 
+         long maxExpectedKeyCache = Math.min(numberOfRows,
+                                             1 + 
TimeUnit.SECONDS.toMillis(DatabaseDescriptor.getCacheLoadTimeout()) / 
delayMillis);
+ 
+         long keysLoaded = autoSavingCache.loadSaved();
 -        assertTrue(keysLoaded < maxExpectedKeyCache);
 -        assertTrue(0 != keysLoaded);
++        assertThat(keysLoaded, 
Matchers.lessThanOrEqualTo(maxExpectedKeyCache));
++        assertNotEquals(0, keysLoaded);
+         Mockito.verify(keyCacheSerializerSpy, 
Mockito.times(1)).cleanupAfterDeserialize();
+     }
+ 
+     private void createAndInvalidateCache(List<Pair<String, String>> tables, 
int numberOfRows) throws ExecutionException, InterruptedException
+     {
+         CompactionManager.instance.disableAutoCompaction();
+ 
+         // empty the cache
+         CacheService.instance.invalidateKeyCache();
+         assertEquals(0, CacheService.instance.keyCache.size());
+ 
+         for(Pair<String, String> entry : tables)
+         {
+             String keyspace = entry.left;
+             String cf = entry.right;
+             ColumnFamilyStore store = 
Keyspace.open(keyspace).getColumnFamilyStore(cf);
+ 
+             // insert data and force to disk
+             SchemaLoader.insertData(keyspace, cf, 0, numberOfRows);
+             store.forceBlockingFlush();
+         }
+         for(Pair<String, String> entry : tables)
+         {
+             String keyspace = entry.left;
+             String cf = entry.right;
+             // populate the cache
+             readData(keyspace, cf, 0, numberOfRows);
+             assertKeyCacheSize(numberOfRows, keyspace, cf);
+         }
+ 
+         // force the cache to disk
+         
CacheService.instance.keyCache.submitWrite(CacheService.instance.keyCache.size()).get();
+ 
+         CacheService.instance.invalidateKeyCache();
+         assertEquals(0, CacheService.instance.keyCache.size());
+     }
+ 
      private static void readData(String keyspace, String columnFamily, int 
startRow, int numberOfRows)
      {
          ColumnFamilyStore store = 
Keyspace.open(keyspace).getColumnFamilyStore(columnFamily);

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org

Reply via email to