Repository: hbase
Updated Branches:
  refs/heads/master 14fb57cab -> aace02a23


HBASE-17782 Extend IdReadWriteLock to support using both weak and soft reference


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aace02a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aace02a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aace02a2

Branch: refs/heads/master
Commit: aace02a230a61cc7e91eb240598435c36c9af403
Parents: 14fb57c
Author: Yu Li <l...@apache.org>
Authored: Wed Mar 15 11:07:42 2017 +0800
Committer: Yu Li <l...@apache.org>
Committed: Wed Mar 15 11:07:42 2017 +0800

----------------------------------------------------------------------
 .../hbase/io/hfile/bucket/BucketCache.java      |  5 +-
 .../hadoop/hbase/util/IdReadWriteLock.java      | 58 ++++++++++++++++----
 .../hbase/wal/RegionGroupingProvider.java       | 13 +++--
 .../hadoop/hbase/util/TestIdReadWriteLock.java  | 31 +++++++++--
 4 files changed, 86 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/aace02a2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index cb23ca9..3e9c376 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
+import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -185,9 +186,11 @@ public class BucketCache implements BlockCache, HeapSize {
   /**
    * A ReentrantReadWriteLock to lock on a particular block identified by offset.
    * The purpose of this is to avoid freeing a block while it is being read.
+   * <p>
+   * The key set of offsets in BucketCache is limited, so a soft reference is the best choice here.
    */
   @VisibleForTesting
-  final IdReadWriteLock offsetLock = new IdReadWriteLock();
+  final IdReadWriteLock offsetLock = new IdReadWriteLock(ReferenceType.SOFT);
 
   private final NavigableSet<BlockCacheKey> blocksByHFile =
       new ConcurrentSkipListSet<>(new Comparator<BlockCacheKey>() {
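
For context, the pattern this hunk enables looks roughly like the sketch below. This is not code from the commit: the class name, readBlock and doRead are hypothetical stand-ins, and it assumes IdReadWriteLock.getLock(long) returns the shared ReentrantReadWriteLock for an id, as the Javadoc in the next file describes.

import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.hbase.util.IdReadWriteLock;
import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;

public class OffsetLockSketch {
  // SOFT: bucket offsets repeat constantly, so cached locks are worth keeping
  private final IdReadWriteLock offsetLock = new IdReadWriteLock(ReferenceType.SOFT);

  byte[] readBlock(long offset) {
    ReentrantReadWriteLock lock = offsetLock.getLock(offset);
    lock.readLock().lock();   // readers share; eviction would take the write lock
    try {
      return doRead(offset);  // hypothetical stand-in for the actual cache read
    } finally {
      lock.readLock().unlock();
    }
  }

  private byte[] doRead(long offset) {
    return new byte[0];       // placeholder
  }
}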

http://git-wip-us.apache.org/repos/asf/hbase/blob/aace02a2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
index deb2265..2a83029 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import java.lang.ref.Reference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -44,16 +45,48 @@ import com.google.common.annotations.VisibleForTesting;
 public class IdReadWriteLock {
  // The number of locks we want to easily support. It's not a maximum.
   private static final int NB_CONCURRENT_LOCKS = 1000;
-  // The pool to get entry from, entries are mapped by soft reference and will be
-  // automatically garbage-collected when JVM memory pressure is high
-  private final ObjectPool<Long, ReentrantReadWriteLock> lockPool =
-      new SoftObjectPool<>(
-          new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
-            @Override
-            public ReentrantReadWriteLock createObject(Long id) {
-              return new ReentrantReadWriteLock();
-            }
-          }, NB_CONCURRENT_LOCKS);
+  /**
+   * The pool to get entries from; entries are mapped by {@link Reference} and will be
+   * automatically garbage-collected by the JVM
+   */
+  private final ObjectPool<Long, ReentrantReadWriteLock> lockPool;
+  private final ReferenceType refType;
+
+  public IdReadWriteLock() {
+    this(ReferenceType.WEAK);
+  }
+
+  /**
+   * Constructor of IdReadWriteLock
+   * @param referenceType type of the reference used in lock pool, {@link ReferenceType#WEAK} by
+   *          default. Use {@link ReferenceType#SOFT} if the key set is limited and the locks will
+   *          be reused with high frequency
+   */
+  public IdReadWriteLock(ReferenceType referenceType) {
+    this.refType = referenceType;
+    switch (referenceType) {
+    case SOFT:
+      lockPool = new SoftObjectPool<>(new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
+        @Override
+        public ReentrantReadWriteLock createObject(Long id) {
+          return new ReentrantReadWriteLock();
+        }
+      }, NB_CONCURRENT_LOCKS);
+      break;
+    case WEAK:
+    default:
+      lockPool = new WeakObjectPool<>(new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
+        @Override
+        public ReentrantReadWriteLock createObject(Long id) {
+          return new ReentrantReadWriteLock();
+        }
+      }, NB_CONCURRENT_LOCKS);
+    }
+  }
+
+  public static enum ReferenceType {
+    WEAK, SOFT
+  }
 
   /**
    * Get the ReentrantReadWriteLock corresponding to the given id
@@ -93,4 +126,9 @@ public class IdReadWriteLock {
       Thread.sleep(50);
     }
   }
+
+  @VisibleForTesting
+  public ReferenceType getReferenceType() {
+    return this.refType;
+  }
 }
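
A quick sketch of the resulting API (the main class here is hypothetical, not part of the commit): the no-arg constructor now selects WEAK references, while callers with a small, hot key set opt into SOFT. Either way the same id must map to the same lock instance while it is strongly reachable; this assumes the getLock(long) accessor described in the Javadoc above.

import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.hbase.util.IdReadWriteLock;
import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;

public class IdLockSelectionSketch {
  public static void main(String[] args) {
    IdReadWriteLock weakLock = new IdReadWriteLock();                    // WEAK by default
    IdReadWriteLock softLock = new IdReadWriteLock(ReferenceType.SOFT);  // small, hot key set

    ReentrantReadWriteLock a = softLock.getLock(42L);
    ReentrantReadWriteLock b = softLock.getLock(42L);
    System.out.println(a == b);  // true: one lock per id while strongly referenced
    System.out.println(weakLock.getReferenceType() + " " + softLock.getReferenceType());
  }
}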

http://git-wip-us.apache.org/repos/asf/hbase/blob/aace02a2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index dee36e8..5a29731 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -27,7 +27,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.locks.Lock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,7 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 // imports for classes still in regionserver.wal
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.IdReadWriteLock;
+import org.apache.hadoop.hbase.util.IdLock;
 
 /**
  * A WAL Provider that returns a WAL per group of regions.
@@ -132,7 +131,7 @@ public class RegionGroupingProvider implements WALProvider {
  /** A group-provider mapping, make sure one-one rather than many-one mapping */
  private final ConcurrentMap<String, WALProvider> cached = new ConcurrentHashMap<>();
 
-  private final IdReadWriteLock createLock = new IdReadWriteLock();
+  private final IdLock createLock = new IdLock();
 
   private RegionGroupingStrategy strategy = null;
   private WALFactory factory = null;
@@ -181,16 +180,18 @@ public class RegionGroupingProvider implements WALProvider {
   private WAL getWAL(final String group) throws IOException {
     WALProvider provider = cached.get(group);
     if (provider == null) {
-      Lock lock = createLock.getLock(group.hashCode()).writeLock();
-      lock.lock();
+      IdLock.Entry lockEntry = null;
       try {
+        lockEntry = createLock.getLockEntry(group.hashCode());
         provider = cached.get(group);
         if (provider == null) {
           provider = createProvider(group);
           cached.put(group, provider);
         }
       } finally {
-        lock.unlock();
+        if (lockEntry != null) {
+          createLock.releaseLockEntry(lockEntry);
+        }
       }
     }
     return provider.getWAL(null, null);
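
The getWAL() change above is double-checked lazy initialization guarded by IdLock instead of a per-id write lock. A self-contained sketch of the same pattern follows; getOrCreate and the Object value type are hypothetical stand-ins, and it assumes only the getLockEntry(long)/releaseLockEntry(Entry) pair visible in the hunk.

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.hbase.util.IdLock;

public class LazyInitSketch {
  private final ConcurrentMap<String, Object> cached = new ConcurrentHashMap<>();
  private final IdLock createLock = new IdLock();

  Object getOrCreate(String group) throws IOException {
    Object value = cached.get(group);
    if (value == null) {
      IdLock.Entry lockEntry = null;
      try {
        lockEntry = createLock.getLockEntry(group.hashCode());
        value = cached.get(group);   // re-check: another thread may have won the race
        if (value == null) {
          value = new Object();      // stand-in for createProvider(group)
          cached.put(group, value);
        }
      } finally {
        if (lockEntry != null) {
          createLock.releaseLockEntry(lockEntry);
        }
      }
    }
    return value;
  }
}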

http://git-wip-us.apache.org/repos/asf/hbase/blob/aace02a2/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
index 295816f..7dd2a63 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.util;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
 
+import java.util.Arrays;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.Callable;
@@ -38,9 +39,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
+@RunWith(Parameterized.class)
 @Category({MiscTests.class, MediumTests.class})
 // Medium as it creates 100 threads; seems better to run it isolated
 public class TestIdReadWriteLock {
@@ -51,7 +56,14 @@ public class TestIdReadWriteLock {
   private static final int NUM_THREADS = 128;
   private static final int NUM_SECONDS = 15;
 
-  private IdReadWriteLock idLock = new IdReadWriteLock();
+  @Parameterized.Parameter
+  public IdReadWriteLock idLock;
+
+  @Parameterized.Parameters
+  public static Iterable<Object[]> data() {
+    return Arrays.asList(new Object[][] { { new IdReadWriteLock(ReferenceType.WEAK) },
+        { new IdReadWriteLock(ReferenceType.SOFT) } });
+  }
 
   private Map<Long, String> idOwner = new ConcurrentHashMap<>();
 
@@ -111,11 +123,22 @@ public class TestIdReadWriteLock {
         Future<Boolean> result = ecs.take();
         assertTrue(result.get());
       }
-      // make sure the entry pool won't be cleared when JVM memory is enough
-      // even after GC and purge call
       int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
       LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
-      assertEquals(NUM_IDS, entryPoolSize);
+      ReferenceType refType = idLock.getReferenceType();
+      switch (refType) {
+      case WEAK:
+        // make sure the entry pool will be cleared after GC and purge call
+        assertEquals(0, entryPoolSize);
+        break;
+      case SOFT:
+        // make sure the entry pool won't be cleared when JVM memory is enough
+        // even after GC and purge call
+        assertEquals(NUM_IDS, entryPoolSize);
+        break;
+      default:
+        break;
+      }
     } finally {
       exec.shutdown();
       exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
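
The two assertions hinge on standard java.lang.ref semantics, which this JDK-only snippet (not from the commit) illustrates. Note that System.gc() is only a hint to the JVM, so the printed results are typical rather than guaranteed.

import java.lang.ref.Reference;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;

public class RefGcSketch {
  public static void main(String[] args) {
    Reference<Object> weak = new WeakReference<>(new Object());
    Reference<Object> soft = new SoftReference<>(new Object());
    System.gc();  // hint only; most JVMs clear weak refs here
    System.out.println("weak: " + weak.get());  // typically null after GC
    System.out.println("soft: " + soft.get());  // typically non-null while memory is plentiful
  }
}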
