This is an automated email from the ASF dual-hosted git repository.

ramkrishna pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 5c55fc6  HBASE-22929 - MemStoreLAB ChunkCreator may memory leak(ram) (#614)
5c55fc6 is described below

commit 5c55fc6e3d1e780bbe9463e16eb9131faccb872c
Author: ramkrish86 <ram_krish...@hotmail.com>
AuthorDate: Fri Sep 13 10:01:46 2019 +0530

    HBASE-22929 - MemStoreLAB ChunkCreator may memory leak(ram) (#614)
---
 .../hadoop/hbase/regionserver/MemStoreLABImpl.java |  5 ++
 .../hadoop/hbase/regionserver/StoreScanner.java    |  2 +
 .../regionserver/TestStoreScannerClosure.java      | 57 ++++++++++++++++++++++
 3 files changed, 64 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
index 949c453..886b262 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
@@ -272,6 +272,11 @@ public class MemStoreLABImpl implements MemStoreLAB {
     }
   }
 
+  @VisibleForTesting
+  int getOpenScannerCount() {
+    return this.openScannerCount.get();
+  }
+
   /**
    * Called when opening a scanner on the data of this MemStoreLAB
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index dfc4251..2c9c59f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -896,12 +896,14 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         // need for the updateReaders() to happen.
         LOG.debug("StoreScanner already has the close lock. There is no need 
to updateReaders");
         // no lock acquired.
+        clearAndClose(memStoreScanners);
         return;
       }
       // lock acquired
       updateReaders = true;
       if (this.closing) {
         LOG.debug("StoreScanner already closing. There is no need to 
updateReaders");
+        clearAndClose(memStoreScanners);
         return;
       }
       flushed = true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
index 1fbef26..610d0b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
@@ -29,6 +29,7 @@ import java.util.NavigableSet;
 import java.util.Random;
 import java.util.TreeSet;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -114,6 +116,61 @@ public class TestStoreScannerClosure {
   }
 
   @Test
+  public void testScannerCloseAndUpdateReadersWithMemstoreScanner() throws Exception {
+    Put p = new Put(Bytes.toBytes("row"));
+    p.addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("val"));
+    region.put(p);
+    // create the store scanner here.
+    // for easiness, use Long.MAX_VALUE as read pt
+    try (ExtendedStoreScanner scan = new ExtendedStoreScanner(region.getStore(fam), scanInfo,
+        new Scan(), getCols("q1"), Long.MAX_VALUE)) {
+      p = new Put(Bytes.toBytes("row1"));
+      p.addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("val"));
+      region.put(p);
+      HStore store = region.getStore(fam);
+      ReentrantReadWriteLock lock = store.lock;
+      // use the lock to manually get a new memstore scanner. this is what
+      // HStore#notifyChangedReadersObservers does under the lock.(lock is not needed here
+      //since it is just a testcase).
+      lock.readLock().lock();
+      final List<KeyValueScanner> memScanners = store.memstore.getScanners(Long.MAX_VALUE);
+      lock.readLock().unlock();
+      Thread closeThread = new Thread() {
+        public void run() {
+          // close should be completed
+          scan.close(false, true);
+        }
+      };
+      closeThread.start();
+      Thread updateThread = new Thread() {
+        public void run() {
+          try {
+            // use the updated memstoreScanners and pass it to updateReaders
+            scan.updateReaders(true, Collections.emptyList(), memScanners);
+          } catch (IOException e) {
+            e.printStackTrace();
+          }
+        }
+      };
+      updateThread.start();
+      // wait for close and updateThread to complete
+      closeThread.join();
+      updateThread.join();
+      MemStoreLAB memStoreLAB;
+      for (KeyValueScanner scanner : memScanners) {
+        if (scanner instanceof SegmentScanner) {
+          memStoreLAB = ((SegmentScanner) scanner).segment.getMemStoreLAB();
+          if (memStoreLAB != null) {
+            // There should be no unpooled chunks
+            int openScannerCount = ((MemStoreLABImpl) memStoreLAB).getOpenScannerCount();
+            assertTrue("The memstore should not have unpooled chunks", openScannerCount == 0);
+          }
+        }
+      }
+    }
+  }
+
+  @Test
   public void testScannerCloseAndUpdateReaders1() throws Exception {
     testScannerCloseAndUpdateReaderInternal(true, false);
   }
