Repository: hbase Updated Branches: refs/heads/master 413d663f9 -> 07b623670
HBASE-15050 Block Ref counting does not work in Region Split cases (Ram) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/07b62367 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/07b62367 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/07b62367 Branch: refs/heads/master Commit: 07b623670647686084f8f5fd2038e2bafcfdac54 Parents: 413d663 Author: ramkrishna <ramkrishna.s.vasude...@gmail.com> Authored: Wed Dec 30 14:39:03 2015 +0530 Committer: ramkrishna <ramkrishna.s.vasude...@gmail.com> Committed: Wed Dec 30 14:39:03 2015 +0530 ---------------------------------------------------------------------- .../hadoop/hbase/io/HalfStoreFileReader.java | 8 +++ .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 8 ++- .../client/TestBlockEvictionFromClient.java | 53 ++++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hbase/blob/07b62367/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 0dd7742..067d24c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -314,6 +314,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { } } catch (IOException e) { LOG.warn("Failed seekBefore " + Bytes.toStringBinary(this.splitkey), e); + } finally { + if (scanner != null) { + scanner.close(); + } } return null; } @@ -335,6 +339,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { firstKeySeeked = true; } catch (IOException e) { LOG.warn("Failed seekTo first KV in the 
file", e); + } finally { + if(scanner != null) { + scanner.close(); + } } } return this.firstKey; http://git-wip-us.apache.org/repos/asf/hbase/blob/07b62367/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 4e2ca7d..fcf7b5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -986,7 +986,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), keyBuf.arrayOffset() + keyPair.getSecond(), currKeyLen); } else { - return new ByteBufferedKeyOnlyKeyValue(keyBuf, keyPair.getSecond(), currKeyLen); + // Better to do a copy here instead of holding on to this BB so that + // we could release the blocks referring to this key. This key is specifically used + // in HalfStoreFileReader to get the firstkey and lastkey by creating a new scanner + // every time. So holding onto the BB (in case of DBB) is not advised here. 
+ byte[] key = new byte[currKeyLen]; + ByteBufferUtils.copyFromBufferToArray(key, keyBuf, keyPair.getSecond(), 0, currKeyLen); + return new KeyValue.KeyOnlyKeyValue(key, 0, currKeyLen); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/07b62367/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index a812623..f4d668c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -74,6 +74,8 @@ public class TestBlockEvictionFromClient { private static int NO_OF_THREADS = 3; private static byte[] ROW = Bytes.toBytes("testRow"); private static byte[] ROW1 = Bytes.toBytes("testRow1"); + private static byte[] ROW2 = Bytes.toBytes("testRow2"); + private static byte[] ROW3 = Bytes.toBytes("testRow3"); private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static byte[][] FAMILIES_1 = new byte[1][0]; private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); @@ -554,6 +556,57 @@ public class TestBlockEvictionFromClient { } @Test + public void testBlockRefCountAfterSplits() throws IOException, InterruptedException { + HTable table = null; + try { + TableName tableName = TableName.valueOf("testBlockRefCountAfterSplits"); + table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024); + // get the block cache and region + RegionLocator locator = table.getRegionLocator(); + String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); + Region region = + TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); + Store store = 
region.getStores().iterator().next(); + CacheConfig cacheConf = store.getCacheConfig(); + cacheConf.setEvictOnClose(true); + BlockCache cache = cacheConf.getBlockCache(); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, data); + table.put(put); + region.flush(true); + put = new Put(ROW1); + put.addColumn(FAMILY, QUALIFIER, data); + table.put(put); + region.flush(true); + byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER); + put = new Put(ROW2); + put.addColumn(FAMILY, QUALIFIER2, data2); + table.put(put); + put = new Put(ROW3); + put.addColumn(FAMILY, QUALIFIER2, data2); + table.put(put); + region.flush(true); + TEST_UTIL.getAdmin().split(tableName, ROW1); + List<HRegionInfo> tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName); + // Wait for splits + while (tableRegions.size() != 2) { + tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName); + Thread.sleep(100); + } + region.compact(true); + Iterator<CachedBlock> iterator = cache.iterator(); + // Though the split had created the HalfStoreFileReader - the firstkey and lastkey scanners + // should be closed in order to return those blocks + iterateBlockCache(cache, iterator); + } finally { + if (table != null) { + table.close(); + } + } + } + + @Test public void testMultiGets() throws IOException, InterruptedException { HTable table = null; try {