Repository: hbase
Updated Branches:
  refs/heads/branch-2 0082f5559 -> a458d7c40
http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java index 7689fcd..1c627f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java @@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.util.Pair; /** * RegionServerAccounting keeps record of some basic real time information about - * the Region Server. Currently, it keeps record the global memstore size and global memstore heap - * overhead. It also tracks the replay edits per region. + * the Region Server. Currently, it keeps a record of the global memstore size and global memstore + * on-heap and off-heap overhead. It also tracks the replay edits per region. */ @InterfaceAudience.Private public class RegionServerAccounting { // memstore data size - private final LongAdder globalMemstoreDataSize = new LongAdder(); - // memstore heap size. When off heap MSLAB in place, this will be only heap overhead of the Cell - // POJOs and entry overhead of them onto memstore. When on heap MSLAB, this will be include heap - // overhead as well as the cell data size. Ya cell data is in on heap area only then. - private final LongAdder globalMemstoreHeapSize = new LongAdder(); + private final LongAdder globalMemStoreDataSize = new LongAdder(); + // memstore heap size. + private final LongAdder globalMemStoreHeapSize = new LongAdder(); + // memstore off-heap size. + private final LongAdder globalMemStoreOffHeapSize = new LongAdder(); // Store the edits size during replaying WAL. Use this to roll back the // global memstore size once a region opening failed.
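For context on the hunk above: the single heap-overhead counter is replaced by three independent LongAdder counters (data size, on-heap size, off-heap size) that are always bumped together from one MemStoreSize. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative only, not the real RegionServerAccounting API:

  import java.util.concurrent.atomic.LongAdder;

  // Illustrative only: three size dimensions tracked by separate counters,
  // always incremented/decremented together from one set of deltas.
  public class TriSizeAccountingSketch {
    private final LongAdder dataSize = new LongAdder();
    private final LongAdder heapSize = new LongAdder();
    private final LongAdder offHeapSize = new LongAdder();

    public void inc(long dataDelta, long heapDelta, long offHeapDelta) {
      dataSize.add(dataDelta);
      heapSize.add(heapDelta);
      offHeapSize.add(offHeapDelta);
    }

    public void dec(long dataDelta, long heapDelta, long offHeapDelta) {
      inc(-dataDelta, -heapDelta, -offHeapDelta);
    }

    public long getDataSize() { return dataSize.sum(); }
    public long getHeapSize() { return heapSize.sum(); }
    public long getOffHeapSize() { return offHeapSize.sum(); }
  }

Each counter is updated lock-free on its own, so a reader may see the three sums at slightly different instants; that is consistent with how the patch uses them, namely as inputs to watermark/flush heuristics rather than as a single atomic snapshot.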
@@ -114,14 +114,21 @@ public class RegionServerAccounting { * @return the global Memstore data size in the RegionServer */ public long getGlobalMemStoreDataSize() { - return globalMemstoreDataSize.sum(); + return globalMemStoreDataSize.sum(); } /** * @return the global memstore heap size in the RegionServer */ public long getGlobalMemStoreHeapSize() { - return this.globalMemstoreHeapSize.sum(); + return this.globalMemStoreHeapSize.sum(); + } + + /** + * @return the global memstore heap size in the RegionServer + */ + public long getGlobalMemStoreOffHeapSize() { + return this.globalMemStoreOffHeapSize.sum(); } /** @@ -129,13 +136,15 @@ public class RegionServerAccounting { * the global Memstore size */ public void incGlobalMemStoreSize(MemStoreSize memStoreSize) { - globalMemstoreDataSize.add(memStoreSize.getDataSize()); - globalMemstoreHeapSize.add(memStoreSize.getHeapSize()); + globalMemStoreDataSize.add(memStoreSize.getDataSize()); + globalMemStoreHeapSize.add(memStoreSize.getHeapSize()); + globalMemStoreOffHeapSize.add(memStoreSize.getOffHeapSize()); } public void decGlobalMemStoreSize(MemStoreSize memStoreSize) { - globalMemstoreDataSize.add(-memStoreSize.getDataSize()); - globalMemstoreHeapSize.add(-memStoreSize.getHeapSize()); + globalMemStoreDataSize.add(-memStoreSize.getDataSize()); + globalMemStoreHeapSize.add(-memStoreSize.getHeapSize()); + globalMemStoreOffHeapSize.add(-memStoreSize.getOffHeapSize()); } /** @@ -151,13 +160,13 @@ public class RegionServerAccounting { } } else { // If the configured memstore is offheap, check for two things - // 1) If the global memstore data size is greater than the configured + // 1) If the global memstore off-heap size is greater than the configured // 'hbase.regionserver.offheap.global.memstore.size' // 2) If the global memstore heap size is greater than the configured onheap // global memstore limit 'hbase.regionserver.global.memstore.size'. 
// We do this to avoid OOME incase of scenarios where the heap is occupied with // lot of onheap references to the cells in memstore - if (getGlobalMemStoreDataSize() >= globalMemStoreLimit) { + if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimit) { // Indicates that global memstore size is above the configured // 'hbase.regionserver.offheap.global.memstore.size' return FlushType.ABOVE_OFFHEAP_HIGHER_MARK; @@ -181,8 +190,8 @@ public class RegionServerAccounting { return FlushType.ABOVE_ONHEAP_LOWER_MARK; } } else { - if (getGlobalMemStoreDataSize() >= globalMemStoreLimitLowMark) { - // Indicates that the offheap memstore's data size is greater than the global memstore + if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimitLowMark) { + // Indicates that the offheap memstore's size is greater than the global memstore // lower limit return FlushType.ABOVE_OFFHEAP_LOWER_MARK; } else if (getGlobalMemStoreHeapSize() >= globalOnHeapMemstoreLimitLowMark) { @@ -203,7 +212,7 @@ public class RegionServerAccounting { if (memType == MemoryType.HEAP) { return (getGlobalMemStoreHeapSize()) * 1.0 / globalMemStoreLimitLowMark; } else { - return Math.max(getGlobalMemStoreDataSize() * 1.0 / globalMemStoreLimitLowMark, + return Math.max(getGlobalMemStoreOffHeapSize() * 1.0 / globalMemStoreLimitLowMark, getGlobalMemStoreHeapSize() * 1.0 / globalOnHeapMemstoreLimitLowMark); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index a1f5755..5b98a27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -66,7 +66,7 @@ public class RegionServicesForStores { } public void addMemStoreSize(MemStoreSize size) { - region.addAndGetMemStoreSize(size); + region.incMemStoreSize(size); } public RegionInfo getRegionInfo() { @@ -89,6 +89,6 @@ public class RegionServicesForStores { @VisibleForTesting long getMemStoreSize() { - return region.getMemStoreSize(); + return region.getMemStoreDataSize(); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 5bfab52..66a2ad5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -23,7 +23,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.SortedSet; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.Cell; @@ -48,9 +47,9 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti @InterfaceAudience.Private public abstract class Segment { - public final static long FIXED_OVERHEAD = ClassSize.align((long)ClassSize.OBJECT - + 6 * ClassSize.REFERENCE // cellSet, 
comparator, memStoreLAB, dataSize, - // heapSize, and timeRangeTracker + public final static long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + + 5 * ClassSize.REFERENCE // cellSet, comparator, memStoreLAB, memStoreSizing, + // and timeRangeTracker + Bytes.SIZEOF_LONG // minSequenceId + Bytes.SIZEOF_BOOLEAN); // tagsPresent public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE @@ -62,8 +61,7 @@ public abstract class Segment { private MemStoreLAB memStoreLAB; // Sum of sizes of all Cells added to this Segment. Cell's heapSize is considered. This is not // including the heap overhead of this class. - protected final AtomicLong dataSize; - protected final AtomicLong heapSize; + protected final MemStoreSizing segmentSize; protected final TimeRangeTracker timeRangeTracker; protected volatile boolean tagsPresent; @@ -71,8 +69,23 @@ public abstract class Segment { // and there is no need in true Segments state protected Segment(CellComparator comparator, TimeRangeTracker trt) { this.comparator = comparator; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); + this.timeRangeTracker = trt; + } + + protected Segment(CellComparator comparator, List<ImmutableSegment> segments, + TimeRangeTracker trt) { + long dataSize = 0; + long heapSize = 0; + long OffHeapSize = 0; + for (Segment segment : segments) { + MemStoreSize memStoreSize = segment.getMemStoreSize(); + dataSize += memStoreSize.getDataSize(); + heapSize += memStoreSize.getHeapSize(); + OffHeapSize += memStoreSize.getOffHeapSize(); + } + this.comparator = comparator; + this.segmentSize = new MemStoreSizing(dataSize, heapSize, OffHeapSize); this.timeRangeTracker = trt; } @@ -82,8 +95,7 @@ public abstract class Segment { this.comparator = comparator; this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); this.tagsPresent = false; this.timeRangeTracker = trt; } @@ -93,8 +105,7 @@ public abstract class Segment { this.comparator = segment.getComparator(); this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); - this.dataSize = new AtomicLong(segment.keySize()); - this.heapSize = new AtomicLong(segment.heapSize.get()); + this.segmentSize = new MemStoreSizing(segment.getMemStoreSize()); this.tagsPresent = segment.isTagsPresent(); this.timeRangeTracker = segment.getTimeRangeTracker(); } @@ -134,17 +145,6 @@ public abstract class Segment { } /** - * @return the first cell in the segment that has equal or greater key than the given cell - */ - public Cell getFirstAfter(Cell cell) { - SortedSet<Cell> snTailSet = tailSet(cell); - if (!snTailSet.isEmpty()) { - return snTailSet.first(); - } - return null; - } - - /** * Closing a segment before it is being discarded */ public void close() { @@ -221,27 +221,39 @@ public abstract class Segment { return this; } + public MemStoreSize getMemStoreSize() { + return this.segmentSize; + } + /** * @return Sum of all cell's size. */ public long keySize() { - return this.dataSize.get(); + return this.segmentSize.getDataSize(); } /** * @return The heap size of this segment. */ public long heapSize() { - return this.heapSize.get(); + return this.segmentSize.getHeapSize(); + } + + /** + * @return The off-heap size of this segment. 
+ */ + public long offHeapSize() { + return this.segmentSize.getOffHeapSize(); } /** * Updates the size counters of the segment by the given delta */ //TODO - protected void incSize(long delta, long heapOverhead) { - this.dataSize.addAndGet(delta); - this.heapSize.addAndGet(heapOverhead); + protected void incSize(long delta, long heapOverhead, long offHeapOverhead) { + synchronized (this) { + this.segmentSize.incMemStoreSize(delta, heapOverhead, offHeapOverhead); + } } public long getMinSequenceId() { @@ -303,9 +315,10 @@ public abstract class Segment { cellSize = getCellLength(cellToAdd); } long heapSize = heapSizeChange(cellToAdd, succ); - incSize(cellSize, heapSize); + long offHeapSize = offHeapSizeChange(cellToAdd, succ); + incSize(cellSize, heapSize, offHeapSize); if (memstoreSizing != null) { - memstoreSizing.incMemStoreSize(cellSize, heapSize); + memstoreSizing.incMemStoreSize(cellSize, heapSize, offHeapSize); } getTimeRangeTracker().includeTimestamp(cellToAdd); minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId()); @@ -327,10 +340,48 @@ public abstract class Segment { * heap size itself and additional overhead because of addition on to CSLM. */ protected long heapSizeChange(Cell cell, boolean succ) { + long res = 0; if (succ) { - return ClassSize - .align(indexEntrySize() + PrivateCellUtil.estimatedHeapSizeOf(cell)); + boolean onHeap = true; + MemStoreLAB memStoreLAB = getMemStoreLAB(); + if(memStoreLAB != null) { + onHeap = memStoreLAB.isOnHeap(); + } + res += indexEntryOnHeapSize(onHeap); + if(onHeap) { + res += PrivateCellUtil.estimatedSizeOfCell(cell); + } + res = ClassSize.align(res); } + return res; + } + + protected long offHeapSizeChange(Cell cell, boolean succ) { + long res = 0; + if (succ) { + boolean offHeap = false; + MemStoreLAB memStoreLAB = getMemStoreLAB(); + if(memStoreLAB != null) { + offHeap = memStoreLAB.isOffHeap(); + } + res += indexEntryOffHeapSize(offHeap); + if(offHeap) { + res += PrivateCellUtil.estimatedSizeOfCell(cell); + } + res = ClassSize.align(res); + } + return res; + } + + protected long indexEntryOnHeapSize(boolean onHeap) { + // in most cases index is allocated on-heap + // override this method when it is not always the case, e.g., in CCM + return indexEntrySize(); + } + + protected long indexEntryOffHeapSize(boolean offHeap) { + // in most cases index is allocated on-heap + // override this method when it is not always the case, e.g., in CCM return 0; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 0b9b547..1624810 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -600,7 +600,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Update the progress of the scanner context scannerContext.incrementSizeProgress(cellSize, - PrivateCellUtil.estimatedHeapSizeOf(cell)); + PrivateCellUtil.estimatedSizeOfCell(cell)); scannerContext.incrementBatchProgress(1); if (matcher.isUserScan() && totalBytesRead > maxRowSize) { http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java 
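The Segment changes above split per-cell accounting into a heap part and an off-heap part: the index (CSLM or flat-map) entry is charged to heap by default, while the cell data itself is charged to heap or off-heap depending on whether the MemStoreLAB is on-heap or off-heap. A rough sketch of that attribution, assuming 8-byte alignment in the spirit of ClassSize.align; the helper names are illustrative, not the actual Segment methods:

  // Illustrative helpers only.
  final class CellSizeAttributionSketch {

    // Heap bytes charged for one successfully added cell.
    static long heapDelta(long indexEntrySize, long cellSize, boolean cellDataOnHeap) {
      long res = indexEntrySize;      // index entry assumed to live on heap
      if (cellDataOnHeap) {
        res += cellSize;              // cell bytes hit the heap only for on-heap MSLAB (or no MSLAB)
      }
      return align(res);
    }

    // Off-heap bytes charged for one successfully added cell.
    static long offHeapDelta(long cellSize, boolean cellDataOnHeap) {
      return cellDataOnHeap ? 0L : align(cellSize);
    }

    private static long align(long size) {
      return (size + 7L) & ~7L;       // round up to an 8-byte boundary
    }
  }

This mirrors the comment in the hunk: the default indexEntryOnHeapSize/indexEntryOffHeapSize assume an on-heap index, and subclasses such as the cell-chunk-map segment override them when the index itself can live off-heap.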
---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index e7c7caf..1d4dc1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -168,7 +168,7 @@ public class WALEdit implements HeapSize { public long heapSize() { long ret = ClassSize.ARRAYLIST; for (Cell cell : cells) { - ret += PrivateCellUtil.estimatedHeapSizeOf(cell); + ret += PrivateCellUtil.estimatedSizeOfCell(cell); } return ret; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index f66a828..2e2d978 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -99,7 +99,7 @@ public class TestGlobalMemStoreSize { long globalMemStoreSize = 0; for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { - globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreSize(); + globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreDataSize(); } assertEquals(server.getRegionServerAccounting().getGlobalMemStoreDataSize(), globalMemStoreSize); @@ -130,7 +130,7 @@ public class TestGlobalMemStoreSize { for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { HRegion r = server.getRegion(regionInfo.getEncodedName()); - long l = r.getMemStoreSize(); + long l = r.getMemStoreDataSize(); if (l > 0) { // Only meta could have edits at this stage. Give it another flush // clear them. http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 3038744..965243f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -391,7 +391,7 @@ public class TestPartialResultsFromClientSide { // Estimate the cell heap size. One difference is that on server side, the KV Heap size is // estimated differently in case the cell is backed up by MSLAB byte[] (no overhead for // backing array). Thus below calculation is a bit brittle. 
- CELL_HEAP_SIZE = PrivateCellUtil.estimatedHeapSizeOf(result.rawCells()[0]) + CELL_HEAP_SIZE = PrivateCellUtil.estimatedSizeOfCell(result.rawCells()[0]) - (ClassSize.ARRAY+3); if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE); scanner.close(); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index 162be35..5a3ba82 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.FilterList.Operator; @@ -156,7 +155,7 @@ public class TestServerSideScanMetricsFromClientSide { assertTrue(result.rawCells() != null); assertTrue(result.rawCells().length == 1); - CELL_HEAP_SIZE = PrivateCellUtil.estimatedHeapSizeOf(result.rawCells()[0]); + CELL_HEAP_SIZE = PrivateCellUtil.estimatedSizeOfCell(result.rawCells()[0]); scanner.close(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index fbb87bb..762dbd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -215,30 +215,30 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))) .join(); - assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); // flush region and wait flush operation finished. LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName())); admin.flushRegion(hri.getRegionName()).get(); LOG.info("blocking until flush is complete: " + Bytes.toStringBinary(hri.getRegionName())); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. 
- assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); + assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); // write another put into the specific region ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))) .join(); - assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); admin.flush(tableName).get(); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. - assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); + assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); } private void waitUntilMobCompactionFinished(TableName tableName) http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 48d9a93..d6f32f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -36,9 +36,7 @@ import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.MemStoreSize; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -67,7 +65,7 @@ public class TestClientPushback { private static final TableName tableName = TableName.valueOf("client-pushback"); private static final byte[] family = Bytes.toBytes("f"); private static final byte[] qualifier = Bytes.toBytes("q"); - private static final long flushSizeBytes = 256; + private static final long flushSizeBytes = 512; @BeforeClass public static void setupCluster() throws Exception{ @@ -110,7 +108,7 @@ public class TestClientPushback { mutator.flush(); // get the current load on RS. 
Hopefully memstore isn't flushed since we wrote the the data - int load = (int) ((((HRegion) region).addAndGetMemStoreSize(new MemStoreSize(0, 0)) * 100) + int load = (int) ((region.getMemStoreHeapSize() * 100) / flushSizeBytes); LOG.debug("Done writing some data to "+tableName); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java index dc3f8da..207e1fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java @@ -93,7 +93,7 @@ public class TestFlushFromClient { t.put(puts); } assertFalse(getRegionInfo().isEmpty()); - assertTrue(getRegionInfo().stream().allMatch(r -> r.getMemStoreSize() != 0)); + assertTrue(getRegionInfo().stream().allMatch(r -> r.getMemStoreDataSize() != 0)); } @After @@ -108,7 +108,7 @@ public class TestFlushFromClient { public void testFlushTable() throws Exception { try (Admin admin = TEST_UTIL.getAdmin()) { admin.flush(tableName); - assertFalse(getRegionInfo().stream().anyMatch(r -> r.getMemStoreSize() != 0)); + assertFalse(getRegionInfo().stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } } @@ -116,7 +116,7 @@ public class TestFlushFromClient { public void testAsyncFlushTable() throws Exception { AsyncAdmin admin = asyncConn.getAdmin(); admin.flush(tableName).get(); - assertFalse(getRegionInfo().stream().anyMatch(r -> r.getMemStoreSize() != 0)); + assertFalse(getRegionInfo().stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } @Test @@ -125,7 +125,7 @@ public class TestFlushFromClient { for (HRegion r : getRegionInfo()) { admin.flushRegion(r.getRegionInfo().getRegionName()); TimeUnit.SECONDS.sleep(1); - assertEquals(0, r.getMemStoreSize()); + assertEquals(0, r.getMemStoreDataSize()); } } } @@ -136,7 +136,7 @@ public class TestFlushFromClient { for (HRegion r : getRegionInfo()) { admin.flushRegion(r.getRegionInfo().getRegionName()).get(); TimeUnit.SECONDS.sleep(1); - assertEquals(0, r.getMemStoreSize()); + assertEquals(0, r.getMemStoreDataSize()); } } @@ -148,7 +148,7 @@ public class TestFlushFromClient { .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) .collect(Collectors.toList())) { admin.flushRegionServer(rs.getServerName()); - assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreSize() != 0)); + assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } } } @@ -161,7 +161,7 @@ public class TestFlushFromClient { .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) .collect(Collectors.toList())) { admin.flushRegionServer(rs.getServerName()).get(); - assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreSize() != 0)); + assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java index a336274..cadce8a 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -53,7 +53,7 @@ public class TestSizeFailures { private static byte [] FAMILY = Bytes.toBytes("testFamily"); protected static int SLAVES = 1; private static TableName TABLENAME; - private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10; + private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 9; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -129,7 +129,7 @@ public class TestSizeFailures { long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); - // Verify that we see 1M rows and 10M cells + // Verify that we see 1M rows and 9M cells assertEquals(NUM_ROWS, rowsObserved); assertEquals(NUM_ROWS * NUM_COLS, entriesObserved); } @@ -152,7 +152,7 @@ public class TestSizeFailures { long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); - // Verify that we see 1M rows and 10M cells + // Verify that we see 1M rows and 9M cells assertEquals(NUM_ROWS, rowsObserved); assertEquals(NUM_ROWS * NUM_COLS, entriesObserved); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java index 336d342..4a92d4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.MemStoreSize; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WALEdit; @@ -111,7 +110,7 @@ public class TestNegativeMemStoreSizeWithSlowCoprocessor { if (Bytes.equals(put.getRow(), Bytes.toBytes("row2"))) { region.flush(false); - Assert.assertTrue(region.addAndGetMemStoreSize(new MemStoreSize()) >= 0); + Assert.assertTrue(region.getMemStoreDataSize() >= 0); } } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index d68191c..505c2f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -699,7 +699,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells. 
int oneCellOnCSLMHeapSize = 120; int oneCellOnCAHeapSize = 88; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact @@ -780,7 +780,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 3 cells. int oneCellOnCSLMHeapSize = 120; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 3 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, memstore.heapSize()); @@ -838,7 +838,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp()); } regionServicesForStores.addMemStoreSize(new MemStoreSize(hmc.getActive().keySize() - size, - hmc.getActive().heapSize() - heapOverhead)); + hmc.getActive().heapSize() - heapOverhead, 0)); return totalLen; } @@ -859,7 +859,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp()); } regionServicesForStores.addMemStoreSize(new MemStoreSize(hmc.getActive().keySize() - size, - hmc.getActive().heapSize() - heapOverhead)); + hmc.getActive().heapSize() - heapOverhead, 0)); return totalLen; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 106c215..9523091 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -221,7 +221,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore long cellBeforeFlushSize = cellBeforeFlushSize(); long cellAfterFlushSize = cellAfterFlushSize(); long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); MemStoreSize size = memstore.getFlushableSize(); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 2c12341..b0302f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -321,7 +321,7 @@ public class 
TestEndToEndSplitTransaction { admin.flushRegion(regionName); log("blocking until flush is complete: " + Bytes.toStringBinary(regionName)); Threads.sleepWithoutInterrupt(500); - while (rs.getOnlineRegion(regionName).getMemStoreSize() > 0) { + while (rs.getOnlineRegion(regionName).getMemStoreDataSize() > 0) { Threads.sleep(50); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 88e1aa2..31dfa2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -310,7 +310,7 @@ public class TestHRegion { region.put(put); // Close with something in memstore and something in the snapshot. Make sure all is cleared. region.close(); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -391,14 +391,14 @@ public class TestHRegion { HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); // Put one value byte [] value = Bytes.toBytes(method); Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); - long onePutSize = region.getMemStoreSize(); + long onePutSize = region.getMemStoreDataSize(); assertTrue(onePutSize > 0); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); @@ -414,7 +414,7 @@ public class TestHRegion { } catch (IOException expected) { } long expectedSize = onePutSize * 2; - assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreSize()); + assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreDataSize()); assertEquals("flushable size should be incremented", expectedSize, store.getFlushableSize().getDataSize()); @@ -459,13 +459,13 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); - final long sizeOfOnePut = region.getMemStoreSize(); + final long sizeOfOnePut = region.getMemStoreDataSize(); // Fail a flush which means the current memstore will hang out as memstore 'snapshot'. try { LOG.info("Flushing"); @@ -478,7 +478,7 @@ public class TestHRegion { // Make it so all writes succeed from here on out ffs.fault.set(false); // Check sizes. Should still be the one entry. - Assert.assertEquals(sizeOfOnePut, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut, region.getMemStoreDataSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. 
Put p2 = new Put(row); @@ -486,13 +486,13 @@ public class TestHRegion { p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); region.put(p2); long expectedSize = sizeOfOnePut * 3; - Assert.assertEquals(expectedSize, region.getMemStoreSize()); + Assert.assertEquals(expectedSize, region.getMemStoreDataSize()); // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it region.flush(true); // Make sure our memory accounting is right. - Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreDataSize()); } finally { HBaseTestingUtility.closeRegionAndWAL(region); } @@ -524,7 +524,7 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 6d2f735..770a60a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -366,7 +366,7 @@ public class TestHRegionReplayEvents { verifyData(secondaryRegion, 0, lastReplayed, cq, families); HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); long storeSize = store.getSize(); long storeSizeUncompressed = store.getStoreSizeUncompressed(); @@ -395,7 +395,7 @@ public class TestHRegionReplayEvents { assertTrue(storeFlushableSize > newFlushableSize); // assert that the region memstore is smaller now - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize > newRegionMemstoreSize); // assert that the store sizes are bigger @@ -465,7 +465,7 @@ public class TestHRegionReplayEvents { // first verify that everything is replayed and visible before flush event replay HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); if (flushDesc.getAction() == FlushAction.START_FLUSH) { @@ -505,7 +505,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty 
+ assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 2: replay a flush start marker with a smaller seqId @@ -518,7 +518,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 3: replay a flush start marker with a larger seqId @@ -531,7 +531,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); LOG.info("-- Verifying edits from secondary"); @@ -600,7 +600,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker smaller than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -620,7 +620,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is same as before - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertEquals(regionMemstoreSize, newRegionMemstoreSize); assertNotNull(secondaryRegion.getPrepareFlushResult()); // not dropped @@ -690,7 +690,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker larger than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -710,7 +710,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is smaller than before, but not empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize > 0); assertTrue(regionMemstoreSize > newRegionMemstoreSize); @@ -791,7 +791,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay a flush commit marker without start flush marker assertNull(secondaryRegion.getPrepareFlushResult()); @@ -820,7 +820,7 @@ public class TestHRegionReplayEvents { } // assert that the region memstore is 
same as before (we could not drop) - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); if (droppableMemstore) { assertTrue(0 == newRegionMemstoreSize); } else { @@ -890,7 +890,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize == 0); // now replay the region open event that should contain new file locations @@ -907,7 +907,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -986,7 +986,7 @@ public class TestHRegionReplayEvents { assertTrue(newSnapshotSize.getDataSize() == 0); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -1434,7 +1434,7 @@ public class TestHRegionReplayEvents { LOG.info("-- Replaying edits in secondary"); // Test case 4: replay some edits, ensure that memstore is dropped. - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); putDataWithFlushes(primaryRegion, 400, 400, 0); numRows = 400; @@ -1452,11 +1452,11 @@ public class TestHRegionReplayEvents { } } - assertTrue(secondaryRegion.getMemStoreSize() > 0); + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); secondaryRegion.refreshStoreFiles(); - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); LOG.info("-- Verifying edits from primary"); verifyData(primaryRegion, 0, numRows, cq, families); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index ea40200..9479890 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -265,7 +265,7 @@ public class TestHStore { MemStoreSizing kvSize = new MemStoreSizing(); store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize); // add the heap size of active (mutable) segment - kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD); + kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD, 0); size = store.memstore.getFlushableSize(); assertEquals(kvSize, size); // Flush. Bug #1 from HBASE-10466. Make sure size calculation on failed flush is right. 
@@ -278,12 +278,12 @@ public class TestHStore { } // due to snapshot, change mutable to immutable segment kvSize.incMemStoreSize(0, - CSLMImmutableSegment.DEEP_OVERHEAD_CSLM-MutableSegment.DEEP_OVERHEAD); + CSLMImmutableSegment.DEEP_OVERHEAD_CSLM-MutableSegment.DEEP_OVERHEAD, 0); size = store.memstore.getFlushableSize(); assertEquals(kvSize, size); MemStoreSizing kvSize2 = new MemStoreSizing(); store.add(new KeyValue(row, family, qf2, 2, (byte[])null), kvSize2); - kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD); + kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD, 0); // Even though we add a new kv, we expect the flushable size to be 'same' since we have // not yet cleared the snapshot -- the above flush failed. assertEquals(kvSize, size); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 353cc28..fded9ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -149,7 +149,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1); @@ -192,7 +192,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -230,7 +230,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -265,7 +265,7 @@ public class TestPerColumnFamilyFlush { // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -289,7 +289,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. 
MemStoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); @@ -312,7 +312,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); long smallestSeqInRegionCurrentMemstore = region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -381,7 +381,7 @@ public class TestPerColumnFamilyFlush { long totalMemstoreSize; long cf1MemstoreSize, cf2MemstoreSize, cf3MemstoreSize; - totalMemstoreSize = desiredRegion.getMemStoreSize(); + totalMemstoreSize = desiredRegion.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize().getDataSize(); @@ -504,12 +504,12 @@ public class TestPerColumnFamilyFlush { @Override public boolean evaluate() throws Exception { - return desiredRegion.getMemStoreSize() == 0; + return desiredRegion.getMemStoreDataSize() == 0; } @Override public String explainFailure() throws Exception { - long memstoreSize = desiredRegion.getMemStoreSize(); + long memstoreSize = desiredRegion.getMemStoreDataSize(); if (memstoreSize > 0) { return "Still have unflushed entries in memstore, memstore size is " + memstoreSize; } @@ -551,7 +551,7 @@ public class TestPerColumnFamilyFlush { put.addColumn(FAMILY3, qf, value3); table.put(put); // slow down to let regionserver flush region. - while (region.getMemStoreSize() > memstoreFlushSize) { + while (region.getMemStoreHeapSize() > memstoreFlushSize) { Thread.sleep(100); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java index ede9cae..7bd9e16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java @@ -42,7 +42,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -55,7 +55,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -65,12 +65,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize() { 
Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L * 1024L), 0, (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -81,12 +81,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024, 2L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -96,12 +96,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L * 1024L), 0, (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -112,12 +112,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize(3L * 1024 * 1024, 2L * 1024 * 1024 * 1024); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java 
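The TestRegionServerAccounting cases above pin down the watermark behavior described earlier in this patch: with an off-heap memstore, a flush is forced either because the global off-heap size crosses 'hbase.regionserver.offheap.global.memstore.size' or because the global heap size crosses the on-heap limit 'hbase.regionserver.global.memstore.size'. A condensed sketch of that high-water-mark decision; the enum constants mirror the FlushType names in the diff, while the method shape itself is illustrative:

  enum FlushTypeSketch { NORMAL, ABOVE_OFFHEAP_HIGHER_MARK, ABOVE_ONHEAP_HIGHER_MARK }

  final class OffHeapHighWaterMarkSketch {
    // Illustrative check for a region server configured with an off-heap memstore.
    static FlushTypeSketch check(long globalOffHeapSize, long offHeapLimit,
        long globalHeapSize, long onHeapLimit) {
      if (globalOffHeapSize >= offHeapLimit) {
        // breached the configured off-heap limit
        return FlushTypeSketch.ABOVE_OFFHEAP_HIGHER_MARK;
      } else if (globalHeapSize >= onHeapLimit) {
        // heap occupancy (cell POJOs, index entries) breached the on-heap limit
        return FlushTypeSketch.ABOVE_ONHEAP_HIGHER_MARK;
      }
      return FlushTypeSketch.NORMAL;
    }
  }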
---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 2d5a369..788a708 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -264,8 +264,8 @@ public class TestWALLockup { @Override public void run() { try { - if (region.getMemStoreSize() <= 0) { - throw new IOException("memstore size=" + region.getMemStoreSize()); + if (region.getMemStoreDataSize() <= 0) { + throw new IOException("memstore size=" + region.getMemStoreDataSize()); } region.flush(false); } catch (IOException e) { http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 3f73d37..15bf2a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -172,7 +172,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); @@ -363,13 +363,13 @@ public class TestWalAndCompactingMemStoreFlush { s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseV + ". After additional inserts and last flush, the entire region size is:" + region - .getMemStoreSize() + .getMemStoreDataSize() + "\n----------------------------------\n"; // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores // Also compacted memstores are flushed to disk. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); System.out.println(s); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -411,7 +411,7 @@ public class TestWalAndCompactingMemStoreFlush { /*------------------------------------------------------------------------------*/ /*------------------------------------------------------------------------------*/ /* PHASE I - collect sizes */ - long totalMemstoreSizePhaseI = region.getMemStoreSize(); + long totalMemstoreSizePhaseI = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseI = region.getOldestSeqIdOfStore(FAMILY2); @@ -474,7 +474,7 @@ public class TestWalAndCompactingMemStoreFlush { .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); - long totalMemstoreSizePhaseII = region.getMemStoreSize(); + long totalMemstoreSizePhaseII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE II - validation */ @@ -517,7 +517,7 @@ public class TestWalAndCompactingMemStoreFlush { /* PHASE III - collect sizes */ // How much does the CF1 memstore occupy now? Will be used later. MemStoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); - long totalMemstoreSizePhaseIII = region.getMemStoreSize(); + long totalMemstoreSizePhaseIII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE III - validation */ @@ -575,7 +575,7 @@ public class TestWalAndCompactingMemStoreFlush { MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); - long totalMemstoreSizePhaseV = region.getMemStoreSize(); + long totalMemstoreSizePhaseV = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE V - validation */ @@ -663,7 +663,7 @@ public class TestWalAndCompactingMemStoreFlush { ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).setCompositeSnapshot(false); ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).setCompositeSnapshot(false); - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); @@ -794,7 +794,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // test in-memory flashing into CAM here ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).setIndexType(