This is an automated email from the ASF dual-hosted git repository.
apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new c816083cc6b Backport "HBASE-28600 Enable setting blockcache on-heap sizes in bytes (#6422)" to branch-2 (#6544)
c816083cc6b is described below
commit c816083cc6bb347d760a55983abda55f29505171
Author: JinHyuk Kim <[email protected]>
AuthorDate: Tue Jan 14 11:26:43 2025 +0900
Backport "HBASE-28600 Enable setting blockcache on-heap sizes in bytes
(#6422)" to branch-2 (#6544)
Signed-off-by: Andrew Purtell <[email protected]>
---
.../java/org/apache/hadoop/hbase/HConstants.java | 5 +
.../java/org/apache/hadoop/hbase/StorageSize.java | 123 +++++
.../java/org/apache/hadoop/hbase/StorageUnit.java | 530 +++++++++++++++++++++
hbase-common/src/main/resources/hbase-default.xml | 8 +
.../org/apache/hadoop/hbase/TestStorageSize.java | 67 +++
.../hadoop/hbase/io/util/MemorySizeUtil.java | 65 ++-
.../hbase/regionserver/HeapMemoryManager.java | 20 +-
.../hadoop/hbase/io/hfile/TestCacheConfig.java | 8 +
8 files changed, 800 insertions(+), 26 deletions(-)
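For context, this backport lets the on-heap block cache be sized as an absolute number of bytes (plain or human-readable) instead of only as a fraction of the maximum heap. A minimal sketch, not part of this commit, of how the new key might be set programmatically, assuming an HBase 2.x dependency on the classpath; the same value can equally go in hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class BlockCacheSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Absolute size; takes precedence over the fractional setting below.
        conf.set(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, "2g"); // or e.g. "2147483648"
        // Legacy fraction of max heap, still honored when the key above is unset.
        conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f);
      }
    }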
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 3b2a58827f1..12f8bc6df03 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1060,6 +1060,11 @@ public final class HConstants {
public static final float HFILE_BLOCK_CACHE_SIZE_DEFAULT = 0.4f;
+ /**
+ * Configuration key for the memory size of the block cache
+ */
+ public static final String HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY = "hfile.block.cache.memory.size";
+
/**
* Configuration key for setting the fix size of the block size, default do nothing and it should
* be explicitly set by user or only used within ClientSideRegionScanner. if it's set less than
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageSize.java
new file mode 100644
index 00000000000..83ebdb58c2a
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageSize.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
+import java.util.Locale;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ * This class is adapted from the Hadoop 3.x source code to HBase as part of a backporting effort to
+ * support storage size parsing functionality in older versions of HBase.
+ * <p>
+ * Source: <a href=
+ * "https://github.com/apache/hadoop/blob/branch-3.1.0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java">
+ * Hadoop 3.1.0 StorageSize.java </a>
+ * </p>
+ */
[email protected]
+public class StorageSize {
+ private final StorageUnit unit;
+ private final double value;
+
+ /**
+ * Constructs a Storage Measure, which contains the value and the unit of measure.
+ * @param unit - Unit of Measure
+ * @param value - Numeric value.
+ */
+ public StorageSize(StorageUnit unit, double value) {
+ this.unit = unit;
+ this.value = value;
+ }
+
+ private static void checkState(boolean state, String errorString) {
+ if (!state) {
+ throw new IllegalStateException(errorString);
+ }
+ }
+
+ public static double getStorageSize(String value, double defaultValue, StorageUnit targetUnit) {
+ Preconditions.checkNotNull(targetUnit, "Conversion unit cannot be null.");
+
+ if (isBlank(value)) {
+ return targetUnit.getDefault(defaultValue);
+ }
+
+ final StorageSize measure = parse(value);
+ double byteValue = measure.getUnit().toBytes(measure.getValue());
+ return targetUnit.fromBytes(byteValue);
+ }
+
+ public static StorageSize parse(String value) {
+ checkState(isNotBlank(value), "value cannot be blank");
+ String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH);
+ StorageUnit parsedUnit = null;
+ for (StorageUnit unit : StorageUnit.values()) {
+ if (
+ sanitizedValue.endsWith(unit.getShortName()) || sanitizedValue.endsWith(unit.getLongName())
+ || sanitizedValue.endsWith(unit.getSuffixChar())
+ ) {
+ parsedUnit = unit;
+ break;
+ }
+ }
+
+ if (parsedUnit == null) {
+ throw new IllegalArgumentException(
+ value + " is not in expected format." + "Expected format is
<number><unit>. e.g. 1000MB");
+ }
+
+ String suffix = "";
+ boolean found = false;
+
+ // We are trying to get the longest match first, so the order of
+ // matching is getLongName, getShortName and then getSuffixChar.
+ if (!found && sanitizedValue.endsWith(parsedUnit.getLongName())) {
+ found = true;
+ suffix = parsedUnit.getLongName();
+ }
+
+ if (!found && sanitizedValue.endsWith(parsedUnit.getShortName())) {
+ found = true;
+ suffix = parsedUnit.getShortName();
+ }
+
+ if (!found && sanitizedValue.endsWith(parsedUnit.getSuffixChar())) {
+ found = true;
+ suffix = parsedUnit.getSuffixChar();
+ }
+
+ checkState(found, "Something is wrong, we have to find a " + "match. Internal error.");
+
+ String valString = sanitizedValue.substring(0, value.length() - suffix.length());
+ return new StorageSize(parsedUnit, Double.parseDouble(valString));
+
+ }
+
+ public StorageUnit getUnit() {
+ return unit;
+ }
+
+ public double getValue() {
+ return value;
+ }
+}
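As a usage illustration (not part of the commit, but directly exercising the class above), parsing accepts a numeric value followed by a long name, short name, or single-character suffix, matched case-insensitively, and blank input falls back to the supplied default:

    import org.apache.hadoop.hbase.StorageSize;
    import org.apache.hadoop.hbase.StorageUnit;

    public class StorageSizeSketch {
      public static void main(String[] args) {
        StorageSize size = StorageSize.parse("512mb");
        System.out.println(size.getUnit() + " / " + size.getValue()); // megabytes / 512.0
        // Convert to a target unit via bytes.
        System.out.println(StorageSize.getStorageSize("1g", -1, StorageUnit.BYTES)); // 1.073741824E9
        // Blank input returns the default in the target unit.
        System.out.println(StorageSize.getStorageSize("", 64, StorageUnit.MB)); // 64.0
      }
    }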
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageUnit.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageUnit.java
new file mode 100644
index 00000000000..76403f19538
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/StorageUnit.java
@@ -0,0 +1,530 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This class is adapted from the Hadoop 3.x source code to HBase as part of a backporting effort to
+ * support storage size parsing functionality in older versions of HBase.
+ * <p>
+ * Source: <a href=
+ * "https://github.com/apache/hadoop/blob/branch-3.1.0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java">
+ * Hadoop 3.1.0 StorageUnit.java </a>
+ * </p>
+ */
[email protected]
+public enum StorageUnit {
+ /*
+ * We rely on BYTES being the last to get the longest matching short names first. The short name
+ * of bytes is b, and it will match with other longer names. If we change this order, the
+ * corresponding code in StorageSize#parse needs to be changed too, since values() call returns
+ * the Enums in declared order, and we depend on it.
+ */
+
+ EB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, EXABYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return multiply(value, EXABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return multiply(value, EXABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return multiply(value, EXABYTES / GIGABYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return multiply(value, EXABYTES / TERABYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return multiply(value, EXABYTES / PETABYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return value;
+ }
+
+ @Override
+ public String getLongName() {
+ return "exabytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "eb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "e";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toEBs(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, EXABYTES);
+ }
+ },
+ PB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, PETABYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return multiply(value, PETABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return multiply(value, PETABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return multiply(value, PETABYTES / GIGABYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return multiply(value, PETABYTES / TERABYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return value;
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES / PETABYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "petabytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "pb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "p";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toPBs(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, PETABYTES);
+ }
+ },
+ TB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, TERABYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return multiply(value, TERABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return multiply(value, TERABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return multiply(value, TERABYTES / GIGABYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return value;
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return divide(value, PETABYTES / TERABYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES / TERABYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "terabytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "tb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "t";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toTBs(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, TERABYTES);
+ }
+ },
+ GB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, GIGABYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return multiply(value, GIGABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return multiply(value, GIGABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return value;
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return divide(value, TERABYTES / GIGABYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return divide(value, PETABYTES / GIGABYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES / GIGABYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "gigabytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "gb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "g";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toGBs(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, GIGABYTES);
+ }
+ },
+ MB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, MEGABYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return multiply(value, MEGABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return value;
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return divide(value, GIGABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return divide(value, TERABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return divide(value, PETABYTES / MEGABYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES / MEGABYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "megabytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "mb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "m";
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, MEGABYTES);
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toMBs(value);
+ }
+ },
+ KB {
+ @Override
+ public double toBytes(double value) {
+ return multiply(value, KILOBYTES);
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return value;
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return divide(value, MEGABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return divide(value, GIGABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return divide(value, TERABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return divide(value, PETABYTES / KILOBYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES / KILOBYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "kilobytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "kb";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "k";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toKBs(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return divide(value, KILOBYTES);
+ }
+ },
+ BYTES {
+ @Override
+ public double toBytes(double value) {
+ return value;
+ }
+
+ @Override
+ public double toKBs(double value) {
+ return divide(value, KILOBYTES);
+ }
+
+ @Override
+ public double toMBs(double value) {
+ return divide(value, MEGABYTES);
+ }
+
+ @Override
+ public double toGBs(double value) {
+ return divide(value, GIGABYTES);
+ }
+
+ @Override
+ public double toTBs(double value) {
+ return divide(value, TERABYTES);
+ }
+
+ @Override
+ public double toPBs(double value) {
+ return divide(value, PETABYTES);
+ }
+
+ @Override
+ public double toEBs(double value) {
+ return divide(value, EXABYTES);
+ }
+
+ @Override
+ public String getLongName() {
+ return "bytes";
+ }
+
+ @Override
+ public String getShortName() {
+ return "b";
+ }
+
+ @Override
+ public String getSuffixChar() {
+ return "b";
+ }
+
+ @Override
+ public double getDefault(double value) {
+ return toBytes(value);
+ }
+
+ @Override
+ public double fromBytes(double value) {
+ return value;
+ }
+ };
+
+ private static final double BYTE = 1L;
+ private static final double KILOBYTES = BYTE * 1024L;
+ private static final double MEGABYTES = KILOBYTES * 1024L;
+ private static final double GIGABYTES = MEGABYTES * 1024L;
+ private static final double TERABYTES = GIGABYTES * 1024L;
+ private static final double PETABYTES = TERABYTES * 1024L;
+ private static final double EXABYTES = PETABYTES * 1024L;
+ private static final int PRECISION = 4;
+
+ /**
+ * Using BigDecimal to avoid issues with overflow and underflow.
+ * @param value - value
+ * @param divisor - divisor.
+ * @return -- returns a double that represents this value
+ */
+ private static double divide(double value, double divisor) {
+ BigDecimal val = new BigDecimal(value);
+ BigDecimal bDivisor = new BigDecimal(divisor);
+ return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
+ }
+
+ /**
+ * Using BigDecimal so we can throw if we are overflowing the Long.Max.
+ * @param first - First Num.
+ * @param second - Second Num.
+ * @return Returns a double
+ */
+ private static double multiply(double first, double second) {
+ BigDecimal firstVal = new BigDecimal(first);
+ BigDecimal secondVal = new BigDecimal(second);
+ return firstVal.multiply(secondVal).setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
+ }
+
+ public abstract double toBytes(double value);
+
+ public abstract double toKBs(double value);
+
+ public abstract double toMBs(double value);
+
+ public abstract double toGBs(double value);
+
+ public abstract double toTBs(double value);
+
+ public abstract double toPBs(double value);
+
+ public abstract double toEBs(double value);
+
+ public abstract String getLongName();
+
+ public abstract String getShortName();
+
+ public abstract String getSuffixChar();
+
+ public abstract double getDefault(double value);
+
+ public abstract double fromBytes(double value);
+
+ public String toString() {
+ return getLongName();
+ }
+}
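A small sketch (again not part of the commit) of the conversion helpers added above; all arithmetic goes through BigDecimal and is rounded half-up to four decimal places:

    import org.apache.hadoop.hbase.StorageUnit;

    public class StorageUnitSketch {
      public static void main(String[] args) {
        System.out.println(StorageUnit.GB.toMBs(1.5));     // 1536.0
        System.out.println(StorageUnit.MB.toGBs(512));     // 0.5
        System.out.println(StorageUnit.BYTES.toKBs(1536)); // 1.5
        System.out.println(StorageUnit.KB.toBytes(2));     // 2048.0
      }
    }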
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 4cce153e60c..15b970c7e8d 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1000,6 +1000,14 @@ possible configurations would overwhelm and obscure the important.
Set to 0 to disable but it's not recommended; you need at least
enough cache to hold the storefile indices.</description>
</property>
+ <property>
+ <name>hfile.block.cache.memory.size</name>
+ <value></value>
+ <description>Defines the maximum heap memory allocated for the HFile block cache,
+ specified in bytes or human-readable formats like '10m' for megabytes or '10g' for gigabytes.
+ This configuration allows setting an absolute memory size instead of a percentage of the maximum heap.
+ Takes precedence over hfile.block.cache.size if both are specified.</description>
+ </property>
<property>
<name>hfile.block.index.cacheonwrite</name>
<value>false</value>
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestStorageSize.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestStorageSize.java
new file mode 100644
index 00000000000..386221947d5
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestStorageSize.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestStorageSize {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestStorageSize.class);
+
+ @Test
+ public void testParse() {
+ // megabytes
+ StorageSize size = StorageSize.parse("20m");
+ assertEquals(StorageUnit.MB, size.getUnit());
+ assertEquals(20.0d, size.getValue(), 0.0001);
+ size = StorageSize.parse("40mb");
+ assertEquals(StorageUnit.MB, size.getUnit());
+ assertEquals(40.0d, size.getValue(), 0.0001);
+ size = StorageSize.parse("60megabytes");
+ assertEquals(StorageUnit.MB, size.getUnit());
+ assertEquals(60.0d, size.getValue(), 0.0001);
+
+ // gigabytes
+ size = StorageSize.parse("10g");
+ assertEquals(StorageUnit.GB, size.getUnit());
+ assertEquals(10.0d, size.getValue(), 0.0001);
+ size = StorageSize.parse("30gb");
+ assertEquals(StorageUnit.GB, size.getUnit());
+ assertEquals(30.0d, size.getValue(), 0.0001);
+ size = StorageSize.parse("50gigabytes");
+ assertEquals(StorageUnit.GB, size.getUnit());
+ assertEquals(50.0d, size.getValue(), 0.0001);
+ }
+
+ @Test
+ public void testGetStorageSize() {
+ assertEquals(1024 * 1024 * 4, StorageSize.getStorageSize("4m", -1, StorageUnit.BYTES), 0.0001);
+ assertEquals(1024 * 6, StorageSize.getStorageSize("6g", -1, StorageUnit.MB), 0.0001);
+ assertEquals(-1, StorageSize.getStorageSize(null, -1, StorageUnit.BYTES), 0.0001);
+ assertEquals(-2, StorageSize.getStorageSize("", -2, StorageUnit.BYTES), 0.0001);
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index bf014dfb530..15aeb2153e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -22,6 +22,8 @@ import java.lang.management.MemoryType;
import java.lang.management.MemoryUsage;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.StorageSize;
+import org.apache.hadoop.hbase.StorageUnit;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
@@ -93,11 +95,16 @@ public class MemorySizeUtil {
) {
throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds "
+ "the threshold required for successful cluster operation. "
- + "The combined value cannot exceed 0.8. Please check "
- + "the settings for hbase.regionserver.global.memstore.size and "
- + "hfile.block.cache.size in your configuration. "
- + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize
- + " hfile.block.cache.size is " + blockCacheUpperLimit);
+ + "The combined value cannot exceed 0.8. Please check " + "the settings for "
+ + MEMSTORE_SIZE_KEY + " and either " + HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY + " or "
+ + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " in your configuration. " + MEMSTORE_SIZE_KEY
+ + "=" + globalMemstoreSize + ", " + HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY + "="
+ + conf.get(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY) + ", "
+ + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + "="
+ + conf.get(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY) + ". (Note: If both "
+ + HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY + " and "
+ + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " are set, " + "the system will use "
+ + HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY + ")");
}
}
@@ -195,10 +202,30 @@ public class MemorySizeUtil {
* Retrieve configured size for on heap block cache as percentage of total heap.
*/
public static float getBlockCacheHeapPercent(final Configuration conf) {
- // L1 block cache is always on heap
- float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
+ // Check if an explicit block cache size is configured.
+ long l1CacheSizeInBytes = getBlockCacheSizeInBytes(conf);
+ if (l1CacheSizeInBytes > 0) {
+ final MemoryUsage usage = safeGetHeapMemoryUsage();
+ return usage == null ? 0 : (float) l1CacheSizeInBytes / usage.getMax();
+ }
+
+ return conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
- return l1CachePercent;
+ }
+
+ /**
+ * Retrieve an explicit block cache size in bytes in the configuration.
+ * @param conf used to read cache configs
+ * @return the number of bytes to use for LRU, negative if disabled.
+ * @throws IllegalArgumentException if HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY format is invalid
+ */
+ public static long getBlockCacheSizeInBytes(Configuration conf) {
+ final String key = HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY;
+ try {
+ return Long.parseLong(conf.get(key));
+ } catch (NumberFormatException e) {
+ return (long) StorageSize.getStorageSize(conf.get(key), -1, StorageUnit.BYTES);
+ }
}
/**
@@ -207,8 +234,7 @@ public class MemorySizeUtil {
* @throws IllegalArgumentException if HFILE_BLOCK_CACHE_SIZE_KEY is > 1.0
*/
public static long getOnHeapCacheSize(final Configuration conf) {
- float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
- HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
+ final float cachePercentage = getBlockCacheHeapPercent(conf);
if (cachePercentage <= 0.0001f) {
return -1;
}
@@ -216,18 +242,22 @@ public class MemorySizeUtil {
throw new IllegalArgumentException(
HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " must be between 0.0 and 1.0,
and not > 1.0");
}
- long max = -1L;
+
final MemoryUsage usage = safeGetHeapMemoryUsage();
- if (usage != null) {
- max = usage.getMax();
+ if (usage == null) {
+ return -1;
}
+ final long heapMax = usage.getMax();
float onHeapCacheFixedSize =
(float) conf.getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY,
- HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / max;
+ HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / heapMax;
// Calculate the amount of heap to give the heap.
- return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage)
- ? (long) (max * onHeapCacheFixedSize)
- : (long) (max * cachePercentage);
+ if (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) {
+ return (long) (heapMax * onHeapCacheFixedSize);
+ } else {
+ final long cacheSizeInBytes = getBlockCacheSizeInBytes(conf);
+ return cacheSizeInBytes > 0 ? cacheSizeInBytes : (long) (heapMax * cachePercentage);
+ }
}
/**
@@ -243,5 +273,4 @@ public class MemorySizeUtil {
}
return (long) (bucketCacheSize * 1024 * 1024);
}
-
}
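To make the lookup order above concrete, a hedged sketch of how getBlockCacheSizeInBytes resolves the new key, assuming the hbase-server artifact on the classpath (MemorySizeUtil is HBase-private API): a plain long parses directly, anything else is handed to StorageSize, and an unset key yields -1 (disabled):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.util.MemorySizeUtil;

    public class CacheSizeResolutionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Unset: Long.parseLong(null) throws, StorageSize returns the -1 default.
        System.out.println(MemorySizeUtil.getBlockCacheSizeInBytes(conf)); // -1
        // Plain byte count: parsed directly by Long.parseLong.
        conf.set(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, "3145728");
        System.out.println(MemorySizeUtil.getBlockCacheSizeInBytes(conf)); // 3145728
        // Human-readable form: routed through StorageSize instead.
        conf.set(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, "2m");
        System.out.println(MemorySizeUtil.getBlockCacheSizeInBytes(conf)); // 2097152
      }
    }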
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index fe2737b0a7d..1b28846efad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY;
import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
import java.lang.management.MemoryUsage;
@@ -128,8 +129,7 @@ public class HeapMemoryManager {
private boolean doInit(Configuration conf) {
boolean tuningEnabled = true;
globalMemStorePercent = MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false);
- blockCachePercent =
- conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
+ blockCachePercent = MemorySizeUtil.getBlockCacheHeapPercent(conf);
MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(conf);
// Initialize max and min range for memstore heap space
globalMemStorePercentMinRange =
@@ -160,16 +160,20 @@ public class HeapMemoryManager {
blockCachePercentMinRange = conf.getFloat(BLOCK_CACHE_SIZE_MIN_RANGE_KEY, blockCachePercent);
blockCachePercentMaxRange = conf.getFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY, blockCachePercent);
if (blockCachePercent < blockCachePercentMinRange) {
- LOG.warn("Setting " + BLOCK_CACHE_SIZE_MIN_RANGE_KEY + " to " +
blockCachePercent
- + ", same value as " + HFILE_BLOCK_CACHE_SIZE_KEY
- + " because supplied value greater than initial block cache size.");
+ LOG.warn(
+ "Setting {} to {} (lookup order: {} -> {}), "
+ + "because supplied value greater than initial block cache size.",
+ BLOCK_CACHE_SIZE_MIN_RANGE_KEY, blockCachePercent, HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY,
+ HFILE_BLOCK_CACHE_SIZE_KEY);
blockCachePercentMinRange = blockCachePercent;
conf.setFloat(BLOCK_CACHE_SIZE_MIN_RANGE_KEY, blockCachePercentMinRange);
}
if (blockCachePercent > blockCachePercentMaxRange) {
- LOG.warn("Setting " + BLOCK_CACHE_SIZE_MAX_RANGE_KEY + " to " +
blockCachePercent
- + ", same value as " + HFILE_BLOCK_CACHE_SIZE_KEY
- + " because supplied value less than initial block cache size.");
+ LOG.warn(
+ "Setting {} to {} (lookup order: {} -> {}), "
+ + "because supplied value less than initial block cache size.",
+ BLOCK_CACHE_SIZE_MAX_RANGE_KEY, blockCachePercent, HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY,
+ HFILE_BLOCK_CACHE_SIZE_KEY);
blockCachePercentMaxRange = blockCachePercent;
conf.setFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY, blockCachePercentMaxRange);
}
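The effective percentage HeapMemoryManager now receives is derived the same way for both keys; a rough sketch of the conversion done in getBlockCacheHeapPercent when an absolute size is present (variable names mirror the diff, the standalone class is hypothetical):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryUsage;

    public class EffectivePercentSketch {
      public static void main(String[] args) {
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        long l1CacheSizeInBytes = 512L * 1024 * 1024; // e.g. hfile.block.cache.memory.size=512m
        // Absolute bytes become a fraction of the max heap, which the tuner then range-checks.
        float blockCachePercent = (float) l1CacheSizeInBytes / heap.getMax();
        System.out.println("effective block cache fraction: " + blockCachePercent);
      }
    }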
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 542bcda138e..bee8ca0667d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -394,6 +394,14 @@ public class TestCacheConfig {
long onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
assertEquals(null, copyConf.get(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY));
assertTrue(onHeapCacheSize > 0 && onHeapCacheSize != fixedSize);
+ // when HBASE_BLOCK_CACHE_MEMORY_SIZE is set in number
+ copyConf.setLong(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, 3 * 1024 * 1024);
+ onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
+ assertEquals(3 * 1024 * 1024, onHeapCacheSize);
+ // when HBASE_BLOCK_CACHE_MEMORY_SIZE is set in human-readable format
+ copyConf.set(HConstants.HFILE_BLOCK_CACHE_MEMORY_SIZE_KEY, "2m");
+ onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);
+ assertEquals(2 * 1024 * 1024, onHeapCacheSize);
// when HBASE_BLOCK_CACHE_FIXED_SIZE_KEY is set, it will be a fixed size
copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, fixedSize);
onHeapCacheSize = MemorySizeUtil.getOnHeapCacheSize(copyConf);