virajjasani commented on a change in pull request #1588:
URL: https://github.com/apache/hbase/pull/1588#discussion_r415322441
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java
##########
@@ -101,5 +101,13 @@ public boolean isOffPeakHour(int targetHour) {
}
return targetHour < endHour || startHour <= targetHour;
}
+
+ @Override
+ public String toString() {
+ return "{" + "startHour=" + startHour + ", endHour="
Review comment:
Please use the toString() auto-generator provided by your IDE.
##########
File path: hbase-common/src/main/resources/hbase-default.xml
##########
@@ -998,24 +998,6 @@ possible configurations would overwhelm and obscure the
important.
See http://hbase.apache.org/book.html#offheap.blockcache for more
information.
</description>
</property>
- <property>
- <name>hbase.hstore.compaction.throughput.lower.bound</name>
- <value>52428800</value>
- <description>The target lower bound on aggregate compaction throughput, in
bytes/sec. Allows
- you to tune the minimum available compaction throughput when the
- PressureAwareCompactionThroughputController throughput controller is
active. (It is active by
- default.)</description>
- </property>
- <property>
- <name>hbase.hstore.compaction.throughput.higher.bound</name>
- <value>104857600</value>
- <description>The target upper bound on aggregate compaction throughput, in
bytes/sec. Allows
- you to control aggregate compaction throughput demand when the
- PressureAwareCompactionThroughputController throughput controller is
active. (It is active by
- default.) The maximum throughput will be tuned between the lower and upper
bounds when
- compaction pressure is within the range [0.0, 1.0]. If compaction pressure
is 1.0 or greater
- the higher bound will be ignored until pressure returns to the normal
range.</description>
- </property>
Review comment:
I understand the code has default values, but let's keep this as is. It is
easy to refer to the default values here.
##########
File path:
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestDynamicConfWithThroughputController.java
##########
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.throttle;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileReader;
+import java.net.URL;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
+import
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestDynamicConfWithThroughputController {
+
+ @ClassRule public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule
+ .forClass(TestDynamicConfWithThroughputController.class);
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestDynamicConfWithThroughputController.class);
+
+ private static final HBaseTestingUtility TEST_UTIL =
+ new HBaseTestingUtility();
+
+ private static final double EPSILON = 1E-6;
+
+ long throughputLimit = 1024L * 1024;
+
+ long newMaxThroughputUpperBound = throughputLimit * 6;
+
+ long newMaxThroughputLowerBound = throughputLimit * 3;
+
+ long newOffpeakLimit = throughputLimit * 4;
+
+ @Test
+ public void testThroughputTuning() throws Exception {
+
+ File sourceConfFile = null;
+ File bakConfFile = null;
+ try {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,
+ DefaultStoreEngine.class.getName());
+ conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY,
100);
+ conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY,
200);
+ conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
+ conf.set(
+
CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
+ PressureAwareCompactionThroughputController.class.getName());
+ int tunePeriod = 5000;
+ conf.setInt(
+ PressureAwareCompactionThroughputController
+ .HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
+ tunePeriod);
+
+ TEST_UTIL.startMiniCluster(1);
+ HRegionServer regionServer =
+ TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+
+ PressureAwareCompactionThroughputController throughputController =
+ (PressureAwareCompactionThroughputController)
regionServer.compactSplitThread
+ .getCompactionThroughputController();
+ throughputLimit =
+ PressureAwareCompactionThroughputController
+ .DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND;
+ LOG.debug("conf:" + conf.getResource("hbase-site.xml"));
+ assertEquals(throughputLimit, throughputController.getMaxThroughput(),
+ EPSILON);
+
+ LOG.debug("conf:" + conf.getResource("hbase-site.xml"));
+ URL oldConfFile = conf.getResource("hbase-site.xml");
+
+ sourceConfFile = new File(oldConfFile.getPath());
+
+ File newConfFile = new File(oldConfFile.getPath() + "_new");
+ bakConfFile = new File(oldConfFile.getPath() + "_bak_src");
+ FileUtils.copyFile(sourceConfFile, newConfFile);
+ FileUtils.copyFile(sourceConfFile, bakConfFile);
+
+ addNewConf(newConfFile.getAbsolutePath());
+ sourceConfFile.delete();
+ FileUtils.copyFile(newConfFile, sourceConfFile);
+
+ LOG.info("updateConfiguration");
+ Admin admin = TEST_UTIL.getConnection().getAdmin();
+ LOG.debug("regionServer.getServerName():" +
regionServer.getServerName());
+
+ admin.updateConfiguration(regionServer.getServerName());
+
+ assertEquals(newMaxThroughputUpperBound,
+ throughputController.getMaxThroughputUpperBound(), EPSILON);
+
+ assertEquals(newMaxThroughputLowerBound,
+ throughputController.getMaxThroughputLowerBound(), EPSILON);
+
+ LOG.info("assertTrue OffPeakHour");
+
+ Thread.sleep(tunePeriod + 3000);
+ if (throughputController.getOffPeakHours().isOffPeakHour()) {
+ assertEquals(newOffpeakLimit, throughputController.getMaxThroughput(),
+ EPSILON);
+ } else {
+ assertEquals(newMaxThroughputLowerBound,
+ throughputController.getMaxThroughput(), EPSILON);
+ }
+
+ } finally {
+
+ sourceConfFile.delete();
+ FileUtils.moveFile(bakConfFile, sourceConfFile);
+
+ }
+ }
+
+ public static String txt2String(File file) {
+ StringBuilder result = new StringBuilder();
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(file));
+ String s = null;
+ while ((s = br.readLine()) != null) {
+ result.append(System.lineSeparator() + s);
+ }
+ br.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ return result.toString();
+ }
+
+ public void addNewConf(String newConfFile) {
+
+ DocumentBuilderFactory bdg = DocumentBuilderFactory.newInstance();
+ try {
+ DocumentBuilder builder = bdg.newDocumentBuilder();
+
+ Document dm = builder.parse(new FileInputStream(newConfFile));
+ Element root = dm.getDocumentElement();
+
+ Element propertyHigher = makeElement(dm,
+ PressureAwareCompactionThroughputController
+ .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
+ String.valueOf(newMaxThroughputUpperBound));
+ root.appendChild(propertyHigher);
+
+ Element propertLower = makeElement(dm,
+ PressureAwareCompactionThroughputController
+ .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
+ String.valueOf(newMaxThroughputLowerBound));
+ root.appendChild(propertLower);
+
+ Element propertyStartHour = makeElement(dm,
+ CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR,
+ String.valueOf(0));
+ root.appendChild(propertyStartHour);
+
+ Element propertyEndHour =
+ makeElement(dm,
CompactionConfiguration.HBASE_HSTORE_OFFPEAK_END_HOUR,
+ String.valueOf(6));
+ root.appendChild(propertyEndHour);
+
+ Element propertyOffpeakLimit = makeElement(dm,
+ PressureAwareCompactionThroughputController
+ .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK,
+ String.valueOf(newOffpeakLimit));
+ root.appendChild(propertyOffpeakLimit);
+
+ Transformer tfd = TransformerFactory.newInstance().newTransformer();
+ tfd.transform(new DOMSource(dm), new StreamResult(newConfFile));
+ } catch (ParserConfigurationException e) {
+
+ } catch (Exception e) {
+ LOG.error("addNewConf error", e);
+ }
Review comment:
We don't want any of the above exceptions to be suppressed, right? Let's
remove the catch blocks from here and instead declare the exceptions in the
`throws` list at the method level.
##########
File path: hbase-common/src/main/resources/hbase-default.xml
##########
@@ -998,24 +998,6 @@ possible configurations would overwhelm and obscure the
important.
See http://hbase.apache.org/book.html#offheap.blockcache for more
information.
</description>
</property>
- <property>
- <name>hbase.hstore.compaction.throughput.lower.bound</name>
- <value>52428800</value>
- <description>The target lower bound on aggregate compaction throughput, in
bytes/sec. Allows
- you to tune the minimum available compaction throughput when the
- PressureAwareCompactionThroughputController throughput controller is
active. (It is active by
- default.)</description>
- </property>
- <property>
- <name>hbase.hstore.compaction.throughput.higher.bound</name>
- <value>104857600</value>
- <description>The target upper bound on aggregate compaction throughput, in
bytes/sec. Allows
- you to control aggregate compaction throughput demand when the
- PressureAwareCompactionThroughputController throughput controller is
active. (It is active by
- default.) The maximum throughput will be tuned between the lower and upper
bounds when
- compaction pressure is within the range [0.0, 1.0]. If compaction pressure
is 1.0 or greater
- the higher bound will be ignored until pressure returns to the normal
range.</description>
- </property>
Review comment:
What is the intention behind removal of these configs from hbase-default?
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
##########
@@ -54,7 +54,7 @@
public static final String
HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
"hbase.hstore.compaction.throughput.lower.bound";
- private static final long
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
Review comment:
Please keep this private; it should be used by core code only. In the
test, you can directly provide the value: 50L * 1024 * 1024
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
##########
@@ -146,4 +146,42 @@ public String toString() {
+ throughputDesc(getMaxThroughput()) + ", activeCompactions=" +
activeOperations.size()
+ "]";
}
+
+ @Override public void updateConfig(Configuration newConf) {
+ this.maxThroughputUpperBound = newConf
+ .getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
+ DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND);
+ this.maxThroughputLowerBound = newConf
+ .getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
+ DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND);
+ this.maxThroughputOffpeak = newConf
+ .getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK,
+ DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK);
+ this.offPeakHours = OffPeakHours.getInstance(newConf);
+ this.controlPerSize = this.maxThroughputLowerBound;
+
+ LOG.info("updateParam Compaction throughput configurations, higher bound: "
+ + throughputDesc(maxThroughputUpperBound) + ", lower bound "
+ + throughputDesc(maxThroughputLowerBound) + ", off peak: "
+ + throughputDesc(maxThroughputOffpeak) + ", tuning period: "
+ + tuningPeriod + " ms, offPeakHours: " + offPeakHours);
Review comment:
Use `{}` placeholders for all parameters and pass the values as arguments (SLF4J parameterized logging), instead of string concatenation.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]