Repository: hadoop
Updated Branches:
  refs/heads/branch-2 41d0d9a32 -> 481e7248d


HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.
(cherry picked from commit bf6aa30a156b3c5cac5469014a5989e0dfdc7256)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481e7248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481e7248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481e7248

Branch: refs/heads/branch-2
Commit: 481e7248dee5b0a3e0f3148c3cdde133a637b990
Parents: 41d0d9a
Author: Kihwal Lee <kih...@apache.org>
Authored: Fri Nov 6 14:08:10 2015 -0600
Committer: Kihwal Lee <kih...@apache.org>
Committed: Fri Nov 6 14:08:10 2015 -0600

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../BlockPlacementPolicyDefault.java            |  7 ++-
 .../src/main/resources/hdfs-default.xml         |  9 ++++
 .../TestReplicationPolicyConsiderLoad.java      | 56 ++++++++++++++++++++
 5 files changed, 77 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1c8e840..4539cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -836,6 +836,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9282. Make data directory count and storage raw capacity related tests
     FsDataset-agnostic. (Tony Wu via lei)
 
+    HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e6143c..e73aa2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -179,6 +179,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
   public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
+      "dfs.namenode.replication.considerLoad.factor";
+  public static final double
+      DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
   public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
   public static final int     DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
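
A minimal sketch (not part of the commit) of reading the new key through Hadoop's
Configuration API; it falls back to the 2.0 default when the property is unset, which
is exactly the call the placement policy makes in the hunk below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ConsiderLoadFactorSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Returns 2.0 when the key is unset, matching the previously hard-coded value.
        double factor = conf.getDouble(
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
        System.out.println("considerLoad factor = " + factor);
      }
    }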

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d94179b..13b17e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -58,6 +58,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       };
 
   protected boolean considerLoad; 
+  protected double considerLoadFactor;
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
   protected Host2NodesMap host2datanodeMap;
@@ -79,6 +80,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                          Host2NodesMap host2datanodeMap) {
     this.considerLoad = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+    this.considerLoadFactor = conf.getDouble(
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
     this.stats = stats;
     this.clusterMap = clusterMap;
     this.host2datanodeMap = host2datanodeMap;
@@ -809,7 +813,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
     // check the communication traffic of the target machine
     if (considerLoad) {
-      final double maxLoad = 2.0 * stats.getInServiceXceiverAverage();
+      final double maxLoad = considerLoadFactor *
+          stats.getInServiceXceiverAverage();
       final int nodeLoad = node.getXceiverCount();
       if (nodeLoad > maxLoad) {
         logNodeIsNotChosen(node, "the node is too busy (load: " + nodeLoad
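
The check itself reduces to one comparison against a scaled cluster average. A
self-contained sketch of the arithmetic (the numbers are made up for illustration; the
real code takes them from DatanodeStatistics and the candidate DatanodeDescriptor):

    public class LoadCheckSketch {
      public static void main(String[] args) {
        double avgXceivers = 10.0;         // hypothetical in-service xceiver average
        double considerLoadFactor = 2.0;   // the newly configurable multiplier
        double maxLoad = considerLoadFactor * avgXceivers;  // 20.0
        int nodeLoad = 25;                 // the candidate node's xceiver count
        if (nodeLoad > maxLoad) {
          System.out.println("node is too busy (load: " + nodeLoad
              + " > " + maxLoad + "), not chosen");
        }
      }
    }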

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4f45cd4..52037da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -301,6 +301,15 @@
   <description>Decide if chooseTarget considers the target's load or not
   </description>
 </property>
+
+  <property>
+    <name>dfs.namenode.replication.considerLoad.factor</name>
+    <value>2.0</value>
+    <description>The factor by which a node's load can exceed the average
+      before being rejected for writes. Only used when considerLoad is true.
+    </description>
+  </property>
+
 <property>
   <name>dfs.default.chunk.view.size</name>
   <value>32768</value>
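
With the default of 2.0, a node is skipped for writes once its xceiver count exceeds
twice the cluster average: at an average of 10 xceivers, for example, a node reporting
25 is rejected while one reporting 18 is still eligible. The cutoff can be tightened or
loosened by overriding the property, e.g. programmatically (1.5 is just an example
value):

    conf.setDouble("dfs.namenode.replication.considerLoad.factor", 1.5);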

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index 6e84b91..123e635 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -51,6 +52,8 @@ public class TestReplicationPolicyConsiderLoad
 
   @Override
   DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
+    conf.setDouble(DFSConfigKeys
+        .DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, 1.2);
     final String[] racks = {
         "/rack1",
         "/rack1",
@@ -124,4 +127,57 @@ public class TestReplicationPolicyConsiderLoad
       namenode.getNamesystem().writeUnlock();
     }
   }
+
+  @Test
+  public void testConsiderLoadFactor() throws IOException {
+    namenode.getNamesystem().writeLock();
+    try {
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
+          dataNodes[0].getCacheCapacity(),
+          dataNodes[0].getCacheUsed(),
+          5, 0, null);
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[1],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[1]),
+          dataNodes[1].getCacheCapacity(),
+          dataNodes[1].getCacheUsed(),
+          10, 0, null);
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[2],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[2]),
+          dataNodes[2].getCacheCapacity(),
+          dataNodes[2].getCacheUsed(),
+          5, 0, null);
+
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
+          dataNodes[3].getCacheCapacity(),
+          dataNodes[3].getCacheUsed(),
+          10, 0, null);
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[4],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),
+          dataNodes[4].getCacheCapacity(),
+          dataNodes[4].getCacheUsed(),
+          15, 0, null);
+      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[5],
+          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),
+          dataNodes[5].getCacheCapacity(),
+          dataNodes[5].getCacheUsed(),
+          15, 0, null);
+      // Sum of the xceiver counts reported in the heartbeats above.
+      double load = 5 + 10 + 15 + 10 + 15 + 5;
+      // Call chooseTarget()
+      DatanodeDescriptor writerDn = dataNodes[0];
+      DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
+          .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3, writerDn,
+              new ArrayList<DatanodeStorageInfo>(), false, null,
+              1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+      for(DatanodeStorageInfo info : targets) {
+        assertTrue("The node "+info.getDatanodeDescriptor().getName()+
+                " has higher load and should not have been picked!",
+            info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
+      }
+    } finally {
+      namenode.getNamesystem().writeUnlock();
+    }
+  }
 }
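
For the record, the cutoff asserted above works out as follows: the six heartbeats
report 5 + 10 + 5 + 10 + 15 + 15 = 60 xceivers in total, the average is 60 / 6 = 10,
and with the factor of 1.2 set in getDatanodeDescriptors() the threshold is
10 * 1.2 = 12. Every chosen target must therefore report at most 12 xceivers, which
rules out dataNodes[4] and dataNodes[5] (15 each).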
