Author: kihwal Date: Mon May 19 17:54:18 2014 New Revision: 1595980 URL: http://svn.apache.org/r1595980 Log: svn merge -c 1595978 merging from trunk to branch-2 to fix: HDFS-6397. NN shows inconsistent value in deadnode count. Contributed by Mohammad Kamrul Islam.
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1595980&r1=1595979&r2=1595980&view=diff ============================================================================== --- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon May 19 17:54:18 2014 @@ -329,6 +329,9 @@ Release 2.4.1 - UNRELEASED HDFS-6325. Append should fail if the last block has insufficient number of replicas (Keith Pak via cos) + HDFS-6397. NN shows inconsistent value in deadnode count. 
+ (Mohammad Kamrul Islam via kihwal) + Release 2.4.0 - 2014-04-07 INCOMPATIBLE CHANGES Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1595980&r1=1595979&r2=1595980&view=diff ============================================================================== --- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original) +++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Mon May 19 17:54:18 2014 @@ -1056,15 +1056,7 @@ public class DatanodeManager { /** @return the number of dead datanodes. */ public int getNumDeadDataNodes() { - int numDead = 0; - synchronized (datanodeMap) { - for(DatanodeDescriptor dn : datanodeMap.values()) { - if (isDatanodeDead(dn) ) { - numDead++; - } - } - } - return numDead; + return getDatanodeListForReport(DatanodeReportType.DEAD).size(); } /** @return list of datanodes where decommissioning is in progress. 
*/ Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1595980&r1=1595979&r2=1595980&view=diff ============================================================================== --- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original) +++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Mon May 19 17:54:18 2014 @@ -132,4 +132,44 @@ public class TestHostsFiles { cluster.shutdown(); } } + + @Test + public void testHostsIncludeForDeadCount() throws Exception { + Configuration conf = getConf(); + + // Configure an excludes file + FileSystem localFileSys = FileSystem.getLocal(conf); + Path workingDir = localFileSys.getWorkingDirectory(); + Path dir = new Path(workingDir, "build/test/data/temp/decommission"); + Path excludeFile = new Path(dir, "exclude"); + Path includeFile = new Path(dir, "include"); + assertTrue(localFileSys.mkdirs(dir)); + StringBuilder includeHosts = new StringBuilder(); + includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777") + .append("\n"); + DFSTestUtil.writeFile(localFileSys, excludeFile, ""); + DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString()); + conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath()); + conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath()); + + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + final FSNamesystem ns = cluster.getNameNode().getNamesystem(); + assertTrue(ns.getNumDeadDataNodes() == 2); + assertTrue(ns.getNumLiveDataNodes() == 0); + + // 
Testing using MBeans + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName mxbeanName = new ObjectName( + "Hadoop:service=NameNode,name=FSNamesystemState"); + String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + ""; + assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2); + assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } }