Author: junping_du
Date: Fri May 16 09:59:24 2014
New Revision: 1595146

URL: http://svn.apache.org/r1595146
Log:
Merge r1595145 from trunk: HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality (Contributed by Binglin Chang and Chen He)
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1595146&r1=1595145&r2=1595146&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri May 16 09:59:24 2014
@@ -237,6 +237,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6400. Cannot execute hdfs oiv_legacy. (Akira AJISAKA via kihwal)
 
+    HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
+    (Binglin Chang and Chen He via junping_du)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java?rev=1595146&r1=1595145&r2=1595146&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java Fri May 16 09:59:24 2014
@@ -170,7 +170,7 @@ class NameNodeConnector {
   }
 
   /* The idea for making sure that there is no more than one balancer
-   * running in an HDFS is to create a file in the HDFS, writes the IP address
+   * running in an HDFS is to create a file in the HDFS, writes the hostname
    * of the machine on which the balancer is running to the file, but did not
    * close the file until the balancer exits. 
   * This prevents the second balancer from running because it can not
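The comment being edited here describes how the balancer enforces a single running instance: create a well-known file in HDFS, write the hostname of the machine running the balancer into it, and keep the stream open until exit, so any second balancer fails at create time. Below is a minimal sketch of that idea against the generic FileSystem API; the class, method, and lock path are illustrative, since the diff does not show NameNodeConnector's actual fields:

    import java.io.IOException;
    import java.net.InetAddress;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class BalancerLockSketch {
      // Illustrative well-known path; the real constant lives in NameNodeConnector.
      private static final Path ID_PATH = new Path("/system/balancer.id");

      // Returns the open stream on success. A second balancer fails here because
      // create(..., overwrite=false) cannot recreate a file that the first
      // balancer still holds open.
      static FSDataOutputStream lock(FileSystem fs) throws IOException {
        FSDataOutputStream out = fs.create(ID_PATH, false);
        out.writeBytes(InetAddress.getLocalHost().getHostName() + "\n");
        out.hflush();       // make the hostname visible to operators for debugging
        return out;         // deliberately NOT closed until the balancer exits
      }
    }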
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1595146&r1=1595145&r2=1595146&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java Fri May 16 09:59:24 2014
@@ -22,8 +22,9 @@ import static org.junit.Assert.assertEqu
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -39,6 +40,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
 import org.apache.hadoop.net.NetworkTopology;
@@ -53,7 +57,7 @@ public class TestBalancerWithNodeGroup {
   private static final Log LOG = LogFactory.getLog(
       "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup");
 
-  final private static long CAPACITY = 6000L;
+  final private static long CAPACITY = 5000L;
   final private static String RACK0 = "/rack0";
   final private static String RACK1 = "/rack1";
   final private static String NODEGROUP0 = "/nodegroup0";
@@ -77,6 +81,7 @@ public class TestBalancerWithNodeGroup {
   static Configuration createConf() {
     Configuration conf = new HdfsConfiguration();
     TestBalancer.initConf(conf);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
         NetworkTopologyWithNodeGroup.class.getName());
     conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -191,6 +196,19 @@ public class TestBalancerWithNodeGroup {
     LOG.info("Rebalancing with default factor.");
   }
 
+  private Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> blks, String rack) {
+    Set<ExtendedBlock> ret = new HashSet<ExtendedBlock>();
+    for (LocatedBlock blk : blks) {
+      for (DatanodeInfo di : blk.getLocations()) {
+        if (rack.equals(NetworkTopology.getFirstHalf(di.getNetworkLocation()))) {
+          ret.add(blk.getBlock());
+          break;
+        }
+      }
+    }
+    return ret;
+  }
+
   /**
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test rack locality for balancer policy.
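The new getBlocksOnRack() helper identifies a block's rack via NetworkTopology.getFirstHalf(): under NetworkTopologyWithNodeGroup a datanode's network location carries two layers, rack then node group, and the first half is the rack part compared against RACK0 or RACK1. A small sketch of that split, using the test's own constants (the demo class itself is hypothetical):

    import org.apache.hadoop.net.NetworkTopology;

    public class FirstHalfDemo {
      public static void main(String[] args) {
        // Two-layer location as produced under NetworkTopologyWithNodeGroup.
        String location = "/rack0/nodegroup0";
        System.out.println(NetworkTopology.getFirstHalf(location));   // /rack0
        System.out.println(NetworkTopology.getSecondHalf(location));  // /nodegroup0
      }
    }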
@@ -220,9 +238,14 @@ public class TestBalancerWithNodeGroup {
 
       // fill up the cluster to be 30% full
       long totalUsedSpace = totalCapacity * 3 / 10;
-      TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
+      long length = totalUsedSpace / numOfDatanodes;
+      TestBalancer.createFile(cluster, filePath, length,
           (short) numOfDatanodes, 0);
 
+      LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0,
+          length);
+      Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+
       long newCapacity = CAPACITY;
       String newRack = RACK1;
       String newNodeGroup = NODEGROUP2;
@@ -235,22 +258,9 @@ public class TestBalancerWithNodeGroup {
 
       // run balancer and validate results
       runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
 
-      DatanodeInfo[] datanodeReport =
-          client.getDatanodeReport(DatanodeReportType.ALL);
-
-      Map<String, Integer> rackToUsedCapacity = new HashMap<String, Integer>();
-      for (DatanodeInfo datanode: datanodeReport) {
-        String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
-        int usedCapacity = (int) datanode.getDfsUsed();
-
-        if (rackToUsedCapacity.get(rack) != null) {
-          rackToUsedCapacity.put(rack, usedCapacity + rackToUsedCapacity.get(rack));
-        } else {
-          rackToUsedCapacity.put(rack, usedCapacity);
-        }
-      }
-      assertEquals(rackToUsedCapacity.size(), 2);
-      assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));
+      lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
+      Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+      assertEquals(before, after);
     } finally {
       cluster.shutdown();
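For reference, the 30% fill that the rewritten hunk preserves is plain arithmetic: the file length is totalUsedSpace / numOfDatanodes and the replication factor is numOfDatanodes, so the raw bytes written equal totalUsedSpace. A sketch with assumed numbers, since numOfDatanodes is not visible in this hunk:

    public class FillArithmetic {
      public static void main(String[] args) {
        // Hypothetical figure: numOfDatanodes is not shown in this diff.
        long numOfDatanodes = 6;
        long capacity = 5000L;                            // the new CAPACITY value
        long totalCapacity = numOfDatanodes * capacity;   // 30000
        long totalUsedSpace = totalCapacity * 3 / 10;     // 9000, i.e. 30%
        long length = totalUsedSpace / numOfDatanodes;    // 1500 bytes per replica
        // createFile writes `length` bytes with replication numOfDatanodes,
        // so raw usage is length * numOfDatanodes == totalUsedSpace.
        System.out.println(length * numOfDatanodes == totalUsedSpace); // true
      }
    }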