Author: wang
Date: Tue Jun 3 22:06:11 2014
New Revision: 1599807

URL: http://svn.apache.org/r1599807
Log:
Revert bad HDFS-6268 commit from branch-2
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project:r1598746,1599734

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1599734

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jun 3 22:06:11 2014
@@ -147,9 +147,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6109 let sync_file_range() system call run in background
     (Liang Xie via stack)
 
-    HDFS-6268. Better sorting in NetworkTopology#pseudoSortByDistance when
-    no local node is found. (wang)
-
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1599734

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Tue Jun 3 22:06:11 2014
@@ -351,8 +351,7 @@ public class DatanodeManager {
         DFSUtil.DECOM_COMPARATOR;
 
     for (LocatedBlock b : locatedblocks) {
-      networktopology.sortByDistance(client, b.getLocations(), b
-          .getBlock().getBlockId());
+      networktopology.pseudoSortByDistance(client, b.getLocations());
       // Move decommissioned/stale datanodes to the bottom
       Arrays.sort(b.getLocations(), comparator);
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Jun 3 22:06:11 2014
@@ -1618,11 +1618,9 @@ public class FSNamesystem implements Nam
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
 
-      // lastBlock is not part of getLocatedBlocks(), might need to sort it too
       LocatedBlock lastBlock = blocks.getLastLocatedBlock();
       if (lastBlock != null) {
-        ArrayList<LocatedBlock> lastBlockList =
-            Lists.newArrayListWithCapacity(1);
+        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
         lastBlockList.add(lastBlock);
         blockManager.getDatanodeManager().sortLocatedBlocks(
             clientMachine, lastBlockList);

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1599734

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1599734

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1599734

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1599734

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1599734

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Tue Jun 3 22:06:11 2014
@@ -169,9 +169,6 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
-      if (client != null) {
-        client.close();
-      }
       cluster.shutdown();
     }
   }

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
------------------------------------------------------------------------------
  Reverse-merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot:r1599734

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java Tue Jun 3 22:06:11 2014
@@ -143,10 +143,10 @@ public class TestSnapshotFileLength {
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
+    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
+    assertEquals(bytesRead, BLOCKSIZE * 2);
     fis.close();
 
     Path file1snap1 =
@@ -156,23 +156,21 @@ public class TestSnapshotFileLength {
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
+    assertEquals(bytesRead, BLOCKSIZE);
     fis.close();
 
-    PrintStream outBackup = System.out;
-    PrintStream errBackup = System.err;
+    PrintStream psBackup = System.out;
     ByteArrayOutputStream bao = new ByteArrayOutputStream();
     System.setOut(new PrintStream(bao));
     System.setErr(new PrintStream(bao));
     // Make sure we can cat the file upto to snapshot length
     FsShell shell = new FsShell();
-    try {
+    try{
       ToolRunner.run(conf, shell, new String[] { "-cat",
           "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
-    } finally {
-      System.setOut(outBackup);
-      System.setErr(errBackup);
+      assertEquals(bao.size(), BLOCKSIZE);
+    }finally{
+      System.setOut(psBackup);
     }
   }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1599807&r1=1599806&r2=1599807&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Tue Jun 3 22:06:11 2014
@@ -54,8 +54,7 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -118,14 +117,14 @@ public class TestNetworkTopology {
   }
 
   @Test
-  public void testSortByDistance() throws Exception {
+  public void testPseudoSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
 
     // array contains both local node & local rack node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -134,7 +133,7 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -143,50 +142,21 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
-
+    
     // array contains local rack node which happens to be in position 0
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
-    assertTrue(testNodes[0] == dataNodes[1]);
-    assertTrue(testNodes[1] == dataNodes[3]);
-    assertTrue(testNodes[2] == dataNodes[5]);
-
-    // Same as previous, but with a different random seed to test randomization
-    testNodes[0] = dataNodes[1];
-    testNodes[1] = dataNodes[5];
-    testNodes[2] = dataNodes[3];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEAD);
-    // sortByDistance does not take the "data center" layer into consideration
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    // peudoSortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[3]);
-
-    // Array is just local rack nodes
-    // Expect a random first node depending on the seed (normally the block ID).
-    DatanodeDescriptor first = null;
-    boolean foundRandom = false;
-    for (int i=5; i<=7; i++) {
-      testNodes[0] = dataNodes[5];
-      testNodes[1] = dataNodes[6];
-      testNodes[2] = dataNodes[7];
-      cluster.sortByDistance(dataNodes[i], testNodes, 0xBEADED+i);
-      if (first == null) {
-        first = testNodes[0];
-      } else {
-        if (first != testNodes[0]) {
-          foundRandom = true;
-          break;
-        }
-      }
-    }
-    assertTrue("Expected to find a different first location", foundRandom);
   }
 
   @Test