Author: szetszwo
Date: Wed Jul 23 17:30:06 2014
New Revision: 1612883

URL: http://svn.apache.org/r1612883
Log:
Merge r1609845 through r1612880 from trunk.
Added:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
      - copied unchanged from r1612880, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm
      - copied unchanged from r1612880, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm
Modified:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1612503-1612880

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Wed Jul 23 17:30:06 2014
@@ -1051,8 +1051,12 @@ public class RpcProgramNfs3 extends RpcP
 
   @Override
   public REMOVE3Response remove(XDR xdr, RpcInfo info) {
+    return remove(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+
+  @VisibleForTesting
+  REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
     REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1083,17 +1087,19 @@ public class RpcProgramNfs3 extends RpcP
       return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
+    WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
+        preOpDirAttr);
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
+      return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
+    }
+
     String fileIdPath = dirFileIdPath + "/" + fileName;
     HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
     if (fstat == null) {
-      WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-          preOpDirAttr);
-      return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc);
+      return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
     }
     if (fstat.isDir()) {
-      WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-          preOpDirAttr);
-      return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, dirWcc);
+      return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
     }
 
     boolean result = dfsClient.delete(fileIdPath, false);
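The refactoring above splits remove() so a test can inject a SecurityHandler and a
client address directly; the TestClientAccessPrivilege.java added by this merge
exercises exactly that seam. Below is a minimal sketch of such a test. The nfsd and
dirId fixtures and the Mockito wiring are illustrative assumptions, not the
committed test code:

    package org.apache.hadoop.hdfs.nfs.nfs3;

    import java.net.InetSocketAddress;

    import org.apache.hadoop.nfs.nfs3.FileHandle;
    import org.apache.hadoop.nfs.nfs3.Nfs3Status;
    import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.SecurityHandler;
    import org.junit.Assert;
    import org.junit.Test;
    import org.mockito.Mockito;

    public class ReadOnlyRemoveSketch {
      // Assumed fixtures: an RpcProgramNfs3 wired to a running (mini) cluster
      // whose export is read-only for this client, and the inode id of a
      // directory containing a file named "f1". Setup omitted for brevity.
      private RpcProgramNfs3 nfsd;
      private long dirId;

      @Test
      public void removeOnReadOnlyMountIsRejected() throws Exception {
        SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
        Mockito.when(securityHandler.getUser())
            .thenReturn(System.getProperty("user.name"));

        // REMOVE arguments: the parent directory handle plus the file name.
        XDR xdr = new XDR();
        new FileHandle(dirId).serialize(xdr);
        xdr.writeString("f1");

        // With the new checkAccessPrivilege guard, a client address that only
        // has READ access must get NFS3ERR_ACCES back instead of a deletion.
        REMOVE3Response response = nfsd.remove(xdr.asReadOnlyWrap(),
            securityHandler, new InetSocketAddress("localhost", 1234));
        Assert.assertEquals(Nfs3Status.NFS3ERR_ACCES, response.getStatus());
      }
    }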
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jul 23 17:30:06 2014
@@ -331,6 +331,9 @@ Release 2.6.0 - UNRELEASED
     datanodes and change datanode to write block replicas using the specified
     storage type. (szetszwo)
 
+    HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
+    (Ashwin Shankar via wang)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -368,6 +371,12 @@ Release 2.6.0 - UNRELEASED
     HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
     with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
 
+    HDFS-6704. Fix the command to launch JournalNode in the HDFS HA document.
+    (Akira AJISAKA via jing9)
+
+    HDFS-6731. Running "hdfs zkfc -formatZK" on a non-NameNode server causes
+    a null pointer exception. (Masatake Iwasaki via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -616,6 +625,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6680. BlockPlacementPolicyDefault does not choose favored nodes
     correctly. (szetszwo)
 
+    HDFS-6712. Document HDFS Multihoming Settings. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -898,6 +909,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6378. NFS registration should timeout instead of hanging when
     portmap/rpcbind is not available (Abhiraj Butala via brandonli)
 
+    HDFS-6703. NFS: Files can be deleted from a read-only mount
+    (Srikanth Upputuri via brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation.
     (Yi Liu via umamahesh)

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1612503-1612880

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Jul 23 17:30:06 2014
@@ -214,6 +214,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
 
+  public static final String  DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK = "dfs.namenode.randomize-block-locations-per-block";
+  public static final boolean DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT = false;
+
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Wed Jul 23 17:30:06 2014
@@ -345,7 +345,8 @@ public class DatanodeManager {
 
   /** Sort the located blocks by the distance to the target host. */
   public void sortLocatedBlocks(final String targethost,
-      final List<LocatedBlock> locatedblocks) {
+      final List<LocatedBlock> locatedblocks,
+      boolean randomizeBlockLocationsPerBlock) {
     //sort the blocks
     // As it is possible for the separation of node manager and datanode, 
     // here we should get node but not datanode only .
@@ -372,8 +373,8 @@ public class DatanodeManager {
         --lastActiveIndex;
       }
       int activeLen = lastActiveIndex + 1;
-      networktopology.sortByDistance(client, b.getLocations(), activeLen,
-          b.getBlock().getBlockId());
+      networktopology.sortByDistance(client, b.getLocations(), activeLen, b
+          .getBlock().getBlockId(), randomizeBlockLocationsPerBlock);
     }
   }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jul 23 17:30:06 2014
@@ -83,6 +83,9 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
+
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedWriter;
@@ -527,6 +530,8 @@ public class FSNamesystem implements Nam
 
   private final FSImage fsImage;
 
+  private boolean randomizeBlockLocationsPerBlock;
+
   /**
    * Notify that loading of this FSDirectory is complete, and
    * it is imageLoaded for use
@@ -836,6 +841,10 @@ public class FSNamesystem implements Nam
       alwaysUseDelegationTokensForTests = conf.getBoolean(
           DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
           DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
+
+      this.randomizeBlockLocationsPerBlock = conf.getBoolean(
+          DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK,
+          DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT);
 
       this.dtSecretManager = createDelegationTokenSecretManager(conf);
       this.dir = new FSDirectory(this, conf);
@@ -1699,17 +1708,17 @@ public class FSNamesystem implements Nam
     LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
         true);
     if (blocks != null) {
-      blockManager.getDatanodeManager().sortLocatedBlocks(
-          clientMachine, blocks.getLocatedBlocks());
-
+      blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
+          blocks.getLocatedBlocks(), randomizeBlockLocationsPerBlock);
+
       // lastBlock is not part of getLocatedBlocks(), might need to sort it too
       LocatedBlock lastBlock = blocks.getLastLocatedBlock();
       if (lastBlock != null) {
         ArrayList<LocatedBlock> lastBlockList =
             Lists.newArrayListWithCapacity(1);
         lastBlockList.add(lastBlock);
-        blockManager.getDatanodeManager().sortLocatedBlocks(
-            clientMachine, lastBlockList);
+        blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
+            lastBlockList, randomizeBlockLocationsPerBlock);
       }
     }
     return blocks;
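Taken together, the DFSConfigKeys, DatanodeManager, and FSNamesystem changes above
thread one boolean from configuration down into NetworkTopology#sortByDistance. A
minimal sketch of the resulting call follows, with the semantics as described by
the new hdfs-default.xml entry further below; the wrapper method and its names are
assumptions for illustration, only the sortByDistance signature is taken from this
diff:

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;

    public class SortByDistanceSketch {
      /**
       * Orders the active replicas of one block by network distance from the
       * reader. Replicas at equal distance are shuffled: seeded by the block
       * id when randomizePerBlock is false, so repeated lookups of the same
       * block return the same order (friendlier to the page cache), and
       * re-randomized on every call when it is true (spreading read load
       * across equal-distance replicas).
       */
      static void sortReplicas(NetworkTopology topology, Node reader,
          Node[] replicas, int activeLen, long blockId,
          boolean randomizePerBlock) {
        topology.sortByDistance(reader, replicas, activeLen, blockId,
            randomizePerBlock);
      }
    }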
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Wed Jul 23 17:30:06 2014
@@ -122,6 +122,11 @@ public class DFSZKFailoverController ext
           "HA is not enabled for this namenode.");
     }
     String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
+    if (nnId == null) {
+      String msg = "Could not get the namenode ID of this node. "
+          + "You may run zkfc on the node other than namenode.";
+      throw new HadoopIllegalArgumentException(msg);
+    }
     NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
     DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Jul 23 17:30:06 2014
@@ -2040,4 +2040,17 @@
 </description>
 </property>
 
+<property>
+  <name>dfs.namenode.randomize-block-locations-per-block</name>
+  <value>false</value>
+  <description>When fetching replica locations of a block, the replicas
+   are sorted based on network distance. This configuration parameter
+   determines whether the replicas at the same network distance are randomly
+   shuffled. By default, this is false, such that repeated requests for a block's
+   replicas always result in the same order. This potentially improves page cache
+   behavior. However, for some network topologies, it is desirable to shuffle this
+   order for better load balancing.
+  </description>
+</property>
+
 </configuration>
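As a usage sketch, the new switch can also be set programmatically, e.g. in a test
before starting a NameNode; in a deployment it would normally be set in
hdfs-site.xml. Only the key and its default come from this patch; the surrounding
class is an illustrative assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RandomizeLocationsConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Off by default; FSNamesystem reads this once in its constructor
        // (see the change above), so the NameNode must be (re)started for a
        // change to take effect.
        conf.setBoolean(
            DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK,
            true);
        System.out.println(conf.getBoolean(
            DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK,
            DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT));
      }
    }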
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm Wed Jul 23 17:30:06 2014
@@ -416,8 +416,8 @@ HDFS High Availability Using the Quorum
 
   After all of the necessary configuration options have been set, you must
   start the JournalNode daemons on the set of machines where they will run. This
-  can be done by running the command "<hdfs-daemon.sh journalnode>" and waiting
-  for the daemon to start on each of the relevant machines.
+  can be done by running the command "<hadoop-daemon.sh start journalnode>" and
+  waiting for the daemon to start on each of the relevant machines.
 
   Once the JournalNodes have been started, one must initially synchronize the
   two HA NameNodes' on-disk metadata.

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1612883&r1=1612882&r2=1612883&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Wed Jul 23 17:30:06 2014
@@ -60,7 +60,14 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
         DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
         DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
-        DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2")
+        DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
+        DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
+        DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -107,7 +114,7 @@ public class TestNetworkTopology {
   @Test
   public void testRacks() throws Exception {
-    assertEquals(cluster.getNumOfRacks(), 5);
+    assertEquals(cluster.getNumOfRacks(), 6);
     assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
     assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
     assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
@@ -133,7 +140,7 @@ public class TestNetworkTopology {
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -146,7 +153,7 @@ public class TestNetworkTopology {
     dtestNodes[3] = dataNodes[9];
     dtestNodes[4] = dataNodes[10];
     cluster.sortByDistance(dataNodes[8], dtestNodes,
-        dtestNodes.length - 2, 0xDEADBEEF);
+        dtestNodes.length - 2, 0xDEADBEEF, false);
     assertTrue(dtestNodes[0] == dataNodes[8]);
     assertTrue(dtestNodes[1] == dataNodes[11]);
    assertTrue(dtestNodes[2] == dataNodes[12]);
@@ -158,7 +165,7 @@ public class TestNetworkTopology {
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -168,7 +175,7 @@ public class TestNetworkTopology {
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
@@ -178,7 +185,7 @@ public class TestNetworkTopology {
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEADBEEF);
+        testNodes.length, 0xDEADBEEF, false);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
@@ -188,7 +195,7 @@ public class TestNetworkTopology {
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
     cluster.sortByDistance(dataNodes[0], testNodes,
-        testNodes.length, 0xDEAD);
+        testNodes.length, 0xDEAD, false);
     // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
@@ -204,7 +211,27 @@ public class TestNetworkTopology {
       testNodes[1] = dataNodes[6];
       testNodes[2] = dataNodes[7];
       cluster.sortByDistance(dataNodes[i], testNodes,
-          testNodes.length, 0xBEADED+i);
+          testNodes.length, 0xBEADED+i, false);
       if (first == null) {
         first = testNodes[0];
       } else {
         if (first != testNodes[0]) {
           foundRandom = true;
           break;
         }
       }
     }
     assertTrue("Expected to find a different first location", foundRandom);
+    // Array of rack local nodes with randomizeBlockLocationsPerBlock set to
+    // true
+    // Expect random order of block locations for same block
+    first = null;
+    for (int i = 1; i <= 4; i++) {
+      testNodes[0] = dataNodes[13];
+      testNodes[1] = dataNodes[14];
+      testNodes[2] = dataNodes[15];
+      cluster.sortByDistance(dataNodes[15 + i], testNodes, testNodes.length,
+          0xBEADED, true);
+      if (first == null) {
+        first = testNodes[0];
+      } else {