Author: vinayakumarb
Date: Wed Jun 11 12:20:48 2014
New Revision: 1601869

URL: http://svn.apache.org/r1601869
Log:
Merged revision(s) 1601144-1601868, 1598456-1601149 from hadoop/common/trunk

Modified:
    hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ 
  (props changed)
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
    
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1601151-1601868

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jun 11 12:20:48 2014
@@ -482,10 +482,16 @@ Release 2.5.0 - UNRELEASED
     HDFS-6297. Add CLI testcases to reflect new features of dfs and dfsadmin
     (Dasha Boudnik via cos)
 
+    HDFS-6399. Add note about setfacl in HDFS permissions guide.
+    (cnauroth via wang)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
 
+    HDFS-6460. Ignore stale and decommissioned nodes in
+    NetworkTopology#sortByDistance. (Yongjun Zhang via wang)
+
   BUG FIXES 
 
     HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.
@@ -654,6 +660,15 @@ Release 2.5.0 - UNRELEASED
     HDFS-6497. Make TestAvailableSpaceVolumeChoosingPolicy deterministic
     (cmccabe)
 
+    HDFS-6500. Snapshot shouldn't be removed silently after renaming to an 
+    existing snapshot. (Nicholas SZE via junping_du)
+
+    HDFS-6257. TestCacheDirectives#testExceedsCapacity fails occasionally
+    (cmccabe)
+
+    HDFS-6364. Incorrect check for unknown datanode in Balancer. (Benoy
+    Antony via Arpit Agarwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged 
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1601151-1601868

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 Wed Jun 11 12:20:48 2014
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.security.token.Token;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /**

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 Wed Jun 11 12:20:48 2014
@@ -696,7 +696,7 @@ public class Balancer {
             // update locations
             for (String datanodeUuid : blk.getDatanodeUuids()) {
               final BalancerDatanode d = datanodeMap.get(datanodeUuid);
-              if (datanode != null) { // not an unknown datanode
+              if (d != null) { // not an unknown datanode
                 block.addLocation(d);
               }
             }

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 Wed Jun 11 12:20:48 2014
@@ -331,6 +331,18 @@ public class DatanodeManager {
     return heartbeatManager;
   }
 
+  private boolean isInactive(DatanodeInfo datanode) {
+    if (datanode.isDecommissioned()) {
+      return true;
+    }
+
+    if (avoidStaleDataNodesForRead) {
+      return datanode.isStale(staleInterval);
+    }
+      
+    return false;
+  }
+  
   /** Sort the located blocks by the distance to the target host. */
   public void sortLocatedBlocks(final String targethost,
       final List<LocatedBlock> locatedblocks) {
@@ -351,10 +363,17 @@ public class DatanodeManager {
         DFSUtil.DECOM_COMPARATOR;
         
     for (LocatedBlock b : locatedblocks) {
-      networktopology.sortByDistance(client, b.getLocations(), b
-          .getBlock().getBlockId());
+      DatanodeInfo[] di = b.getLocations();
       // Move decommissioned/stale datanodes to the bottom
-      Arrays.sort(b.getLocations(), comparator);
+      Arrays.sort(di, comparator);
+      
+      int lastActiveIndex = di.length - 1;
+      while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) {
+          --lastActiveIndex;
+      }
+      int activeLen = lastActiveIndex + 1;      
+      networktopology.sortByDistance(client, b.getLocations(), activeLen,
+          b.getBlock().getBlockId());
     }
   }
   

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
 Wed Jun 11 12:20:48 2014
@@ -234,7 +234,7 @@ public class INodeDirectorySnapshottable
    *           name does not exist or a snapshot with the new name already
    *           exists
    */
-  public void renameSnapshot(String path, String oldName, String newName)
+  void renameSnapshot(String path, String oldName, String newName)
       throws SnapshotException {
     if (newName.equals(oldName)) {
       return;
@@ -246,7 +246,7 @@ public class INodeDirectorySnapshottable
     } else {
       final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
       int indexOfNew = searchSnapshot(newNameBytes);
-      if (indexOfNew > 0) {
+      if (indexOfNew >= 0) {
         throw new SnapshotException("The snapshot " + newName
             + " already exists for directory " + path);
       }

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
 Wed Jun 11 12:20:48 2014
@@ -395,8 +395,8 @@ HDFS Permissions Guide
        permission checking is turned off, but all other behavior is
        unchanged. Switching from one parameter value to the other does not
        change the mode, owner or group of files or directories.
-       Regardless of whether permissions are on or off, chmod, chgrp and
-       chown always check permissions. These functions are only useful in
+       Regardless of whether permissions are on or off, chmod, chgrp, chown and
+       setfacl always check permissions. These functions are only useful in
        the permissions context, and so there is no backwards compatibility
        issue. Furthermore, this allows administrators to reliably set
        owners and permissions in advance of turning on regular permissions

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
 Wed Jun 11 12:20:48 2014
@@ -186,6 +186,17 @@ public class TestSnapshotCommands {
     FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn.rename");
    FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub1");
    FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub2");
+
+    //try renaming from a non-existing snapshot
+    FsShellRun("-renameSnapshot /sub1 sn.nonexist sn.rename", 1,
+        "renameSnapshot: The snapshot sn.nonexist does not exist for directory /sub1");
+
+    //try renaming to existing snapshots
+    FsShellRun("-createSnapshot /sub1 sn.new");
+    FsShellRun("-renameSnapshot /sub1 sn.new sn.rename", 1,
+        "renameSnapshot: The snapshot sn.rename already exists for directory /sub1");
+    FsShellRun("-renameSnapshot /sub1 sn.rename sn.new", 1,
+        "renameSnapshot: The snapshot sn.new already exists for directory /sub1");
   }
 
   @Test

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
 Wed Jun 11 12:20:48 2014
@@ -72,7 +72,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -1401,6 +1403,20 @@ public class TestCacheDirectives {
         .build());
   }
 
+  /**
+   * Check that the NameNode is not attempting to cache anything.
+   */
+  private void checkPendingCachedEmpty(MiniDFSCluster cluster)
+      throws Exception {
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    for (DataNode dn : cluster.getDataNodes()) {
+      DatanodeDescriptor descriptor =
+          datanodeManager.getDatanode(dn.getDatanodeId());
+      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    }
+  }
+
   @Test(timeout=60000)
   public void testExceedsCapacity() throws Exception {
     // Create a giant file
@@ -1418,21 +1434,16 @@ public class TestCacheDirectives {
         .setPath(fileName).setReplication((short) 1).build());
     waitForCachedBlocks(namenode, -1, numCachedReplicas,
         "testExceeds:1");
-    // Check that no DNs saw an excess CACHE message
-    int lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
+
     // Try creating a file with giant-sized blocks that exceed cache capacity
     dfs.delete(fileName, false);
     DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
         (short) 1, 0xFADED);
-    // Nothing will get cached, so just force sleep for a bit
-    Thread.sleep(4000);
-    // Still should not see any excess commands
-    lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
   }
 }

Modified: 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1601869&r1=1601868&r2=1601869&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
 (original)
+++ 
hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
 Wed Jun 11 12:20:48 2014
@@ -55,11 +55,18 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
         DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
+        DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
+        DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2")        
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
+    dataNodes[9].setDecommissioned();
+    dataNodes[10].setDecommissioned();
   }
   
   @Test
@@ -100,7 +107,7 @@ public class TestNetworkTopology {
 
   @Test
   public void testRacks() throws Exception {
-    assertEquals(cluster.getNumOfRacks(), 3);
+    assertEquals(cluster.getNumOfRacks(), 5);
     assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
     assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
     assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
@@ -125,16 +132,33 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
 
+    // array contains both local node & local rack node & decommissioned node
+    DatanodeDescriptor[] dtestNodes = new DatanodeDescriptor[5];
+    dtestNodes[0] = dataNodes[8];
+    dtestNodes[1] = dataNodes[12];
+    dtestNodes[2] = dataNodes[11];
+    dtestNodes[3] = dataNodes[9];
+    dtestNodes[4] = dataNodes[10];
+    cluster.sortByDistance(dataNodes[8], dtestNodes,
+        dtestNodes.length - 2, 0xDEADBEEF);
+    assertTrue(dtestNodes[0] == dataNodes[8]);
+    assertTrue(dtestNodes[1] == dataNodes[11]);
+    assertTrue(dtestNodes[2] == dataNodes[12]);
+    assertTrue(dtestNodes[3] == dataNodes[9]);
+    assertTrue(dtestNodes[4] == dataNodes[10]);
+
     // array contains local node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -143,7 +167,8 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
@@ -152,7 +177,8 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
@@ -161,7 +187,8 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEAD);
+    cluster.sortByDistance(dataNodes[0], testNodes,
+        testNodes.length, 0xDEAD);
     // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
@@ -176,7 +203,8 @@ public class TestNetworkTopology {
       testNodes[0] = dataNodes[5];
       testNodes[1] = dataNodes[6];
       testNodes[2] = dataNodes[7];
-      cluster.sortByDistance(dataNodes[i], testNodes, 0xBEADED+i);
+      cluster.sortByDistance(dataNodes[i], testNodes,
+          testNodes.length, 0xBEADED+i);
       if (first == null) {
         first = testNodes[0];
       } else {


Reply via email to