Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-11890 c84858b36 -> 8cda82c8f


HDFS-9266 Avoid unsafe split and append on fields that might be IPv6 literals

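The title covers two related hazards. Splitting a host:port string on its
first ':' misparses any IPv6 literal, because the address itself contains
colons; and appending ":" + port to an unbracketed IPv6 literal builds a
string that can no longer be split apart at all. A small self-contained
illustration (demo class and values are invented, not code from this patch):

    public class Ipv6SplitAppendDemo {
      public static void main(String[] args) {
        // Split hazard: the first colon sits inside the address, not before the port.
        String addr = "[2001:db8::1]:8020";
        System.out.println(addr.substring(0, addr.indexOf(':')));     // [2001
        System.out.println(addr.substring(0, addr.lastIndexOf(':'))); // [2001:db8::1]

        // Append hazard: without brackets the port boundary becomes ambiguous.
        String host = "2001:db8::1";
        System.out.println(host + ":" + 8020);         // 2001:db8::1:8020
        System.out.println("[" + host + "]:" + 8020);  // [2001:db8::1]:8020
      }
    }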

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cda82c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cda82c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cda82c8

Branch: refs/heads/HADOOP-11890
Commit: 8cda82c8f64347d4d518dd23ec30f8d69837abe5
Parents: c84858b
Author: Elliott Clark <ecl...@apache.org>
Authored: Fri Oct 23 11:48:01 2015 -0700
Committer: Elliott Clark <ecl...@apache.org>
Committed: Fri Oct 23 11:48:01 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   3 +-
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |   2 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   2 +-
 .../hdfs/qjournal/client/IPCLoggerChannel.java  |   7 +-
 .../server/blockmanagement/DatanodeManager.java |   9 +-
 .../server/datanode/BlockPoolSliceStorage.java  |  12 +-
 .../hdfs/server/datanode/DataXceiver.java       |   2 +-
 .../hdfs/server/namenode/Checkpointer.java      |   5 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  11 +-
 .../ha/ConfiguredFailoverProxyProvider.java     |   4 +-
 .../namenode/ha/IPFailoverProxyProvider.java    |   3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  13 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   |   3 +-
 .../offlineImageViewer/WebImageViewer.java      |   5 +-
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |   2 +-
 .../hadoop/hdfs/TestDFSAddressConfig.java       |  10 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     |  44 ++++--
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |   7 +-
 .../apache/hadoop/hdfs/TestFileCreation.java    | 148 ++++++++++---------
 .../client/TestQuorumJournalManager.java        |   6 +-
 .../datanode/TestBlockPoolSliceStorage.java     |  28 +++-
 .../TestNameNodeRespectsBindHostKeys.java       |  81 ++++++++--
 .../server/namenode/TestNameNodeRpcServer.java  |  12 +-
 23 files changed, 284 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index f249692..98be99a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -857,6 +858,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    */
   public static String getFileName(final InetSocketAddress s,
       final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
+    return NetUtils.getSocketAddressString(s) + ":" + poolId + ":" + blockId;
   }
 }

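NetUtils.getSocketAddressString is a helper introduced on the HADOOP-11890
branch and not shown in this diff. Its essential job is to bracket an IPv6
address before the port is appended, where InetSocketAddress.toString() would
not. A rough sketch of that behavior (the real branch method may differ):

    import java.net.Inet6Address;
    import java.net.InetSocketAddress;

    public class SocketAddressStringSketch {
      static String toSocketAddressString(InetSocketAddress s) {
        if (s.isUnresolved()) {
          return s.getHostString() + ":" + s.getPort();
        }
        String host = s.getAddress().getHostAddress();
        if (s.getAddress() instanceof Inet6Address) {
          host = "[" + host + "]";  // keeps the port separator unambiguous
        }
        return host + ":" + s.getPort();
      }

      public static void main(String[] args) {
        // Prints [2001:db8:0:0:0:0:0:1]:50010 (Java expands the literal).
        System.out.println(toSocketAddressString(
            new InetSocketAddress("2001:db8::1", 50010)));
      }
    }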
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 942c37c..dded792 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -362,7 +362,7 @@ public class RemoteBlockReader2  implements BlockReader {
    */
   public static String getFileName(final InetSocketAddress s,
       final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
+    return NetUtils.getSocketAddressString(s) + ":" + poolId + ":" + blockId;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index d938997..db0f7cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -200,7 +200,7 @@ class JsonUtilClient {
     if (ipAddr == null) {
       String name = getString(m, "name", null);
       if (name != null) {
-        int colonIdx = name.indexOf(':');
+        int colonIdx = name.lastIndexOf(':');
         if (colonIdx > 0) {
           ipAddr = name.substring(0, colonIdx);
           xferPort = Integer.parseInt(name.substring(colonIdx +1));

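The WebHDFS JSON "name" field is a single host:port string, so for an IPv6
datanode the host part carries its own colons and only the last colon
delimits the port. A quick check of both split strategies (values invented):

    public class NameFieldParseDemo {
      public static void main(String[] args) {
        String name = "[2001:db8::1]:50010";
        int last = name.lastIndexOf(':');
        System.out.println(name.substring(0, last));                    // [2001:db8::1]
        System.out.println(Integer.parseInt(name.substring(last + 1))); // 50010
        // With indexOf(':') the port substring would be "db8::1]:50010",
        // which Integer.parseInt rejects with a NumberFormatException.
      }
    }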
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index cbfe5b8..7559749 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -51,12 +51,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StopWatch;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.net.InetAddresses;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -672,8 +672,7 @@ public class IPCLoggerChannel implements AsyncLogger {
 
   @Override
   public String toString() {
-    return InetAddresses.toAddrString(addr.getAddress()) + ':' +
-        addr.getPort();
+    return NetUtils.getHostPortString(addr);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 28ab716..13c986c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.http.conn.util.InetAddressUtils;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -1201,7 +1202,13 @@ public class DatanodeManager {
     DatanodeID dnId;
     String hostStr;
     int port;
-    int idx = hostLine.indexOf(':');
+    int idx;
+
+    if (InetAddressUtils.isIPv6StdAddress(hostLine)) {
+      idx = -1;
+    } else {
+      idx = hostLine.lastIndexOf(':');
+    }
 
     if (-1 == idx) {
       hostStr = hostLine;

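Host-file entries may also be a bare address with no port, and a bare IPv6
literal is full of colons, so even lastIndexOf(':') would shear off its final
group. The InetAddressUtils.isIPv6StdAddress guard (from httpcore) catches
that case first; note it only matches the fully written standard form, not
"::"-compressed literals. A hypothetical helper restating the rule from the
hunk above:

    import org.apache.http.conn.util.InetAddressUtils;

    public class HostLineSplitSketch {
      /** Returns {host, portOrNull} for one host-file line (sketch only). */
      static String[] splitHostLine(String hostLine) {
        // A bare standard-form IPv6 literal carries no port: every colon is host.
        int idx = InetAddressUtils.isIPv6StdAddress(hostLine)
            ? -1 : hostLine.lastIndexOf(':');
        return (idx == -1)
            ? new String[] { hostLine, null }
            : new String[] { hostLine.substring(0, idx),
                             hostLine.substring(idx + 1) };
      }

      public static void main(String[] args) {
        System.out.println(splitHostLine("10.0.0.5:50010")[1]);        // 50010
        System.out.println(splitHostLine("2001:db8:0:0:0:0:0:1")[1]);  // null
      }
    }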
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 09a2d8b..532bfe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -76,10 +76,20 @@ public class BlockPoolSliceStorage extends Storage {
    *      progress. Do not delete the 'previous' directory.
    */
   static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
+  private static final String BLOCK_POOL_ID_IPV4_PATTERN_BASE =
+      "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
+
+  // Because ":" is not supported in paths, the BlockPoolID on IPv6 boxes
+  // replaces ":" with ".".
+  // IPv6 formatting is also less rigid, so we surround the address with
+  // square brackets and simply match anything between them.
+  private static final String BLOCK_POOL_ID_IPV6_PATTERN_BASE =
+      Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");
 
   private static final String BLOCK_POOL_ID_PATTERN_BASE =
       Pattern.quote(File.separator) +
-      "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
+      "BP-\\d+-(?:" + BLOCK_POOL_ID_IPV4_PATTERN_BASE + "|" +
+      BLOCK_POOL_ID_IPV6_PATTERN_BASE + ")-\\d+" +
       Pattern.quote(File.separator);
 
   private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(

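A block pool ID embeds the namenode's IP between two dash-delimited numbers,
and this pattern later has to recognize that ID inside storage paths. Since
':' is illegal in the path, the IPv6 form arrives with dots substituted and
brackets retained, which is exactly what the new alternation accepts. A
standalone restatement of the combined pattern (IDs below are invented):

    import java.util.regex.Pattern;

    public class BpIdPatternDemo {
      public static void main(String[] args) {
        // Same alternation as above: dotted-quad IPv4, or anything in brackets.
        Pattern p = Pattern.compile(
            "BP-\\d+-(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|\\[.*\\])-\\d+");
        System.out.println(
            p.matcher("BP-1234-10.0.0.5-1443068372406").matches());               // true
        System.out.println(
            p.matcher("BP-1234-[2001.db8.0.0.0.0.0.1]-1443068372406").matches()); // true
      }
    }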
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index b5d8f4f..f4d71d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -137,7 +137,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
     remoteAddress = peer.getRemoteAddressString();
-    final int colonIdx = remoteAddress.indexOf(':');
+    final int colonIdx = remoteAddress.lastIndexOf(':');
     remoteAddressWithoutPort =
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 9087629..a792306 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -101,10 +101,9 @@ class Checkpointer extends Daemon {
     checkpointConf = new CheckpointConf(conf);
 
     // Pull out exact http address for posting url to avoid ip aliasing issues
-    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
                                    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
-    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
-
+    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.lastIndexOf(":"));
     LOG.info("Checkpoint Period : " +
              checkpointConf.getPeriod() + " secs " +
              "(" + checkpointConf.getPeriod()/60 + " min)");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 14647f0..02bb808 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.util.PersistentLongFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Time;
+import org.apache.http.conn.util.InetAddressUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -964,11 +965,15 @@ public class NNStorage extends Storage implements Closeable,
     String ip = "unknownIP";
     try {
       ip = DNS.getDefaultIP("default");
+      if (InetAddressUtils.isIPv6StdAddress(ip)) {
+        // HDFS doesn't support ":" in paths, so replace it with "."
+        ip = "[" + ip.replaceAll(":", ".") + "]";
+      }
     } catch (UnknownHostException e) {
       LOG.warn("Could not find ip address of \"default\" inteface.");
       throw e;
     }
-    
+
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
     String bpid = "BP-" + rand + "-"+ ip + "-" + Time.now();
     return bpid;
@@ -986,14 +991,14 @@ public class NNStorage extends Storage implements Closeable,
       throw new InconsistentFSStateException(storage, "file "
           + Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
     }
-    
+
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
           "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
-  
+
   public String getBlockPoolID() {
     return blockpoolID;
   }

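newBlockPoolID is where the "." substitution originates: the generated ID
must survive as a directory-name component, so the colons of a raw IPv6
address are rewritten before the ID is assembled. The resulting shapes look
roughly like this (random part and timestamp invented):

    import org.apache.http.conn.util.InetAddressUtils;

    public class BlockPoolIdDemo {
      public static void main(String[] args) {
        String ip = "2001:db8:0:0:0:0:0:1";
        if (InetAddressUtils.isIPv6StdAddress(ip)) {
          ip = "[" + ip.replaceAll(":", ".") + "]";
        }
        // Prints BP-1424644453-[2001.db8.0.0.0.0.0.1]-1445625292796
        System.out.println("BP-" + 1424644453 + "-" + ip + "-" + 1445625292796L);
      }
    }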
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index c2d4d91..45f3ed2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -155,7 +156,8 @@ public class ConfiguredFailoverProxyProvider<T> extends
         throw new RuntimeException(e);
       }
     }
-    return new ProxyInfo<T>(current.namenode, current.address.toString());
+    return new ProxyInfo<T>(current.namenode,
+        NetUtils.getSocketAddressString(current.address));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
index 4e1cb9e..109ed4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.base.Preconditions;
@@ -94,7 +95,7 @@ public class IPFailoverProxyProvider<T> extends
         InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
         nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
             conf, nnAddr, xface, UserGroupInformation.getCurrentUser(), 
-            false).getProxy(), nnAddr.toString());
+            false).getProxy(), NetUtils.getSocketAddressString(nnAddr));
       } catch (IOException ioe) {
         throw new RuntimeException(ioe);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index b03e074..8740269 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.util.StringUtils;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
+import com.google.common.net.HostAndPort;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */
@@ -171,17 +172,17 @@ public class NamenodeWebHdfsMethods {
       throw new IOException("Namesystem has not been intialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
-    
+
     HashSet<Node> excludes = new HashSet<Node>();
     if (excludeDatanodes != null) {
-      for (String host : StringUtils
+      for (String hostAndPort : StringUtils
           .getTrimmedStringCollection(excludeDatanodes)) {
-        int idx = host.indexOf(":");
-        if (idx != -1) {          
+        HostAndPort hp = HostAndPort.fromString(hostAndPort);
+        if (hp.hasPort()) {
           excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
-              host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
+              hp.getHostText(), hp.getPort()));
         } else {
-          excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
+          excludes.add(bm.getDatanodeManager().getDatanodeByHost(hostAndPort));
         }
       }
     }

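Guava's HostAndPort already understands the three shapes an exclude entry can
take -- host, host:port, and [v6-literal]:port -- so replacing the hand-rolled
indexOf split removes the IPv6 edge case entirely. Typical behavior (hostname
below is invented; getHostText was renamed getHost in later Guava releases):

    import com.google.common.net.HostAndPort;

    public class ExcludeEntryDemo {
      public static void main(String[] args) {
        HostAndPort hp = HostAndPort.fromString("[2001:db8::1]:50010");
        System.out.println(hp.getHostText()); // 2001:db8::1 -- brackets stripped
        System.out.println(hp.getPort());     // 50010
        HostAndPort bare = HostAndPort.fromString("dn7.example.com");
        System.out.println(bare.hasPort());   // false -> host-only lookup path
      }
    }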
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index e6cf16c..5e517da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -228,7 +229,7 @@ public class GetConf extends Configured implements Tool {
       if (!cnnlist.isEmpty()) {
         for (ConfiguredNNAddress cnn : cnnlist) {
           InetSocketAddress rpc = cnn.getAddress();
-          tool.printOut(rpc.getHostName()+":"+rpc.getPort());
+          tool.printOut(NetUtils.getHostPortString(rpc));
         }
         return 0;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
index 087972f..4f0d76a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
@@ -34,6 +34,7 @@ import io.netty.handler.codec.string.StringEncoder;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.net.NetUtils;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -104,7 +105,9 @@ public class WebImageViewer implements Closeable {
     allChannels.add(channel);
 
     address = (InetSocketAddress) channel.localAddress();
-    LOG.info("WebImageViewer started. Listening on " + address.toString() + ". 
Press Ctrl+C to stop the viewer.");
+    LOG.info("WebImageViewer started. Listening on " + NetUtils
+        .getSocketAddressString(address) +
+        ". Press Ctrl+C to stop the viewer.");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 1ca1ca5..8474df4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -181,7 +181,7 @@ public class BlockReaderTestUtil {
     return new BlockReaderFactory(fs.getClient().getConf()).
       setInetSocketAddress(targetAddr).
       setBlock(block).
-      setFileName(targetAddr.toString()+ ":" + block.getBlockId()).
+      setFileName(NetUtils.getSocketAddressString(targetAddr)+ ":" + block.getBlockId()).
       setBlockToken(testBlock.getBlockToken()).
       setStartOffset(offset).
       setLength(lenToRead).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index c61c0b1..266c0fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 
@@ -55,7 +56,7 @@ public class TestDFSAddressConfig {
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);
 
-    String selfSocketAddr = dn.getXferAddress().toString();
+    String selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
@@ -80,7 +81,7 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -105,10 +106,11 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:") ||
+        selfSocketAddr.contains("/[0:0:0:0:0:0:0:0]:"));
 
     cluster.shutdown();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 19b8c69..3a03a69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -502,26 +502,26 @@ public class TestDFSUtil {
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
         NS2_NN1_HOST);
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
+            DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
         NS2_NN2_HOST);
-    
+
     Map<String, Map<String, InetSocketAddress>> map =
       DFSUtil.getHaNnRpcAddresses(conf);
 
     assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
     assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
     assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
-    
-    assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
-    assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
-    assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
-    assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
-    
-    assertEquals(NS1_NN1_HOST, 
+
+    assertEquals(NS1_NN1_HOST, NetUtils.getHostPortString(map.get("ns1").get("ns1-nn1")));
+    assertEquals(NS1_NN2_HOST, NetUtils.getHostPortString(map.get("ns1").get("ns1-nn2")));
+    assertEquals(NS2_NN1_HOST, NetUtils.getHostPortString(map.get("ns2").get("ns2-nn1")));
+    assertEquals(NS2_NN2_HOST, NetUtils.getHostPortString(map.get("ns2").get("ns2-nn2")));
+
+    assertEquals(NS1_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
-    assertEquals(NS1_NN2_HOST, 
+    assertEquals(NS1_NN2_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
-    assertEquals(NS2_NN1_HOST, 
+    assertEquals(NS2_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
 
     // No nameservice was given and we can't determine which service addr
@@ -589,8 +589,26 @@ public class TestDFSUtil {
     Map<String, Map<String, InetSocketAddress>> map =
         DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
-    assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
-    assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
+    assertEquals(NS1_NN1_ADDR, NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(NS1_NN2_ADDR, NetUtils.getHostPortString(map.get(
+        "ns1").get("nn2")));
+  }
+
+  @Test
+  public void testIPv6GetHaNnHttpAddresses() throws IOException {
+    final String LOGICAL_HOST_NAME = "ns1";
+    final String NS1_NN1_ADDR      = "[0:0:0:0:0:b00c:c0a8:12a]:8020";
+    final String NS1_NN2_ADDR      = "[::face:a0b:182a]:8020";
+
+    Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
+
+    Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
+
+    assertEquals(NS1_NN1_ADDR, NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(NS1_NN2_ADDR.replace("::", "0:0:0:0:0:"),
+        NetUtils.getHostPortString(map.get(
+        "ns1").get("nn2")));
   }
 
  private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 7b7f415..c86b05e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -424,7 +425,8 @@ public class TestFileAppend{
 
       // stop one datanode
       DataNodeProperties dnProp = cluster.stopDataNode(0);
-      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      String dnAddress = NetUtils.getSocketAddressString(
+          dnProp.datanode.getXferAddress());
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
       }
@@ -478,7 +480,8 @@ public class TestFileAppend{
 
       // stop one datanode
       DataNodeProperties dnProp = cluster.stopDataNode(0);
-      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      String dnAddress = NetUtils
+          .getSocketAddressString(dnProp.datanode.getXferAddress());
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 4ad066f..2d4cff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -88,6 +88,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -95,6 +97,7 @@ import org.junit.Test;
  * This class tests various cases during file creation.
  */
 public class TestFileCreation {
+  public static final Log LOG = LogFactory.getLog(TestFileCreation.class);
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
 
   {
@@ -110,7 +113,7 @@ public class TestFileCreation {
   static final int numBlocks = 2;
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
-  
+
   private static final String[] NON_CANONICAL_PATHS = new String[] {
     "//foo",
     "///foo2",
@@ -123,7 +126,7 @@ public class TestFileCreation {
   // creates a file but does not close it
  public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
-    System.out.println("createFile: Created " + name + " with " + repl + " 
replica.");
+    LOG.info("createFile: Created " + name + " with " + repl + " replica.");
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
         .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         (short) repl, blockSize);
@@ -196,8 +199,8 @@ public class TestFileCreation {
   public void testFileCreationSetLocalInterface() throws IOException {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
-    // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo", false);
+    // Use wildcard address to force interface to be used
+    checkFileCreation("0.0.0.0", false);
 
     try {
       checkFileCreation("bogus-interface", false);
@@ -214,6 +217,7 @@ public class TestFileCreation {
    */
   public void checkFileCreation(String netIf, boolean useDnHostname)
       throws IOException {
+
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
       conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
@@ -239,9 +243,9 @@ public class TestFileCreation {
       // check that / exists
       //
       Path path = new Path("/");
-      System.out.println("Path : \"" + path.toString() + "\"");
-      System.out.println(fs.getFileStatus(path).isDirectory()); 
-      assertTrue("/ should be a directory", 
+      LOG.info("Path : \"" + path.toString() + "\"");
+      LOG.info(fs.getFileStatus(path).isDirectory());
+      assertTrue("/ should be a directory",
                  fs.getFileStatus(path).isDirectory());
 
       //
@@ -249,7 +253,7 @@ public class TestFileCreation {
       //
       Path dir1 = new Path("/test_dir");
       fs.mkdirs(dir1);
-      System.out.println("createFile: Creating " + dir1.getName() + 
+      LOG.info("createFile: Creating " + dir1.getName() +
         " for overwrite of existing directory.");
       try {
         fs.create(dir1, true); // Create path, overwrite=true
@@ -270,9 +274,9 @@ public class TestFileCreation {
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
       // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
+      assertTrue(file1 + " should be a file",
                  fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // write to file
       writeFile(stm);
@@ -284,13 +288,13 @@ public class TestFileCreation {
       assertTrue(file1 + " should be of size " + fileSize +
                  " but found to be of size " + len, 
                   len == fileSize);
-      
+
       // verify the disk space the file occupied
       long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
       assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
           "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
-      
-      // Check storage usage 
+
+      // Check storage usage
       // can't check capacities for real storage since the OS file system may be changing under us.
       if (simulatedStorage) {
         DataNode dn = cluster.getDataNodes().get(0);
@@ -327,7 +331,7 @@ public class TestFileCreation {
       FSDataOutputStream stm1 = createFile(fs, file1, 1);
       FSDataOutputStream stm2 = createFile(fs, file2, 1);
       FSDataOutputStream stm3 = createFile(localfs, file3, 1);
-      System.out.println("DeleteOnExit: Created files.");
+      LOG.info("DeleteOnExit: Created files.");
 
       // write to files and close. Purposely, do not close file2.
       writeFile(stm1);
@@ -358,7 +362,7 @@ public class TestFileCreation {
                  !fs.exists(file2));
       assertTrue(file3 + " still exists inspite of deletOnExit set.",
                  !localfs.exists(file3));
-      System.out.println("DeleteOnExit successful.");
+      LOG.info("DeleteOnExit successful.");
 
     } finally {
       IOUtils.closeStream(fs);
@@ -455,7 +459,7 @@ public class TestFileCreation {
       // verify that file exists in FS namespace
       assertTrue(file1 + " should be a file", 
                  fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // kill the datanode
       cluster.shutdownDataNodes();
@@ -467,7 +471,7 @@ public class TestFileCreation {
         if (info.length == 0) {
           break;
         }
-        System.out.println("testFileCreationError1: waiting for datanode " +
+        LOG.info("testFileCreationError1: waiting for datanode " +
                            " to die.");
         try {
           Thread.sleep(1000);
@@ -482,14 +486,14 @@ public class TestFileCreation {
         stm.write(buffer);
         stm.close();
       } catch (Exception e) {
-        System.out.println("Encountered expected exception");
+        LOG.info("Encountered expected exception");
       }
 
       // verify that no blocks are associated with this file
       // bad block allocations were cleaned up earlier.
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up",
                  locations.locatedBlockCount() == 0);
     } finally {
@@ -505,7 +509,7 @@ public class TestFileCreation {
   @Test
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
-    System.out.println("testFileCreationError2 start");
+    LOG.info("testFileCreationError2 start");
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -524,24 +528,24 @@ public class TestFileCreation {
       //
       Path file1 = new Path("/filestatus.dat");
       createFile(dfs, file1, 1);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
                          + "Created file filestatus.dat with one replicas.");
 
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add one block to the file
       LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
           client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
       locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       int count = locations.locatedBlockCount();
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "The file now has " + count + " blocks.");
       
       // set the soft and hard limit to be 1 second so that the
@@ -557,10 +561,10 @@ public class TestFileCreation {
       // verify that the last block was synchronized.
       locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "locations = " + locations.locatedBlockCount());
       assertEquals(0, locations.locatedBlockCount());
-      System.out.println("testFileCreationError2 successful");
+      LOG.info("testFileCreationError2 successful");
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
@@ -570,7 +574,7 @@ public class TestFileCreation {
   /** test addBlock(..) when replication<min and excludeNodes==null. */
   @Test
   public void testFileCreationError3() throws IOException {
-    System.out.println("testFileCreationError3 start");
+    LOG.info("testFileCreationError3 start");
     Configuration conf = new HdfsConfiguration();
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
@@ -591,7 +595,7 @@ public class TestFileCreation {
         FileSystem.LOG.info("GOOD!", ioe);
       }
 
-      System.out.println("testFileCreationError3 successful");
+      LOG.info("testFileCreationError3 successful");
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
@@ -624,7 +628,7 @@ public class TestFileCreation {
       // create a new file.
       Path file1 = new Path("/filestatus.dat");
       HdfsDataOutputStream stm = create(fs, file1, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
       assertEquals(file1 + " should be replicated to 1 datanode.", 1,
           stm.getCurrentBlockReplication());
@@ -638,7 +642,7 @@ public class TestFileCreation {
       // rename file wile keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
       fs.rename(file1, fileRenamed);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file1 + " to " +
                          fileRenamed);
       file1 = fileRenamed;
@@ -647,7 +651,7 @@ public class TestFileCreation {
       //
       Path file2 = new Path("/filestatus2.dat");
       FSDataOutputStream stm2 = createFile(fs, file2, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file2);
 
       // create yet another new file with full path name. 
@@ -655,21 +659,21 @@ public class TestFileCreation {
       //
       Path file3 = new Path("/user/home/fullpath.dat");
       FSDataOutputStream stm3 = createFile(fs, file3, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file3);
       Path file4 = new Path("/user/home/fullpath4.dat");
       FSDataOutputStream stm4 = createFile(fs, file4, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file4);
 
       fs.mkdirs(new Path("/bin"));
       fs.rename(new Path("/user/home"), new Path("/bin"));
       Path file3new = new Path("/bin/home/fullpath.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file3 + " to " +
                          file3new);
       Path file4new = new Path("/bin/home/fullpath4.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file4 + " to " +
                          file4new);
 
@@ -729,14 +733,14 @@ public class TestFileCreation {
       DFSClient client = fs.dfs;
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file1,
                  locations.locatedBlockCount() == 3);
 
       // verify filestatus2.dat
       locations = client.getNamenode().getBlockLocations(
                                   file2.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file2,
                  locations.locatedBlockCount() == 1);
     } finally {
@@ -751,7 +755,7 @@ public class TestFileCreation {
   @Test
   public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
-    System.out.println("Testing adbornal client death.");
+    LOG.info("Testing adbornal client death.");
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
@@ -765,7 +769,7 @@ public class TestFileCreation {
       //
       Path file1 = new Path("/clienttest.dat");
       FSDataOutputStream stm = createFile(fs, file1, 1);
-      System.out.println("Created file clienttest.dat");
+      LOG.info("Created file clienttest.dat");
 
       // write to file
       writeFile(stm);
@@ -781,7 +785,7 @@ public class TestFileCreation {
       cluster.shutdown();
     }
   }
-  
+
   /**
    * Test file creation using createNonRecursive().
    */
@@ -863,7 +867,7 @@ public class TestFileCreation {
   static IOException createNonRecursive(FileSystem fs, Path name,
       int repl, EnumSet<CreateFlag> flag) throws IOException {
     try {
-      System.out.println("createNonRecursive: Attempting to create " + name +
+      LOG.info("createNonRecursive: Attempting to create " + name +
           " with " + repl + " replica.");
       int bufferSize = fs.getConf()
           .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
@@ -897,9 +901,9 @@ public class TestFileCreation {
 
     try {
       FileSystem fs = cluster.getFileSystem();
-      
+
       Path[] p = {new Path("/foo"), new Path("/bar")};
-      
+
       //write 2 files at the same time
       FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
       int i = 0;
@@ -931,9 +935,9 @@ public class TestFileCreation {
 
     try {
       FileSystem fs = cluster.getFileSystem();
-      
+
       Path[] p = {new Path("/foo"), new Path("/bar")};
-      
+
       //write 2 files at the same time
       FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
       int i = 0;
@@ -961,7 +965,7 @@ public class TestFileCreation {
    */
   @Test
   public void testLeaseExpireHardLimit() throws Exception {
-    System.out.println("testLeaseExpireHardLimit start");
+    LOG.info("testLeaseExpireHardLimit start");
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
@@ -1004,7 +1008,7 @@ public class TestFileCreation {
             blk.getBlockPoolId(), blk.getBlockId());
         final File blockfile = DataNodeTestUtils.getFile(datanode,
             blk.getBlockPoolId(), b.getBlockId());
-        System.out.println("blockfile=" + blockfile);
+        LOG.info("blockfile=" + blockfile);
         if (blockfile != null) {
           BufferedReader in = new BufferedReader(new FileReader(blockfile));
           assertEquals("something", in.readLine());
@@ -1012,20 +1016,20 @@ public class TestFileCreation {
           successcount++;
         }
       }
-      System.out.println("successcount=" + successcount);
-      assertTrue(successcount > 0); 
+      LOG.info("successcount=" + successcount);
+      assertTrue(successcount > 0);
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
     }
 
-    System.out.println("testLeaseExpireHardLimit successful");
+    LOG.info("testLeaseExpireHardLimit successful");
   }
 
   // test closing file system before all file handles are closed.
   @Test
   public void testFsClose() throws Exception {
-    System.out.println("test file system close start");
+    LOG.info("test file system close start");
     final int DATANODE_NUM = 3;
 
     Configuration conf = new HdfsConfiguration();
@@ -1046,7 +1050,7 @@ public class TestFileCreation {
       // close file system without closing file
       dfs.close();
     } finally {
-      System.out.println("testFsClose successful");
+      LOG.info("testFsClose successful");
       cluster.shutdown();
     }
   }
@@ -1054,7 +1058,7 @@ public class TestFileCreation {
   // test closing file after cluster is shutdown
   @Test
   public void testFsCloseAfterClusterShutdown() throws IOException {
-    System.out.println("test testFsCloseAfterClusterShutdown start");
+    LOG.info("test testFsCloseAfterClusterShutdown start");
     final int DATANODE_NUM = 3;
 
     Configuration conf = new HdfsConfiguration();
@@ -1085,13 +1089,13 @@ public class TestFileCreation {
       boolean hasException = false;
       try {
         out.close();
-        System.out.println("testFsCloseAfterClusterShutdown: Error here");
+        LOG.info("testFsCloseAfterClusterShutdown: Error here");
       } catch (IOException e) {
         hasException = true;
       }
       assertTrue("Failed to close file after cluster shutdown", hasException);
     } finally {
-      System.out.println("testFsCloseAfterClusterShutdown successful");
+      LOG.info("testFsCloseAfterClusterShutdown successful");
       if (cluster != null) {
         cluster.shutdown();
       }
@@ -1110,7 +1114,7 @@ public class TestFileCreation {
   public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
     doCreateTest(CreationMethod.DIRECT_NN_RPC);
   }
-  
+
   /**
    * Another regression test for HDFS-3626. This one creates files using
    * a Path instantiated from a string object.
@@ -1130,7 +1134,7 @@ public class TestFileCreation {
       throws Exception {
     doCreateTest(CreationMethod.PATH_FROM_URI);
   }
-  
+
   private static enum CreationMethod {
     DIRECT_NN_RPC,
     PATH_FROM_URI,
@@ -1145,7 +1149,7 @@ public class TestFileCreation {
       NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
 
       for (String pathStr : NON_CANONICAL_PATHS) {
-        System.out.println("Creating " + pathStr + " by " + method);
+        LOG.info("Creating " + pathStr + " by " + method);
         switch (method) {
         case DIRECT_NN_RPC:
           try {
@@ -1160,7 +1164,7 @@ public class TestFileCreation {
             // So, we expect all of them to fail. 
           }
           break;
-          
+
         case PATH_FROM_URI:
         case PATH_FROM_STRING:
           // Unlike the above direct-to-NN case, we expect these to succeed,
@@ -1178,7 +1182,7 @@ public class TestFileCreation {
           throw new AssertionError("bad method: " + method);
         }
       }
-      
+
       cluster.restartNameNode();
 
     } finally {
@@ -1235,13 +1239,13 @@ public class TestFileCreation {
       dfs.mkdirs(new Path("/foo/dir"));
       String file = "/foo/dir/file";
       Path filePath = new Path(file);
-      
+
       // Case 1: Create file with overwrite, check the blocks of old file
       // are cleaned after creating with overwrite
       NameNode nn = cluster.getNameNode();
       FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
       BlockManager bm = fsn.getBlockManager();
-      
+
       FSDataOutputStream out = dfs.create(filePath);
       byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
       try {
@@ -1249,11 +1253,11 @@ public class TestFileCreation {
       } finally {
         out.close();
       }
-      
+
       LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
           nn, file, 0, fileSize);
       assertBlocks(bm, oldBlocks, true);
-      
+
       out = dfs.create(filePath, true);
       byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
       try {
@@ -1262,12 +1266,12 @@ public class TestFileCreation {
         out.close();
       }
       dfs.deleteOnExit(filePath);
-      
+
       LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
           nn, file, 0, fileSize);
       assertBlocks(bm, newBlocks, true);
       assertBlocks(bm, oldBlocks, false);
-      
+
       FSDataInputStream in = dfs.open(filePath);
       byte[] result = null;
       try {
@@ -1276,7 +1280,7 @@ public class TestFileCreation {
         in.close();
       }
       Assert.assertArrayEquals(newData, result);
-      
+
       // Case 2: Restart NN, check the file
       cluster.restartNameNode();
       nn = cluster.getNameNode();
@@ -1287,13 +1291,13 @@ public class TestFileCreation {
         in.close();
       }
       Assert.assertArrayEquals(newData, result);
-      
+
       // Case 3: Save new checkpoint and restart NN, check the file
       NameNodeAdapter.enterSafeMode(nn, false);
       NameNodeAdapter.saveNamespace(nn);
       cluster.restartNameNode();
       nn = cluster.getNameNode();
-      
+
       in = dfs.open(filePath);
       try {
         result = readAll(in);
@@ -1310,8 +1314,8 @@ public class TestFileCreation {
       }
     }
   }
-  
-  private void assertBlocks(BlockManager bm, LocatedBlocks lbs, 
+
+  private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
       boolean exist) {
     for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
       if (exist) {
@@ -1323,7 +1327,7 @@ public class TestFileCreation {
       }
     }
   }
-  
+
   private byte[] readAll(FSDataInputStream in) throws IOException {
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     byte[] buffer = new byte[1024];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index ad67deb..8527953 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -902,14 +902,14 @@ public class TestQuorumJournalManager {
     GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
         "3");
   }
-  
+
   @Test
   public void testToString() throws Exception {
     GenericTestUtils.assertMatches(
         qjm.toString(),
-        "QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
+        "QJM to \\[localhost:\\d+, localhost:\\d+, localhost:\\d+\\]");
   }
-  
+
   @Test
   public void testSelectInputStreamsNotOnBoundary() throws Exception {
     final int txIdsPerSegment = 10; 

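The expected QJM toString now matches hostnames rather than raw IP literals, presumably because address rendering is routed through a common helper (NetUtils) that can bracket IPv6 literals uniformly. A standalone check of the updated regex (the sample string and ports are invented for illustration):

    import java.util.regex.Pattern;

    public class QjmToStringRegexDemo {
      public static void main(String[] args) {
        // Regex copied from the updated test; [ and ] are escaped because
        // they are regex metacharacters.
        String expected =
            "QJM to \\[localhost:\\d+, localhost:\\d+, localhost:\\d+\\]";
        String sample =
            "QJM to [localhost:57023, localhost:57024, localhost:57025]";
        System.out.println(Pattern.compile(expected).matcher(sample).find()); // true
      }
    }
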
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
index b5951a0..5adf7ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.junit.Test;
 
 import java.io.File;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.Random;
 import java.util.UUID;
 
@@ -53,13 +55,37 @@ public class TestBlockPoolSliceStorage {
     }
   }
 
-  private String makeRandomIpAddress() {
+  private String makeRandomIpv4Address() {
     return rand.nextInt(256) + "." +
            rand.nextInt(256) + "." +
            rand.nextInt(256) + "." +
            rand.nextInt(256);
   }
 
+  private String makeRandomIpv6Address() {
+    byte[] bytes = new byte[16];
+    rand.nextBytes(bytes);
+    InetAddress adr = null;
+    try {
+      adr = InetAddress.getByAddress("unused", bytes);
+    } catch (UnknownHostException uhe) {
+      // Should never happen
+      LOG.error(uhe);
+      assertThat(true, is(false));
+    }
+    String addrString = adr.getHostAddress().replaceAll(":", ".");
+
+    return "[" + addrString + "]";
+  }
+
+  private String makeRandomIpAddress() {
+    if (rand.nextBoolean()) {
+      return makeRandomIpv4Address();
+    } else {
+      return makeRandomIpv6Address();
+    }
+  }
+
   private String makeRandomBlockpoolId() {
     return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
            "-" + makeRandomIpAddress() +

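For context on the commit title, a standalone sketch of why a naive split(":") cannot parse host:port strings once IPv6 literals appear, which is what the bracketed form generated by makeRandomIpv6Address() avoids (all strings below are illustrative):

    public class UnsafeSplitDemo {
      public static void main(String[] args) {
        // IPv4 "host:port": split(":") cleanly yields host and port.
        System.out.println("127.0.0.1:8020".split(":").length);   // 2

        // IPv6 literal: the address itself contains colons, so the same
        // split cannot tell where the host ends and the port begins.
        System.out.println("2001:db8::1:8020".split(":").length); // 5

        // Bracketing the literal keeps it a single recognizable token:
        // "[2001:db8::1]:8020" can be parsed unambiguously.
      }
    }
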
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
index dca777f..8399ab4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;
 import static org.hamcrest.core.IsNot.not;
 
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 import org.apache.hadoop.fs.FileUtil;
@@ -29,6 +31,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 
+import java.net.InetAddress;
+import java.net.Inet6Address;
 import java.io.File;
 import java.io.IOException;
 
@@ -53,6 +57,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 public class TestNameNodeRespectsBindHostKeys {
   public static final Log LOG = LogFactory.getLog(TestNameNodeRespectsBindHostKeys.class);
   private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String IPv6_WILDCARD_ADDRESS = "0:0:0:0:0:0:0:0";
   private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
 
   private static String getRpcServerAddress(MiniDFSCluster cluster) {
@@ -69,9 +74,9 @@ public class TestNameNodeRespectsBindHostKeys {
   public void testRpcBindHostKey() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
-    
+
     LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
-    
+
     // NN should not bind the wildcard address by default.
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
@@ -87,7 +92,7 @@ public class TestNameNodeRespectsBindHostKeys {
     }
 
     LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
-    
+
     // Tell NN to bind the wildcard address.
     conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
 
@@ -97,12 +102,37 @@ public class TestNameNodeRespectsBindHostKeys {
       cluster.waitActive();
       String address = getRpcServerAddress(cluster);
       assertThat("Bind address " + address + " is not wildcard.",
-                 address, is("/" + WILDCARD_ADDRESS));
+                 address, anyOf(
+                   is("/" + WILDCARD_ADDRESS),
+                   is("/" + IPv6_WILDCARD_ADDRESS)));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
-    }    
+    }
+
+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPv6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds the wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.",
+                   address, anyOf(
+                     is("/" + WILDCARD_ADDRESS),
+                     is("/" + IPv6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 us not supported");
+    }
   }
 
   @Test (timeout=300000)
@@ -111,7 +141,7 @@ public class TestNameNodeRespectsBindHostKeys {
     MiniDFSCluster cluster = null;
 
     LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
-    
+
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
 
     // NN should not bind the wildcard address by default.
@@ -130,6 +160,29 @@ public class TestNameNodeRespectsBindHostKeys {
 
     LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
 
+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, IPv6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds the wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getServiceRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.",
+                   address, anyOf(
+                     is("/" + WILDCARD_ADDRESS),
+                     is("/" + IPv6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 us not supported");
+    }
+
     // Tell NN to bind the wildcard address.
     conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
 
@@ -139,7 +192,9 @@ public class TestNameNodeRespectsBindHostKeys {
       cluster.waitActive();
       String address = getServiceRpcServerAddress(cluster);
       assertThat("Bind address " + address + " is not wildcard.",
-                 address, is("/" + WILDCARD_ADDRESS));
+                 address, anyOf(
+                   is("/" + WILDCARD_ADDRESS),
+                   is("/" + IPv6_WILDCARD_ADDRESS)));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -159,7 +214,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
       assertFalse("HTTP Bind address not expected to be wildcard by default.",
                   address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -179,7 +235,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
       assertTrue("HTTP Bind address " + address + " is not wildcard.",
                  address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -229,7 +286,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpsAddress());
       assertFalse("HTTP Bind address not expected to be wildcard by default.",
                   address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -249,7 +307,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils
+          .getSocketAddressString(cluster.getNameNode().getHttpsAddress());
       assertTrue("HTTP Bind address " + address + " is not wildcard.",
                  address.startsWith(WILDCARD_ADDRESS));
     } finally {

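The bind-host assertions above accept either wildcard spelling via hamcrest's anyOf. A self-contained demo of the matcher pattern (the address value is invented):

    import static org.hamcrest.core.AnyOf.anyOf;
    import static org.hamcrest.core.Is.is;
    import static org.junit.Assert.assertThat;

    import org.junit.Test;

    public class AnyOfMatcherDemo {
      @Test
      public void acceptsEitherWildcardForm() {
        // What a dual-stack JVM might report for a wildcard bind.
        String address = "/0:0:0:0:0:0:0:0";
        // Passes when the address is either the IPv4 or IPv6 wildcard.
        assertThat(address, anyOf(is("/0.0.0.0"), is("/0:0:0:0:0:0:0:0")));
      }
    }
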
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cda82c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
index ada93e8..5000ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
@@ -25,7 +25,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;
 
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
@@ -43,14 +45,18 @@ public class TestNameNodeRpcServer {
     // The name node in MiniDFSCluster only binds to 127.0.0.1.
     // We can set the bind address to 0.0.0.0 to make it listen
     // to all interfaces.
+    // On IPv4-only machines the listener address will be reported as 0.0.0.0.
+    // On dual-stack or IPv6-only machines it will be 0:0:0:0:0:0:0:0.
     conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
     MiniDFSCluster cluster = null;
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
-      assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
-          .getClientRpcServer().getListenerAddress().getHostName());
+      String listenerAddress = ((NameNodeRpcServer)cluster.getNameNodeRpc())
+          .getClientRpcServer().getListenerAddress().getHostName();
+      assertThat("Bind address " + listenerAddress + " is not wildcard.",
+          listenerAddress, anyOf(is("0.0.0.0"), is("0:0:0:0:0:0:0:0")));
     } finally {
       if (cluster != null) {
         cluster.shutdown();

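As the new comment in the test notes, the listener may report either wildcard spelling depending on the machine's stack; the JDK treats both as the any-address, which is why the assertion cannot pin a single literal. A quick standalone check:

    import java.net.Inet6Address;
    import java.net.InetAddress;

    public class WildcardAddressDemo {
      public static void main(String[] args) throws Exception {
        InetAddress v4any = InetAddress.getByName("0.0.0.0");
        InetAddress v6any = InetAddress.getByName("0:0:0:0:0:0:0:0");
        // Both spellings denote the wildcard ("any") address.
        System.out.println(v4any.isAnyLocalAddress());     // true
        System.out.println(v6any.isAnyLocalAddress());     // true
        System.out.println(v6any instanceof Inet6Address); // true
      }
    }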