Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c6349c320 -> 2d6ab2000


HDFS-12990. Change default NameNode RPC port back to 8020. Contributed by Xiao Chen.

(cherry picked from commit 4304fcd5bdf9fb7aa9181e866eea383f89bf171f)

 Conflicts:
        hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java

(cherry picked from commit 9264f10bb35dbe30c75c648bf759e8aeb715834a)
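
Since the default moves from 9820 back to 8020, deployments that adopted the 3.0.0 default can keep 9820 across this change by pinning the port explicitly. A minimal hdfs-site.xml sketch (the hostname is a placeholder, not taken from this commit):

```xml
<property>
  <!-- Pin the NameNode RPC port explicitly so the default change has no effect here. -->
  <name>dfs.namenode.rpc-address</name>
  <value>nn.example.com:9820</value>
</property>
```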


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d6ab200
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d6ab200
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d6ab200

Branch: refs/heads/branch-3.0
Commit: 2d6ab2000714b4702d9031a17a60bab29bb0649d
Parents: c6349c3
Author: Anu Engineer <aengin...@apache.org>
Authored: Tue Feb 6 13:43:45 2018 -0800
Committer: Anu Engineer <aengin...@apache.org>
Committed: Wed Feb 7 11:16:53 2018 -0800

----------------------------------------------------------------------
 .../hadoop/fs/CommonConfigurationKeys.java      |   2 +-
 .../hadoop/util/GenericOptionsParser.java       |   8 +-
 .../hadoop/fs/TestDelegateToFileSystem.java     |   2 +-
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |   2 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   2 +-
 .../ha/TestRequestHedgingProxyProvider.java     |   6 +-
 .../src/main/resources/hdfs-default.xml         |   2 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   6 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |   6 +-
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |   2 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md     |  60 +++-
 .../hadoop/hdfs/TestDFSClientFailover.java      |   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     |  28 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |   2 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   2 +-
 .../TestBlockTokenWithDFSStriped.java           |   2 +-
 .../server/datanode/TestBlockPoolManager.java   |  12 +-
 .../hdfs/server/namenode/TestAllowFormat.java   |   4 +-
 .../apache/hadoop/hdfs/tools/TestGetConf.java   |   1 -
 .../src/test/resources/job_0.23.9-FAILED.jhist  |   2 +-
 .../src/site/markdown/DistCp.md.vm              |  70 ++---
 .../apache/hadoop/tools/TestOptionsParser.java  | 294 +++++++++----------
 .../hadoop-openstack/src/site/markdown/index.md |   2 +-
 .../src/main/data/2jobs2min-rumen-jh.json       |  12 +-
 .../hadoop/registry/RegistryTestHelper.java     |   4 +-
 .../impl/pb/TestPBRecordImpl.java               |   2 +-
 26 files changed, 290 insertions(+), 247 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 0da4bbd..96d5bc3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -98,7 +98,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /**
    * CallQueue related settings. These are not used directly, but rather
    * combined with a namespace and port. For instance:
-   * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
+   * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
    */
   public static final String IPC_NAMESPACE = "ipc";
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index ac9776f..7b0a25c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -83,11 +83,11 @@ import org.slf4j.LoggerFactory;
  *
  * <p>Examples:</p>
  * <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  * 
- * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  *     
  * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
  * list /data directory in dfs with multiple conf files specified.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
index fefcf0e..5de3286 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
@@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {
 
   @Test
   public void testDefaultURIwithPort() throws Exception {
-    testDefaultUriInternal("hdfs://dummyhost:9820");
+    testDefaultUriInternal("hdfs://dummyhost:8020");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
index f0ebc1e..b07da8d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
   private static String TEST_FENCING_HOST = System.getProperty(
       "test.TestSshFenceByTcpPort.host", "localhost");
   private static final String TEST_FENCING_PORT = System.getProperty(
-      "test.TestSshFenceByTcpPort.port", "9820");
+      "test.TestSshFenceByTcpPort.port", "8020");
   private static final String TEST_KEYFILE = System.getProperty(
       "test.TestSshFenceByTcpPort.key");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 38c2435..52a7cd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -73,7 +73,7 @@ public interface HdfsClientConfigKeys {
   int     DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
-  int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
+  int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
   String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
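
DFS_NAMENODE_RPC_PORT_DEFAULT above is the fallback applied when an HDFS URI carries no port. As a rough illustration (assumed hostname, not part of this diff), a client-side core-site.xml like the following now resolves to port 8020:

```xml
<property>
  <!-- No port in the URI: clients fall back to DFS_NAMENODE_RPC_PORT_DEFAULT (8020). -->
  <name>fs.defaultFS</name>
  <value>hdfs://nn.example.com</value>
</property>
```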

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 724b5f0..04e77ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -70,10 +70,10 @@ public class TestRequestHedgingProxyProvider {
         HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
     conf.set(
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
-        "machine1.foo.bar:9820");
+        "machine1.foo.bar:8020");
     conf.set(
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
-        "machine2.foo.bar:9820");
+        "machine2.foo.bar:8020");
   }
 
   @Test
@@ -236,7 +236,7 @@ public class TestRequestHedgingProxyProvider {
     conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
             "nn1,nn2,nn3");
     conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
-            "machine3.foo.bar:9820");
+            "machine3.foo.bar:8020");
 
     final AtomicInteger counter = new AtomicInteger(0);
     final int[] isGood = {1};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9879f1a..aaf9fd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -37,7 +37,7 @@
     RPC address that handles all clients requests. In the case of HA/Federation where multiple namenodes exist,
     the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
     dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 9820.
+    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default RPC port is 8020.
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index 5caf8c2..389d503 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -119,15 +119,15 @@ The order in which you set these configurations is unimportant, but the values y
 
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn1</name>
-          <value>machine1.example.com:9820</value>
+          <value>machine1.example.com:8020</value>
         </property>
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn2</name>
-          <value>machine2.example.com:9820</value>
+          <value>machine2.example.com:8020</value>
         </property>
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn3</name>
-          <value>machine3.example.com:9820</value>
+          <value>machine3.example.com:8020</value>
         </property>
 
    **Note:** You may similarly configure the "**servicerpc-address**" setting if

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index b7fce98..3fe7ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -132,15 +132,15 @@ The order in which you set these configurations is unimportant, but the values y
 
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn1</name>
-          <value>machine1.example.com:9820</value>
+          <value>machine1.example.com:8020</value>
         </property>
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn2</name>
-          <value>machine2.example.com:9820</value>
+          <value>machine2.example.com:8020</value>
         </property>
         <property>
           <name>dfs.namenode.rpc-address.mycluster.nn3</name>
-          <value>machine3.example.com:9820</value>
+          <value>machine3.example.com:8020</value>
         </property>
 
    **Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire.
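
For context, the rpc-address keys above pair with nameservice and NameNode ID declarations; a sketch assembled from the same HA guide's conventions, mirroring the mycluster/nn1-nn3 names used in this hunk (not part of this diff):

```xml
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>
<property>
  <!-- NameNode IDs referenced by the dfs.namenode.rpc-address.* keys above. -->
  <name>dfs.ha.namenodes.mycluster</name>
  <value>nn1,nn2,nn3</value>
</property>
```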

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index 76cd2bf..471a27f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -291,7 +291,7 @@ We are going to remove the file test1.
 The comment below shows that the file has been moved to Trash directory.
 
     $ hadoop fs -rm -r delete/test1
-    Moved: hdfs://localhost:9820/user/hadoop/delete/test1 to trash at: hdfs://localhost:9820/user/hadoop/.Trash/Current
+    Moved: hdfs://localhost:8020/user/hadoop/delete/test1 to trash at: hdfs://localhost:8020/user/hadoop/.Trash/Current
 
 now we are going to remove the file with skipTrash option,
 which will not send the file to Trash.It will be completely removed from HDFS.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 3810e28..e8b85f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -100,7 +100,51 @@ The mount points of a mount table are specified in the standard Hadoop configura
 </property>
 ```
 
-The authority following the `viewfs://` scheme in the URI is the mount table name. It is recommanded that the mount table of a cluster should be named by the cluster name. Then Hadoop system will look for a mount table with the name "clusterX" in the Hadoop configuration files. Operations arrange all gateways and service machines to contain the mount tables for ALL clusters such that, for each cluster, the default file system is set to the ViewFs mount table for that cluster as described above.
+The authority following the `viewfs://` scheme in the URI is the mount table name. It is recommended that the mount table of a cluster should be named by the cluster name. Then Hadoop system will look for a mount table with the name "clusterX" in the Hadoop configuration files. Operations arrange all gateways and service machines to contain the mount tables for ALL clusters such that, for each cluster, the default file system is set to the ViewFs mount table for that cluster as described above.
+
+The mount points of a mount table are specified in the standard Hadoop configuration files. All the mount table config entries for `viewfs` are prefixed by `fs.viewfs.mounttable.`. The mount points that are linking other filesystems are specified using `link` tags. The recommendation is to have mount points name same as in the linked filesystem target locations. For all namespaces that are not configured in the mount table, we can have them fallback to a default filesystem via `linkFallback`.
+
+In the below mount table configuration, namespace `/data` is linked to the filesystem `hdfs://nn1-clusterx.example.com:8020/data`, `/project` is linked to the filesystem `hdfs://nn2-clusterx.example.com:8020/project`. All namespaces that are not configured in the mount table, like `/logs` are linked to the filesystem `hdfs://nn5-clusterx.example.com:8020/home`.
+
+```xml
+<configuration>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./data</name>
+    <value>hdfs://nn1-clusterx.example.com:8020/data</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./project</name>
+    <value>hdfs://nn2-clusterx.example.com:8020/project</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./user</name>
+    <value>hdfs://nn3-clusterx.example.com:8020/user</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
+    <value>hdfs://nn4-clusterx.example.com:8020/tmp</value>
+  </property>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterX.linkFallback</name>
+    <value>hdfs://nn5-clusterx.example.com:8020/home</value>
+  </property>
+</configuration>
+```
+
+Alternatively we can have the mount table's root merged with the root of another filesystem via `linkMergeSlash`. In the below mount table configuration, ClusterY's root is merged with the root filesystem at `hdfs://nn1-clustery.example.com:8020`.
+
+```xml
+<configuration>
+  <property>
+    <name>fs.viewfs.mounttable.ClusterY.linkMergeSlash</name>
+    <value>hdfs://nn1-clustery.example.com:8020/</value>
+  </property>
+</configuration>
+```
 
 ### Pathname Usage Patterns
 
@@ -197,11 +241,11 @@ The mount tables can be described in `core-site.xml` but it is better to use ind
 
 In the file `mountTable.xml`, there is a definition of the mount table "ClusterX" for the hypothetical cluster that is a federation of the three namespace volumes managed by the three namenodes
 
-1.  nn1-clusterx.example.com:9820,
-2.  nn2-clusterx.example.com:9820, and
-3.  nn3-clusterx.example.com:9820.
+1.  nn1-clusterx.example.com:8020,
+2.  nn2-clusterx.example.com:8020, and
+3.  nn3-clusterx.example.com:8020.
 
-Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:9820, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
+Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.example.com:8020, and projects `/foo` and `/bar` are hosted on the other namenodes of the federated cluster. The home directory base path is set to `/home` so that each user can access its home directory using the getHomeDirectory() method defined in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html).
 
 ```xml
 <configuration>
@@ -211,19 +255,19 @@ Here `/home` and `/tmp` are in the namespace managed by namenode nn1-clusterx.ex
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./home</name>
-    <value>hdfs://nn1-clusterx.example.com:9820/home</value>
+    <value>hdfs://nn1-clusterx.example.com:8020/home</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./tmp</name>
-    <value>hdfs://nn1-clusterx.example.com:9820/tmp</value>
+    <value>hdfs://nn1-clusterx.example.com:8020/tmp</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./projects/foo</name>
-    <value>hdfs://nn2-clusterx.example.com:9820/projects/foo</value>
+    <value>hdfs://nn2-clusterx.example.com:8020/projects/foo</value>
   </property>
   <property>
     <name>fs.viewfs.mounttable.ClusterX.link./projects/bar</name>
-    <value>hdfs://nn3-clusterx.example.com:9820/projects/bar</value>
+    <value>hdfs://nn3-clusterx.example.com:8020/projects/bar</value>
   </property>
 </configuration>
 ```
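
To route clients through the ClusterX mount table above, the ViewFs guide sets the mount table as the default filesystem; a minimal sketch:

```xml
<property>
  <!-- Make the ClusterX mount table the default filesystem for this client. -->
  <name>fs.defaultFS</name>
  <value>viewfs://ClusterX</value>
</property>
```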

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index 6265f44..c14ebb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -312,7 +312,7 @@ public class TestDFSClientFailover {
     conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + service,
         namenode);
     conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + service + "."
-        + namenode, "localhost:9820");
+        + namenode, "localhost:8020");
 
     // call createProxy implicitly and explicitly
     Path p = new Path("/");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index f811d3d..c152a24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -83,9 +83,9 @@ import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
 
-  static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
-  static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
-  static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
+  static final String NS1_NN_ADDR    = "ns1-nn.example.com:8020";
+  static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:8020";
+  static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:8020";
 
   /**
    * Reset to default UGI settings since some tests change them.
@@ -477,7 +477,7 @@ public class TestDFSUtil {
         DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
 
     URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
-        "localhost", 9820), conf, "http");
+        "localhost", 8020), conf, "http");
     assertEquals(
         URI.create("http://localhost:"; + DFS_NAMENODE_HTTP_PORT_DEFAULT),
         httpAddress);
@@ -487,10 +487,10 @@ public class TestDFSUtil {
   public void testHANameNodesWithFederation() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
     
-    final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
-    final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
-    final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
-    final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+    final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
+    final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     // Two nameservices, each with two NNs.
@@ -554,9 +554,9 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     
     // One nameservice with two NNs
-    final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
-    final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
+    final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
    
     conf.set(DFS_NAMESERVICES, "ns1");
@@ -640,10 +640,10 @@ public class TestDFSUtil {
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
 
-    final String NS2_NN_ADDR    = "ns2-nn.example.com:9820";
-    final String NN1_ADDR       = "nn.example.com:9820";
+    final String NS2_NN_ADDR    = "ns2-nn.example.com:8020";
+    final String NN1_ADDR       = "nn.example.com:8020";
     final String NN1_SRVC_ADDR  = "nn.example.com:9821";
-    final String NN2_ADDR       = "nn2.example.com:9820";
+    final String NN2_ADDR       = "nn2.example.com:8020";
 
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(
@@ -821,7 +821,7 @@ public class TestDFSUtil {
     // Make sure when config FS_DEFAULT_NAME_KEY using IP address,
     // it will automatically convert it to hostname
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
     Collection<URI> uris = getInternalNameServiceUris(conf);
     assertEquals(1, uris.size());
     for (URI uri : uris) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index dd1d538..88889f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -1148,7 +1148,7 @@ public class TestQuota {
   @Test
   public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9820");
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
     DFSAdmin admin = new DFSAdmin(conf);
     ByteArrayOutputStream err = new ByteArrayOutputStream();
     PrintStream oldErr = System.err;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 5a8a39a..3fbcd26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -353,7 +353,7 @@ public class TestBlockTokenWithDFS {
     try {
       // prefer non-ephemeral port to avoid port collision on restartNameNode
       cluster = new MiniDFSCluster.Builder(conf)
-          .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+          .nameNodePort(ServerSocketUtil.getPort(18020, 100))
           .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
           .numDataNodes(numDataNodes)
           .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index 7627cf5..0b39456 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -79,7 +79,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
     }
 
     cluster = new MiniDFSCluster.Builder(conf)
-        .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+        .nameNodePort(ServerSocketUtil.getPort(18020, 100))
         .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
         .numDataNodes(numDNs)
         .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
index 560b32e..951adbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
@@ -100,7 +100,7 @@ public class TestBlockPoolManager {
   public void testSimpleSingleNS() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
-        "hdfs://mock1:9820");
+        "hdfs://mock1:8020");
     bpm.refreshNamenodes(conf);
     assertEquals("create #1\n", log.toString());
   }
@@ -110,8 +110,8 @@ public class TestBlockPoolManager {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
-    addNN(conf, "ns1", "mock1:9820");
-    addNN(conf, "ns2", "mock1:9820");
+    addNN(conf, "ns1", "mock1:8020");
+    addNN(conf, "ns2", "mock1:8020");
     bpm.refreshNamenodes(conf);
     assertEquals(
         "create #1\n" +
@@ -141,9 +141,9 @@ public class TestBlockPoolManager {
   public void testInternalNameService() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
-    addNN(conf, "ns1", "mock1:9820");
-    addNN(conf, "ns2", "mock1:9820");
-    addNN(conf, "ns3", "mock1:9820");
+    addNN(conf, "ns1", "mock1:8020");
+    addNN(conf, "ns2", "mock1:8020");
+    addNN(conf, "ns3", "mock1:8020");
     conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
     bpm.refreshNamenodes(conf);
     assertEquals("create #1\n", log.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
index f3ffebe..7e3a030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
@@ -163,8 +163,8 @@ public class TestAllowFormat {
     // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
     // is considered.
     String localhost = "127.0.0.1";
-    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 9820);
-    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9820);
+    InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+    InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 8020);
     HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
 
     conf.set(DFS_NAMENODE_NAME_DIR_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index 942719e..e177c01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -348,7 +348,6 @@ public class TestGetConf {
     verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
     verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
   }
-  
   @Test(timeout=10000)
   public void testGetSpecificKey() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist
index b081fd8..b2f407a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_0.23.9-FAILED.jhist
@@ -1,7 +1,7 @@
 Avro-Json
 
{"type":"record","name":"Event","namespace":"org.apache.hadoop.mapreduce.jobhistory","fields":[{"name":"type","type":{"type":"enum","name":"EventType","symbols":["JOB_SUBMITTED","JOB_INITED","JOB_FINISHED","JOB_PRIORITY_CHANGED","JOB_STATUS_CHANGED","JOB_FAILED","JOB_KILLED","JOB_INFO_CHANGED","TASK_STARTED","TASK_FINISHED","TASK_FAILED","TASK_UPDATED","NORMALIZED_RESOURCE","MAP_ATTEMPT_STARTED","MAP_ATTEMPT_FINISHED","MAP_ATTEMPT_FAILED","MAP_ATTEMPT_KILLED","REDUCE_ATTEMPT_STARTED","REDUCE_ATTEMPT_FINISHED","REDUCE_ATTEMPT_FAILED","REDUCE_ATTEMPT_KILLED","SETUP_ATTEMPT_STARTED","SETUP_ATTEMPT_FINISHED","SETUP_ATTEMPT_FAILED","SETUP_ATTEMPT_KILLED","CLEANUP_ATTEMPT_STARTED","CLEANUP_ATTEMPT_FINISHED","CLEANUP_ATTEMPT_FAILED","CLEANUP_ATTEMPT_KILLED","AM_STARTED"]}},{"name":"event","type":[{"type":"record","name":"JobFinished","fields":[{"name":"jobid","type":"string"},{"name":"finishTime","type":"long"},{"name":"finishedMaps","type":"int"},{"name":"finishedReduces","type":"int"},{"
 
name":"failedMaps","type":"int"},{"name":"failedReduces","type":"int"},{"name":"totalCounters","type":{"type":"record","name":"JhCounters","fields":[{"name":"name","type":"string"},{"name":"groups","type":{"type":"array","items":{"type":"record","name":"JhCounterGroup","fields":[{"name":"name","type":"string"},{"name":"displayName","type":"string"},{"name":"counts","type":{"type":"array","items":{"type":"record","name":"JhCounter","fields":[{"name":"name","type":"string"},{"name":"displayName","type":"string"},{"name":"value","type":"long"}]}}}]}}}]}},{"name":"mapCounters","type":"JhCounters"},{"name":"reduceCounters","type":"JhCounters"}]},{"type":"record","name":"JobInfoChange","fields":[{"name":"jobid","type":"string"},{"name":"submitTime","type":"long"},{"name":"launchTime","type":"long"}]},{"type":"record","name":"JobInited","fields":[{"name":"jobid","type":"string"},{"name":"launchTime","type":"long"},{"name":"totalMaps","type":"int"},{"name":"totalReduces","type":"int"},{"nam
 
e":"jobStatus","type":"string"},{"name":"uberized","type":"boolean"}]},{"type":"record","name":"AMStarted","fields":[{"name":"applicationAttemptId","type":"string"},{"name":"startTime","type":"long"},{"name":"containerId","type":"string"},{"name":"nodeManagerHost","type":"string"},{"name":"nodeManagerPort","type":"int"},{"name":"nodeManagerHttpPort","type":"int"}]},{"type":"record","name":"JobPriorityChange","fields":[{"name":"jobid","type":"string"},{"name":"priority","type":"string"}]},{"type":"record","name":"JobStatusChanged","fields":[{"name":"jobid","type":"string"},{"name":"jobStatus","type":"string"}]},{"type":"record","name":"JobSubmitted","fields":[{"name":"jobid","type":"string"},{"name":"jobName","type":"string"},{"name":"userName","type":"string"},{"name":"submitTime","type":"long"},{"name":"jobConfPath","type":"string"},{"name":"acls","type":{"type":"map","values":"string"}},{"name":"jobQueueName","type":"string"}]},{"type":"record","name":"JobUnsuccessfulCompletion","
 
fields":[{"name":"jobid","type":"string"},{"name":"finishTime","type":"long"},{"name":"finishedMaps","type":"int"},{"name":"finishedReduces","type":"int"},{"name":"jobStatus","type":"string"}]},{"type":"record","name":"MapAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptId","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"mapFinishTime","type":"long"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"string"},{"name":"state","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type":"record","name":"ReduceAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptI
 
d","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"shuffleFinishTime","type":"long"},{"name":"sortFinishTime","type":"long"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"string"},{"name":"state","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type":"record","name":"TaskAttemptFinished","fields":[{"name":"taskid","type":"string"},{"name":"attemptId","type":"string"},{"name":"taskType","type":"string"},{"name":"taskStatus","type":"string"},{"name":"finishTime","type":"long"},{"name":"rackname","type":"string"},{"name":"hostname","type":"string"},{"name":"state","type":"string"},{"name":"counters","typ
 
e":"JhCounters"}]},{"type":"record","name":"TaskAttemptStarted","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"attemptId","type":"string"},{"name":"startTime","type":"long"},{"name":"trackerName","type":"string"},{"name":"httpPort","type":"int"},{"name":"shufflePort","type":"int"},{"name":"containerId","type":"string"}]},{"type":"record","name":"TaskAttemptUnsuccessfulCompletion","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"attemptId","type":"string"},{"name":"finishTime","type":"long"},{"name":"hostname","type":"string"},{"name":"port","type":"int"},{"name":"rackname","type":"string"},{"name":"status","type":"string"},{"name":"error","type":"string"},{"name":"clockSplits","type":{"type":"array","items":"int"}},{"name":"cpuUsages","type":{"type":"array","items":"int"}},{"name":"vMemKbytes","type":{"type":"array","items":"int"}},{"name":"physMemKbytes","type":{"type":"array","items":"int"}}]},{"type
 
":"record","name":"TaskFailed","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"finishTime","type":"long"},{"name":"error","type":"string"},{"name":"failedDueToAttempt","type":["null","string"]},{"name":"status","type":"string"}]},{"type":"record","name":"TaskFinished","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"finishTime","type":"long"},{"name":"status","type":"string"},{"name":"counters","type":"JhCounters"},{"name":"successfulAttemptId","type":"string"}]},{"type":"record","name":"TaskStarted","fields":[{"name":"taskid","type":"string"},{"name":"taskType","type":"string"},{"name":"startTime","type":"long"},{"name":"splitLocations","type":"string"}]},{"type":"record","name":"TaskUpdated","fields":[{"name":"taskid","type":"string"},{"name":"finishTime","type":"long"}]}]}]}
 
{"type":"AM_STARTED","event":{"org.apache.hadoop.mapreduce.jobhistory.AMStarted":{"applicationAttemptId":"appattempt_1399356417814_19732_000001","startTime":1400251473264,"containerId":"container_1399356417814_19732_01_000001","nodeManagerHost":"localhost","nodeManagerPort":8041,"nodeManagerHttpPort":8042}}}
- {"type":"JOB_SUBMITTED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobSubmitted":{"jobid":"job_1399356417814_19732","jobName":"Fail job","userName":"rushabhs","submitTime":1400251470231,"jobConfPath":"hdfs://localhost:9820/user/rushabhs/.staging/job_1399356417814_19732/job.xml","acls":{"mapreduce.job.acl-view-job":" ","mapreduce.job.acl-modify-job":" "},"jobQueueName":"unfunded"}}}
+ {"type":"JOB_SUBMITTED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobSubmitted":{"jobid":"job_1399356417814_19732","jobName":"Fail job","userName":"rushabhs","submitTime":1400251470231,"jobConfPath":"hdfs://localhost:8020/user/rushabhs/.staging/job_1399356417814_19732/job.xml","acls":{"mapreduce.job.acl-view-job":" ","mapreduce.job.acl-modify-job":" "},"jobQueueName":"unfunded"}}}
  
{"type":"JOB_INITED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobInited":{"jobid":"job_1399356417814_19732","launchTime":1400251475763,"totalMaps":2,"totalReduces":1,"jobStatus":"INITED","uberized":false}}}
  
{"type":"JOB_INFO_CHANGED","event":{"org.apache.hadoop.mapreduce.jobhistory.JobInfoChange":{"jobid":"job_1399356417814_19732","submitTime":1400251470231,"launchTime":1400251475763}}}
  
{"type":"TASK_STARTED","event":{"org.apache.hadoop.mapreduce.jobhistory.TaskStarted":{"taskid":"task_1399356417814_19732_m_000000","taskType":"MAP","startTime":1400251475786,"splitLocations":"localhost,localhost,localhost"}}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 2cd01e2..82e9cf5 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -63,8 +63,8 @@ $H3 Basic Usage
 
   The most common invocation of DistCp is an inter-cluster copy:
 
-    bash$ hadoop distcp hdfs://nn1:9820/foo/bar \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp hdfs://nn1:8020/foo/bar \
+    hdfs://nn2:8020/bar/foo
 
   This will expand the namespace under `/foo/bar` on nn1 into a temporary file,
   partition its contents among a set of map tasks, and start a copy on each
@@ -72,19 +72,19 @@ $H3 Basic Usage
 
   One can also specify multiple source directories on the command line:
 
-    bash$ hadoop distcp hdfs://nn1:9820/foo/a \
-    hdfs://nn1:9820/foo/b \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp hdfs://nn1:8020/foo/a \
+    hdfs://nn1:8020/foo/b \
+    hdfs://nn2:8020/bar/foo
 
   Or, equivalently, from a file using the -f option:
 
-    bash$ hadoop distcp -f hdfs://nn1:9820/srclist \
-    hdfs://nn2:9820/bar/foo
+    bash$ hadoop distcp -f hdfs://nn1:8020/srclist \
+    hdfs://nn2:8020/bar/foo
 
   Where `srclist` contains
 
-    hdfs://nn1:9820/foo/a
-    hdfs://nn1:9820/foo/b
+    hdfs://nn1:8020/foo/a
+    hdfs://nn1:8020/foo/b
 
   When copying from multiple sources, DistCp will abort the copy with an error
   message if two sources collide, but collisions at the destination are
@@ -126,35 +126,35 @@ $H3 Update and Overwrite
   Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
   where the source paths have the following contents:
 
-    hdfs://nn1:9820/source/first/1
-    hdfs://nn1:9820/source/first/2
-    hdfs://nn1:9820/source/second/10
-    hdfs://nn1:9820/source/second/20
+    hdfs://nn1:8020/source/first/1
+    hdfs://nn1:8020/source/first/2
+    hdfs://nn1:8020/source/second/10
+    hdfs://nn1:8020/source/second/20
 
   When DistCp is invoked without `-update` or `-overwrite`, the DistCp defaults
   would create directories `first/` and `second/`, under `/target`. Thus:
 
-    distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   would yield the following contents in `/target`:
 
-    hdfs://nn2:9820/target/first/1
-    hdfs://nn2:9820/target/first/2
-    hdfs://nn2:9820/target/second/10
-    hdfs://nn2:9820/target/second/20
+    hdfs://nn2:8020/target/first/1
+    hdfs://nn2:8020/target/first/2
+    hdfs://nn2:8020/target/second/10
+    hdfs://nn2:8020/target/second/20
 
   When either `-update` or `-overwrite` is specified, the **contents** of the
   source-directories are copied to target, and not the source directories
   themselves. Thus:
 
-    distcp -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp -update hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   would yield the following contents in `/target`:
 
-    hdfs://nn2:9820/target/1
-    hdfs://nn2:9820/target/2
-    hdfs://nn2:9820/target/10
-    hdfs://nn2:9820/target/20
+    hdfs://nn2:8020/target/1
+    hdfs://nn2:8020/target/2
+    hdfs://nn2:8020/target/10
+    hdfs://nn2:8020/target/20
 
   By extension, if both source folders contained a file with the same name
   (say, `0`), then both sources would map an entry to `/target/0` at the
@@ -162,27 +162,27 @@ $H3 Update and Overwrite
 
   Now, consider the following copy operation:
 
-    distcp hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target
+    distcp hdfs://nn1:8020/source/first hdfs://nn1:8020/source/second hdfs://nn2:8020/target
 
   With sources/sizes:
 
-    hdfs://nn1:9820/source/first/1 32
-    hdfs://nn1:9820/source/first/2 32
-    hdfs://nn1:9820/source/second/10 64
-    hdfs://nn1:9820/source/second/20 32
+    hdfs://nn1:8020/source/first/1 32
+    hdfs://nn1:8020/source/first/2 32
+    hdfs://nn1:8020/source/second/10 64
+    hdfs://nn1:8020/source/second/20 32
 
   And destination/sizes:
 
-    hdfs://nn2:9820/target/1 32
-    hdfs://nn2:9820/target/10 32
-    hdfs://nn2:9820/target/20 64
+    hdfs://nn2:8020/target/1 32
+    hdfs://nn2:8020/target/10 32
+    hdfs://nn2:8020/target/20 64
 
   Will effect:
 
-    hdfs://nn2:9820/target/1 32
-    hdfs://nn2:9820/target/2 32
-    hdfs://nn2:9820/target/10 64
-    hdfs://nn2:9820/target/20 32
+    hdfs://nn2:8020/target/1 32
+    hdfs://nn2:8020/target/2 32
+    hdfs://nn2:8020/target/10 64
+    hdfs://nn2:8020/target/20 32
 
   `1` is skipped because the file-length and contents match. `2` is copied
   because it doesn't exist at the target. `10` and `20` are overwritten since

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index 6928cdf..9361fc1 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -37,36 +37,36 @@ public class TestOptionsParser {
   @Test
   public void testParseIgnoreFailure() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldIgnoreFailures());
 
     options = OptionsParser.parse(new String[] {
         "-i",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldIgnoreFailures());
   }
 
   @Test
   public void testParseOverwrite() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldOverwrite());
 
     options = OptionsParser.parse(new String[] {
         "-overwrite",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldOverwrite());
 
     try {
       OptionsParser.parse(new String[] {
           "-update",
           "-overwrite",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Update and overwrite aren't allowed together");
     } catch (IllegalArgumentException ignore) {
     }
@@ -75,44 +75,44 @@ public class TestOptionsParser {
   @Test
   public void testLogPath() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getLogPath());
 
     options = OptionsParser.parse(new String[] {
         "-log",
-        "hdfs://localhost:9820/logs",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
-    Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:9820/logs"));
+        "hdfs://localhost:8020/logs",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getLogPath(), new Path("hdfs://localhost:8020/logs"));
   }
 
   @Test
   public void testParseBlokcing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldBlock());
 
     options = OptionsParser.parse(new String[] {
         "-async",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldBlock());
   }
 
   @Test
   public void testParsebandwidth() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMapBandwidth(), 0, DELTA);
 
     options = OptionsParser.parse(new String[] {
         "-bandwidth",
         "11.2",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMapBandwidth(), 11.2, DELTA);
   }
 
@@ -121,8 +121,8 @@ public class TestOptionsParser {
     OptionsParser.parse(new String[] {
         "-bandwidth",
         "-11",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
   }
 
   @Test(expected=IllegalArgumentException.class)
@@ -130,22 +130,22 @@ public class TestOptionsParser {
     OptionsParser.parse(new String[] {
         "-bandwidth",
         "0",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
   }
 
   @Test
   public void testParseSkipCRC() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldSkipCRC());
 
     options = OptionsParser.parse(new String[] {
         "-update",
         "-skipcrccheck",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
     Assert.assertTrue(options.shouldSkipCRC());
   }
@@ -153,22 +153,22 @@ public class TestOptionsParser {
   @Test
   public void testParseAtomicCommit() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldAtomicCommit());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldAtomicCommit());
 
     try {
       OptionsParser.parse(new String[] {
           "-atomic",
           "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Atomic and sync folders were allowed");
     } catch (IllegalArgumentException ignore) { }
   }
@@ -176,30 +176,30 @@ public class TestOptionsParser {
   @Test
   public void testParseWorkPath() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getAtomicWorkPath());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getAtomicWorkPath());
 
     options = OptionsParser.parse(new String[] {
         "-atomic",
         "-tmp",
-        "hdfs://localhost:9820/work",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
-    Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:9820/work"));
+        "hdfs://localhost:8020/work",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getAtomicWorkPath(), new Path("hdfs://localhost:8020/work"));
 
     try {
       OptionsParser.parse(new String[] {
           "-tmp",
-          "hdfs://localhost:9820/work",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/work",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("work path was allowed without -atomic switch");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -207,37 +207,37 @@ public class TestOptionsParser {
   @Test
   public void testParseSyncFolders() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldSyncFolder());
 
     options = OptionsParser.parse(new String[] {
         "-update",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
   }
 
   @Test
   public void testParseDeleteMissing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldDeleteMissing());
 
     options = OptionsParser.parse(new String[] {
         "-update",
         "-delete",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldSyncFolder());
     Assert.assertTrue(options.shouldDeleteMissing());
 
     options = OptionsParser.parse(new String[] {
         "-overwrite",
         "-delete",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldOverwrite());
     Assert.assertTrue(options.shouldDeleteMissing());
 
@@ -245,8 +245,8 @@ public class TestOptionsParser {
       OptionsParser.parse(new String[] {
           "-atomic",
           "-delete",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Atomic and delete folders were allowed");
     } catch (IllegalArgumentException ignore) { }
   }
@@ -254,38 +254,38 @@ public class TestOptionsParser {
   @Test
   public void testParseMaps() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMaxMaps(), DistCpConstants.DEFAULT_MAPS);
 
     options = OptionsParser.parse(new String[] {
         "-m",
         "1",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMaxMaps(), 1);
 
     options = OptionsParser.parse(new String[] {
         "-m",
         "0",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMaxMaps(), 1);
 
     try {
       OptionsParser.parse(new String[] {
           "-m",
           "hello",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Non numberic map parsed");
     } catch (IllegalArgumentException ignore) { }
 
     try {
       OptionsParser.parse(new String[] {
           "-mapredXslConf",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Non numberic map parsed");
     } catch (IllegalArgumentException ignore) { }
   }
@@ -293,8 +293,8 @@ public class TestOptionsParser {
   @Test
   public void testParseNumListstatusThreads() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     // If command line argument isn't set, we expect .getNumListstatusThreads
     // option to be zero (so that we know when to override conf properties).
     Assert.assertEquals(0, options.getNumListstatusThreads());
@@ -302,23 +302,23 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "--numListstatusThreads",
         "12",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(12, options.getNumListstatusThreads());
 
     options = OptionsParser.parse(new String[] {
         "--numListstatusThreads",
         "0",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(0, options.getNumListstatusThreads());
 
     try {
       OptionsParser.parse(new String[] {
           "--numListstatusThreads",
           "hello",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Non numberic numListstatusThreads parsed");
     } catch (IllegalArgumentException ignore) { }
 
@@ -326,8 +326,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "--numListstatusThreads",
         "100",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(DistCpOptions.MAX_NUM_LISTSTATUS_THREADS,
                         options.getNumListstatusThreads());
   }
@@ -336,10 +336,10 @@ public class TestOptionsParser {
   public void testSourceListing() {
     DistCpOptions options = OptionsParser.parse(new String[] {
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getSourceFileListing(),
-        new Path("hdfs://localhost:9820/source/first"));
+        new Path("hdfs://localhost:8020/source/first"));
   }
 
   @Test
@@ -347,9 +347,9 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {
           "-f",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Both source listing & source paths allowed");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -358,7 +358,7 @@ public class TestOptionsParser {
   public void testMissingSourceInfo() {
     try {
       OptionsParser.parse(new String[] {
-          "hdfs://localhost:9820/target/"});
+          "hdfs://localhost:8020/target/"});
       Assert.fail("Neither source listing not source paths present");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -367,7 +367,7 @@ public class TestOptionsParser {
   public void testMissingTarget() {
     try {
       OptionsParser.parse(new String[] {
-          "-f", "hdfs://localhost:9820/source"});
+          "-f", "hdfs://localhost:8020/source"});
       Assert.fail("Missing target allowed");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -376,7 +376,7 @@ public class TestOptionsParser {
   public void testInvalidArgs() {
     try {
       OptionsParser.parse(new String[] {
-          "-m", "-f", "hdfs://localhost:9820/source"});
+          "-m", "-f", "hdfs://localhost:8020/source"});
       Assert.fail("Missing map value");
     } catch (IllegalArgumentException ignore) {}
   }
@@ -387,14 +387,14 @@ public class TestOptionsParser {
         "-strategy",
         "dynamic",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getCopyStrategy(), "dynamic");
 
     options = OptionsParser.parse(new String[] {
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
    Assert.assertEquals(options.getCopyStrategy(), DistCpConstants.UNIFORMSIZE);
   }
 
@@ -402,17 +402,17 @@ public class TestOptionsParser {
   public void testTargetPath() {
     DistCpOptions options = OptionsParser.parse(new String[] {
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
-    Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:9820/target/"));
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
   }
 
   @Test
   public void testPreserve() {
     DistCpOptions options = OptionsParser.parse(new String[] {
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -423,8 +423,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -436,8 +436,8 @@ public class TestOptionsParser {
 
     options = OptionsParser.parse(new String[] {
         "-p",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -450,8 +450,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-pbr",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -464,8 +464,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-pbrgup",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -478,8 +478,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-pbrgupcaxt",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -493,8 +493,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-pc",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
@@ -507,8 +507,8 @@ public class TestOptionsParser {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-f",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
    Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2,
         options.getPreserveAttributes().size());
 
@@ -516,15 +516,15 @@ public class TestOptionsParser {
       OptionsParser.parse(new String[] {
           "-pabcd",
           "-f",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target"});
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target"});
       Assert.fail("Invalid preserve attribute");
     }
     catch (NoSuchElementException ignore) {}
 
     Builder builder = new DistCpOptions.Builder(
-        new Path("hdfs://localhost:9820/source/first"),
-        new Path("hdfs://localhost:9820/target/"));
+        new Path("hdfs://localhost:8020/source/first"),
+        new Path("hdfs://localhost:8020/target/"));
     Assert.assertFalse(
         builder.build().shouldPreserve(FileAttribute.PERMISSION));
     builder.preserve(FileAttribute.PERMISSION);
@@ -552,8 +552,8 @@ public class TestOptionsParser {
     DistCpOptions options = OptionsParser.parse(new String[] {
         "-atomic",
         "-i",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
@@ -570,8 +570,8 @@ public class TestOptionsParser {
         "-pu",
         "-bandwidth",
         "11.2",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
@@ -644,8 +644,8 @@ public class TestOptionsParser {
         DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
 
     DistCpOptions options = OptionsParser.parse(new String[] { "-update",
-        "-append", "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/" });
+        "-append", "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/" });
     options.appendToConf(conf);
     Assert.assertTrue(conf.getBoolean(
         DistCpOptionSwitch.APPEND.getConfigLabel(), false));
@@ -655,8 +655,8 @@ public class TestOptionsParser {
     // make sure -append is only valid when -update is specified
     try {
       OptionsParser.parse(new String[] { "-append",
-              "hdfs://localhost:9820/source/first",
-              "hdfs://localhost:9820/target/" });
+              "hdfs://localhost:8020/source/first",
+              "hdfs://localhost:8020/target/" });
       fail("Append should fail if update option is not specified");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -667,8 +667,8 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {
           "-append", "-update", "-skipcrccheck",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Append should fail if skipCrc option is specified");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -687,8 +687,8 @@ public class TestOptionsParser {
 
     DistCpOptions options = OptionsParser.parse(new String[] { "-update",
         optionStr, "s1", "s2",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/" });
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/" });
     options.appendToConf(conf);
     Assert.assertTrue(conf.getBoolean(optionLabel, false));
     Assert.assertTrue(isDiff?
@@ -698,8 +698,8 @@ public class TestOptionsParser {
 
     options = OptionsParser.parse(new String[] {
         optionStr, "s1", ".", "-update",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/" });
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/" });
     options.appendToConf(conf);
     Assert.assertTrue(conf.getBoolean(optionLabel, false));
     Assert.assertTrue(isDiff?
@@ -710,8 +710,8 @@ public class TestOptionsParser {
     // -diff/-rdiff requires two option values
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail with only one snapshot name");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -721,8 +721,8 @@ public class TestOptionsParser {
     // make sure -diff/-rdiff is only valid when -update is specified
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "s2",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail if -update option is not specified");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -732,8 +732,8 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {
           "-diff", "s1", "s2", "-update", "-delete",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -743,8 +743,8 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {
           "-diff", "s1", "s2", "-delete",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff/-rdiff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -754,8 +754,8 @@ public class TestOptionsParser {
     try {
       OptionsParser.parse(new String[] {optionStr, "s1", "s2",
           "-delete", "-overwrite",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail("Should fail as -delete and -diff are mutually exclusive");
     } catch (IllegalArgumentException e) {
       assertExceptionContains(
@@ -768,8 +768,8 @@ public class TestOptionsParser {
           optionStr, "s1", "s2",
           optionStrOther, "s2", "s1",
           "-update",
-          "hdfs://localhost:9820/source/first",
-          "hdfs://localhost:9820/target/" });
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
       fail(optionStr + " should fail if " + optionStrOther
           + " is also specified");
     } catch (IllegalArgumentException e) {
@@ -791,15 +791,15 @@ public class TestOptionsParser {
   @Test
   public void testExclusionsOption() {
     DistCpOptions options = OptionsParser.parse(new String[] {
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertNull(options.getFiltersFile());
 
     options = OptionsParser.parse(new String[] {
         "-filters",
         "/tmp/filters.txt",
-        "hdfs://localhost:9820/source/first",
-        "hdfs://localhost:9820/target/"});
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
   }
 }
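
The hunks above pin down the OptionsParser.parse(String[]) contract: it turns a DistCp command line into a DistCpOptions object, with every example URI now carrying 8020 as the default NameNode RPC port. A minimal sketch of the same calls outside JUnit, using only classes that appear in this diff (OptionsParser, DistCpOptions and its Builder, fs.Path); treat it as illustrative rather than canonical:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.DistCpOptions;
    import org.apache.hadoop.tools.OptionsParser;

    public class DistCpParseSketch {
      public static void main(String[] args) {
        // Command-line form, as exercised by the tests above; both URIs
        // use the restored default RPC port 8020.
        DistCpOptions parsed = OptionsParser.parse(new String[] {
            "-update", "-bandwidth", "11.2",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        System.out.println(parsed.shouldSyncFolder()); // true
        System.out.println(parsed.getMapBandwidth());  // 11.2

        // Programmatic form via the Builder the preserve-attribute test uses.
        DistCpOptions built = new DistCpOptions.Builder(
            new Path("hdfs://localhost:8020/source/first"),
            new Path("hdfs://localhost:8020/target/")).build();
        System.out.println(built.shouldAtomicCommit()); // false by default
      }
    }
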

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/site/markdown/index.md b/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
index 7c5e783..1815f60 100644
--- a/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-openstack/src/site/markdown/index.md
@@ -165,7 +165,7 @@ Hadoop uses URIs to refer to files within a filesystem. Some common examples are
 
         local://etc/hosts
         hdfs://cluster1/users/example/data/set1
-        hdfs://cluster2.example.org:9820/users/example/data/set1
+        hdfs://cluster2.example.org:8020/users/example/data/set1
 
 The Swift Filesystem Client adds a new URL type `swift`. In a Swift Filesystem URL, the hostname part of a URL identifies the container and the service to work with; the path identifies the name of the object. Here are some examples
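
The hdfs:// examples above also illustrate a consequence of this commit: a URI (or fs.defaultFS value) that spells out the port must now say 8020 to match the default, and on a stock non-HA install a URI that omits the port resolves to 8020. A short sketch with stock Configuration/FileSystem APIs; cluster2.example.org is just the documentation's placeholder host:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DefaultFsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Point the client at the NameNode; 8020 is the default RPC port
        // again after this change, so spelling it out is optional here.
        conf.set("fs.defaultFS", "hdfs://cluster2.example.org:8020");
        FileSystem fs = FileSystem.get(conf);
        System.out.println(fs.getUri()); // hdfs://cluster2.example.org:8020
      }
    }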
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index c096229..c537195 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4547,7 +4547,7 @@
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
     "yarn.admin.acl" : "*",
     "mapreduce.job.speculative.speculativecap" : "0.1",
     "dfs.namenode.num.checkpoints.retained" : "2",
@@ -4795,7 +4795,7 @@
     "ftp.stream-buffer-size" : "4096",
     "dfs.namenode.avoid.write.stale.datanode" : "false",
     "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : 
"hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-1",
+    "mapreduce.output.fileoutputformat.outputdir" : 
"hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-1",
     "dfs.blockreport.initialDelay" : "0",
     "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
     "hadoop.http.authentication.token.validity" : "36000",
@@ -4839,7 +4839,7 @@
     "hadoop.security.auth_to_local" : "DEFAULT",
     "dfs.secondary.namenode.kerberos.internal.spnego.principal" : 
"${dfs.web.authentication.kerberos.principal}",
     "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
     "yarn.nodemanager.address" : "0.0.0.0:0",
     "yarn.scheduler.fair.assignmultiple" : "true",
     "yarn.resourcemanager.scheduler.client.thread-count" : "50",
@@ -9628,7 +9628,7 @@
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
-    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
+    "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:8020/user/jenkins",
     "yarn.admin.acl" : "*",
     "mapreduce.job.speculative.speculativecap" : "0.1",
     "dfs.namenode.num.checkpoints.retained" : "2",
@@ -9876,7 +9876,7 @@
     "ftp.stream-buffer-size" : "4096",
     "dfs.namenode.avoid.write.stale.datanode" : "false",
     "hadoop.security.group.mapping.ldap.search.attr.member" : "member",
-    "mapreduce.output.fileoutputformat.outputdir" : 
"hdfs://a2115.smile.com:9820/user/jenkins/tera-gen-2",
+    "mapreduce.output.fileoutputformat.outputdir" : 
"hdfs://a2115.smile.com:8020/user/jenkins/tera-gen-2",
     "dfs.blockreport.initialDelay" : "0",
     "yarn.nm.liveness-monitor.expiry-interval-ms" : "600000",
     "hadoop.http.authentication.token.validity" : "36000",
@@ -9920,7 +9920,7 @@
     "hadoop.security.auth_to_local" : "DEFAULT",
     "dfs.secondary.namenode.kerberos.internal.spnego.principal" : 
"${dfs.web.authentication.kerberos.principal}",
     "ftp.client-write-packet-size" : "65536",
-    "fs.defaultFS" : "hdfs://a2115.smile.com:9820",
+    "fs.defaultFS" : "hdfs://a2115.smile.com:8020",
     "yarn.nodemanager.address" : "0.0.0.0:0",
     "yarn.scheduler.fair.assignmultiple" : "true",
     "yarn.resourcemanager.scheduler.client.thread-count" : "50",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
index cd877b2..91602e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
@@ -147,7 +147,7 @@ public class RegistryTestHelper extends Assert {
     Map<String, String> url = addressList.get(0);
     String addr = url.get("uri");
     assertTrue(addr.contains("http"));
-    assertTrue(addr.contains(":9820"));
+    assertTrue(addr.contains(":8020"));
 
     Endpoint nnipc = findEndpoint(record, NNIPC, false, 1,2);
     assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
@@ -275,7 +275,7 @@ public class RegistryTestHelper extends Assert {
         new URI("http", hostname + ":80", "/")));
     entry.addExternalEndpoint(
         restEndpoint(API_WEBHDFS,
-            new URI("http", hostname + ":9820", "/")));
+            new URI("http", hostname + ":8020", "/")));
 
     Endpoint endpoint = ipcEndpoint(API_HDFS, null);
     endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));
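
In the registry helper above, endpoint addresses are stored as host/port maps, so the expected address simply moves from :9820 to :8020. A sketch of building such a pair with RegistryTypeUtils, assuming the hadoop-yarn-registry client binding is on the classpath; namenode.example.com is a made-up host:

    import java.util.Map;
    import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;

    public class RegistryAddrSketch {
      public static void main(String[] args) {
        // Same helper the test uses for IPC addresses, here carrying the
        // restored default NameNode RPC port.
        Map<String, String> addr =
            RegistryTypeUtils.hostnamePortPair("namenode.example.com", 8020);
        System.out.println(addr);
      }
    }
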

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6ab200/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
index 6ceaa75..05ead9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
@@ -64,7 +64,7 @@ public class TestPBRecordImpl {
     LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
     assertTrue(ret instanceof LocalResourcePBImpl);
     ret.setResource(URL.fromPath(new Path(
-      "hdfs://y.ak:9820/foo/bar")));
+      "hdfs://y.ak:8020/foo/bar")));
     ret.setSize(4344L);
     ret.setTimestamp(3141592653589793L);
     ret.setVisibility(LocalResourceVisibility.PUBLIC);


