HDFS-13944. [JDK10] Fix javadoc errors in hadoop-hdfs-rbf module. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa7f7078
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa7f7078
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa7f7078

Branch: refs/heads/HEAD
Commit: fa7f7078a713c44783425195a891582bcf8a6d5c
Parents: ec07579
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Oct 3 12:48:38 2018 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Wed Oct 3 12:48:45 2018 +0900

----------------------------------------------------------------------
 ...uterAdminProtocolServerSideTranslatorPB.java |  2 +-
 .../resolver/ActiveNamenodeResolver.java        | 11 +++++----
 .../resolver/FileSubclusterResolver.java        |  1 +
 .../federation/resolver/MountTableResolver.java |  2 ++
 .../MultipleDestinationMountTableResolver.java  |  4 +--
 .../resolver/NamenodeStatusReport.java          |  5 ++--
 .../resolver/order/AvailableSpaceResolver.java  |  2 +-
 .../resolver/order/LocalResolver.java           |  2 +-
 .../federation/resolver/package-info.java       |  5 ++--
 .../federation/router/ConnectionPool.java       |  3 ++-
 .../federation/router/NameserviceManager.java   |  9 +++++++
 .../hdfs/server/federation/router/Quota.java    |  4 +--
 .../hdfs/server/federation/router/Router.java   |  5 ++--
 .../federation/router/RouterAdminServer.java    |  4 +--
 .../federation/router/RouterClientProtocol.java |  6 ++---
 .../federation/router/RouterQuotaManager.java   |  6 +++--
 .../federation/router/RouterQuotaUsage.java     |  4 +--
 .../federation/router/RouterRpcClient.java      | 23 +++++++++++------
 .../federation/router/RouterRpcServer.java      | 14 ++++++++---
 .../router/RouterSafemodeService.java           | 26 ++++++++++++--------
 .../federation/router/RouterStateManager.java   |  9 +++++++
 .../federation/store/CachedRecordStore.java     |  4 +--
 .../federation/store/MembershipStore.java       | 16 ++++++------
 .../server/federation/store/RouterStore.java    | 10 ++++----
 .../federation/store/StateStoreService.java     | 19 +++++++-------
 .../federation/store/StateStoreUtils.java       |  4 +++
 .../driver/StateStoreRecordOperations.java      | 15 ++++++++---
 .../store/driver/impl/StateStoreBaseImpl.java   |  1 -
 .../server/federation/store/package-info.java   | 12 ++++-----
 .../federation/store/records/BaseRecord.java    |  4 +--
 .../store/records/MembershipState.java          |  3 ---
 .../federation/store/records/MountTable.java    | 26 +++++++++++---------
 .../server/federation/store/records/Query.java  |  2 +-
 .../federation/utils/ConsistentHashRing.java    |  2 +-
 .../hdfs/tools/federation/RouterAdmin.java      |  5 ++++
 35 files changed, 166 insertions(+), 104 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
index 0204ce8..6341ebd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
@@ -102,7 +102,7 @@ public class RouterAdminProtocolServerSideTranslatorPB implements
   /**
    * Constructor.
    * @param server The NN server.
-   * @throws IOException
+   * @throws IOException if it cannot create the translator.
    */
   public RouterAdminProtocolServerSideTranslatorPB(RouterAdminServer server)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
index f1a5329..f06df70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
@@ -27,16 +27,17 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * Locates the most active NN for a given nameservice ID or blockpool ID. This
- * interface is used by the {@link org.apache.hadoop.hdfs.server.federation.
- * router.RouterRpcServer RouterRpcServer} to:
+ * interface is used by the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer
+ * RouterRpcServer} to:
  * <ul>
  * <li>Determine the target NN for a given subcluster.
  * <li>List of all namespaces discovered/active in the federation.
  * <li>Update the currently active NN empirically.
  * </ul>
- * The interface is also used by the {@link org.apache.hadoop.hdfs.server.
- * federation.router.NamenodeHeartbeatService NamenodeHeartbeatService} to
- * register a discovered NN.
+ * The interface is also used by the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.NamenodeHeartbeatService
+ * NamenodeHeartbeatService} to register a discovered NN.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

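For reference, a minimal sketch (not part of this patch) of how a caller such as the RouterRpcServer might consult this resolver; the getNamenodesForNameserviceId method and FederationNamenodeContext type are assumed from the surrounding API rather than shown in this diff.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
    import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;

    public class ResolverUsageSketch {
      /** Pick the namenode currently reported as most active for a nameservice. */
      static FederationNamenodeContext pickTarget(
          ActiveNamenodeResolver resolver, String nsId) throws IOException {
        // Candidates come back ordered by how likely they are to be active
        // (assumed contract; see the interface javadoc above).
        List<? extends FederationNamenodeContext> nns =
            resolver.getNamenodesForNameserviceId(nsId);
        if (nns == null || nns.isEmpty()) {
          throw new IOException("No namenodes registered for " + nsId);
        }
        return nns.get(0);
      }
    }
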
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index af9f493..5aa5ec9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -60,6 +60,7 @@ public interface FileSubclusterResolver {
    * Get a list of mount points for a path. Results are from the mount table
    * cache.
    *
+   * @param path Path to get the mount points under.
    * @return List of mount points present at this path or zero-length list if
    *         none are found.
    * @throws IOException Throws exception if the data is not available.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index bdd75c7..121469f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -416,6 +416,7 @@ public class MountTableResolver
    * the read lock.
    * @param path Path to check/insert.
    * @return New remote location.
+   * @throws IOException If it cannot find the location.
    */
   public PathLocation lookupLocation(final String path) throws IOException {
     PathLocation ret = null;
@@ -631,6 +632,7 @@ public class MountTableResolver
   /**
    * Get the size of the cache.
    * @return Size of the cache.
+   * @throws IOException If the cache is not initialized.
    */
   protected long getCacheSize() throws IOException{
     if (this.locationCache != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java
index e31077e..b09a883 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java
@@ -44,8 +44,8 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>
  * Does the Mount table entry for this path have multiple destinations?
  * <ul>
- * <li>No -> Return the location
- * <li>Yes -> Return all locations, prioritizing the best guess from the
+ * <li>No: Return the location
+ * <li>Yes: Return all locations, prioritizing the best guess from the
  * consistent hashing algorithm.
  * </ul>
  * <p>

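For reference, an illustrative-only sketch of the selection flow described in the javadoc above; the real class delegates to the order resolvers in the resolver.order package, so the hashing helper below is a stand-in, not the actual implementation.

    import java.util.List;

    /** Illustrative outline of the single vs. multiple destination decision. */
    final class DestinationChoiceSketch {
      /** Pick the preferred destination among the candidates for a path. */
      static String pickDestination(String path, List<String> destinations) {
        if (destinations.size() == 1) {
          // Single destination: return it directly.
          return destinations.get(0);
        }
        // Multiple destinations: hash the path so the same path consistently
        // maps to the same subcluster (stand-in for the consistent hashing
        // used by the real resolver).
        int index = Math.floorMod(path.hashCode(), destinations.size());
        return destinations.get(index);
      }
    }
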
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
index c3c6fa8..b121e24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
@@ -190,9 +190,9 @@ public class NamenodeStatusReport {
   }
 
   /**
-   * Get the HA service state.
+   * Set the HA service state.
    *
-   * @return The HA service state.
+   * @param state The HA service state to set.
    */
   public void setHAServiceState(HAServiceState state) {
     this.status = state;
@@ -293,6 +293,7 @@ public class NamenodeStatusReport {
    * @param numBlocksPendingReplication Number of blocks pending replication.
    * @param numBlocksUnderReplicated Number of blocks under replication.
    * @param numBlocksPendingDeletion Number of blocks pending deletion.
+   * @param providedSpace Space in provided storage.
    */
   public void setNamesystemInfo(long available, long total,
       long numFiles, long numBlocks, long numBlocksMissing,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java
index 77a35a4..883a126 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java
@@ -83,7 +83,7 @@ public class AvailableSpaceResolver
    * caching to avoid too many calls. The cache might be updated asynchronously
    * to reduce latency.
    *
-   * @return NamespaceId -> {@link SubclusterAvailableSpace}
+   * @return NamespaceId to {@link SubclusterAvailableSpace}.
    */
   @Override
   protected Map<String, SubclusterAvailableSpace> getSubclusterInfo(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
index afc49c7..a774677 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
@@ -65,7 +65,7 @@ public class LocalResolver extends RouterResolver<String, String> {
    * too many calls. The cache might be updated asynchronously to reduce
    * latency.
    *
-   * @return Node IP -> Subcluster.
+   * @return Node IP to Subcluster.
    */
   @Override
   protected Map<String, String> getSubclusterInfo(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java
index d8be9e3..7f68b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java
@@ -21,8 +21,9 @@
  * federation. The data resolvers collect data from the cluster, including from
 * the state store. The resolvers expose APIs used by HDFS federation to collect
  * aggregated, cached data for use in Real-time request processing. The
- * resolvers are perf-sensitive and are used in the flow of the
- * {@link RouterRpcServer} request path.
+ * resolvers are perf-sensitive and are used in the flow of the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer
+ * RouterRpcServer} request path.
  * <p>
  * The principal resolvers are:
  * <ul>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 5fcde5b..fab3b81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -151,6 +151,7 @@ public class ConnectionPool {
 
   /**
    * Get the clientIndex used to calculate index for lookup.
+   * @return Client index.
    */
   @VisibleForTesting
   public AtomicInteger getClientIndex() {
@@ -300,7 +301,7 @@ public class ConnectionPool {
    * Create a new proxy wrapper for a client NN connection.
    * @return Proxy for the target ClientProtocol that contains the user's
    *         security context.
-   * @throws IOException
+   * @throws IOException If it cannot get a new connection.
    */
   public ConnectionContext newConnection() throws IOException {
     return newConnection(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
index cab336c..631fde3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
@@ -33,18 +33,27 @@ public interface NameserviceManager {
 
   /**
    * Disable a name service.
+   * @param request Request to disable a name service.
+   * @return Response to disable a name service.
+   * @throws IOException If it cannot perform the operation.
    */
   DisableNameserviceResponse disableNameservice(
       DisableNameserviceRequest request) throws IOException;
 
   /**
    * Enable a name service.
+   * @param request Request to enable a name service.
+   * @return Response to disable a name service.
+   * @throws IOException If it cannot perform the operation.
    */
   EnableNameserviceResponse enableNameservice(EnableNameserviceRequest request)
       throws IOException;
 
   /**
    * Get the list of disabled name service.
+   * @param request Request to get the disabled name services.
+   * @return Response to get the disabled name services.
+   * @throws IOException If it cannot perform the operation.
    */
   GetDisabledNameservicesResponse getDisabledNameservices(
       GetDisabledNameservicesRequest request) throws IOException;

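For reference, a sketch of the request/response pattern these methods follow; the newInstance factory and getStatus accessor are assumptions, as this patch only touches the javadoc.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;

    public class NameserviceManagerSketch {
      /** Disable a name service and report whether the operation was accepted. */
      static boolean disable(NameserviceManager manager, String nsId)
          throws IOException {
        // The newInstance(...) factory and getStatus() accessor are assumed
        // here; this diff only shows the interface javadoc.
        DisableNameserviceRequest request =
            DisableNameserviceRequest.newInstance(nsId);
        DisableNameserviceResponse response = manager.disableNameservice(request);
        return response.getStatus();
      }
    }
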
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 846ccd1..d8ed080 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -62,7 +62,7 @@ public class Quota {
    * @param namespaceQuota Name space quota.
    * @param storagespaceQuota Storage space quota.
    * @param type StorageType that the space quota is intended to be set on.
-   * @throws IOException
+   * @throws IOException If the quota system is disabled.
    */
   public void setQuota(String path, long namespaceQuota,
       long storagespaceQuota, StorageType type) throws IOException {
@@ -91,7 +91,7 @@ public class Quota {
    * Get quota usage for the federation path.
    * @param path Federation path.
    * @return Aggregated quota.
-   * @throws IOException
+   * @throws IOException If the quota system is disabled.
    */
   public QuotaUsage getQuotaUsage(String path) throws IOException {
     rpcServer.checkOperation(OperationCategory.READ);

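For reference, a small usage sketch built on the two signatures shown in the hunks above; HdfsConstants.QUOTA_DONT_SET and the QuotaUsage accessors are standard HDFS API, while the surrounding Router wiring is assumed.

    import java.io.IOException;

    import org.apache.hadoop.fs.QuotaUsage;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.federation.router.Quota;

    public class QuotaSketch {
      /** Set a name-space quota of 1000 entries and print the current usage. */
      static void limit(Quota quota, String path) throws IOException {
        // HdfsConstants.QUOTA_DONT_SET leaves the storage-space quota untouched;
        // a null StorageType means the quota is not per storage type (assumed).
        quota.setQuota(path, 1000, HdfsConstants.QUOTA_DONT_SET, null);
        QuotaUsage usage = quota.getQuotaUsage(path);
        System.out.println(path + ": " + usage.getFileAndDirectoryCount()
            + " of " + usage.getQuota() + " name-space entries used");
      }
    }
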
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index 7e67daa..5ddc129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -498,7 +498,7 @@ public class Router extends CompositeService {
   /**
    * Update the router state and heartbeat to the state store.
    *
-   * @param state The new router state.
+   * @param newState The new router state.
    */
   public void updateRouterState(RouterServiceState newState) {
     this.state = newState;
@@ -636,7 +636,8 @@ public class Router extends CompositeService {
   }
 
   /**
-   * If the quota system is enabled in Router.
+   * Check if the quota system is enabled in Router.
+   * @return True if the quota system is enabled in Router.
    */
   public boolean isQuotaEnabled() {
     return this.quotaManager != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 3509768..e7fec9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -415,8 +415,8 @@ public class RouterAdminServer extends AbstractService
    * control. This method will be invoked during each RPC call in router
    * admin server.
    *
-   * @return Router permission checker
-   * @throws AccessControlException
+   * @return Router permission checker.
+   * @throws AccessControlException If the user is not authorized.
    */
   public static RouterPermissionChecker getPermissionChecker()
       throws AccessControlException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index f45da3c..ddbc014 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -142,9 +142,9 @@ public class RouterClientProtocol implements ClientProtocol {
   /**
    * The the delegation token from each name service.
    *
-   * @param renewer
-   * @return Name service -> Token.
-   * @throws IOException
+   * @param renewer The token renewer.
+   * @return Name service to Token.
+   * @throws IOException If it cannot get the delegation token.
    */
   public Map<FederationNamespaceInfo, Token<DelegationTokenIdentifier>>
   getDelegationTokens(Text renewer) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
index 87a8724..fa2a6e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
@@ -50,6 +50,7 @@ public class RouterQuotaManager {
 
   /**
    * Get all the mount quota paths.
+   * @return All the mount quota paths.
    */
   public Set<String> getAll() {
     readLock.lock();
@@ -88,8 +89,8 @@ public class RouterQuotaManager {
 
   /**
    * Get children paths (can including itself) under specified federation path.
-   * @param parentPath
-   * @return Set<String> Children path set.
+   * @param parentPath Federated path.
+   * @return Set of children paths.
    */
   public Set<String> getPaths(String parentPath) {
     readLock.lock();
@@ -154,6 +155,7 @@ public class RouterQuotaManager {
   /**
    * Check if the quota was set.
    * @param quota RouterQuotaUsage set in mount table.
+   * @return True if the quota is set.
    */
   public boolean isQuotaSet(RouterQuotaUsage quota) {
     if (quota != null) {

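For reference, a sketch combining the methods documented above; getQuotaUsage(String) is assumed to exist on the manager and is not shown in this diff.

    import java.util.Set;

    import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaManager;
    import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;

    public class QuotaManagerSketch {
      /** Count how many tracked paths under a parent actually have a quota set. */
      static int countQuotaPaths(RouterQuotaManager manager, String parent) {
        int count = 0;
        // getPaths(...) returns the parent itself plus any tracked children.
        Set<String> paths = manager.getPaths(parent);
        for (String path : paths) {
          // getQuotaUsage(String) is an assumed accessor alongside the
          // methods documented in this hunk.
          RouterQuotaUsage usage = manager.getQuotaUsage(path);
          if (usage != null && manager.isQuotaSet(usage)) {
            count++;
          }
        }
        return count;
      }
    }
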
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
index 18268aa..e4728f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
@@ -72,7 +72,7 @@ public final class RouterQuotaUsage extends QuotaUsage {
   /**
    * Verify if namespace quota is violated once quota is set. Relevant
    * method {@link DirectoryWithQuotaFeature#verifyNamespaceQuota}.
-   * @throws NSQuotaExceededException
+   * @throws NSQuotaExceededException If the quota is exceeded.
    */
   public void verifyNamespaceQuota() throws NSQuotaExceededException {
     if (Quota.isViolated(getQuota(), getFileAndDirectoryCount())) {
@@ -84,7 +84,7 @@ public final class RouterQuotaUsage extends QuotaUsage {
   /**
    * Verify if storage space quota is violated once quota is set. Relevant
    * method {@link DirectoryWithQuotaFeature#verifyStoragespaceQuota}.
-   * @throws DSQuotaExceededException
+   * @throws DSQuotaExceededException If the quota is exceeded.
    */
   public void verifyStoragespaceQuota() throws DSQuotaExceededException {
     if (Quota.isViolated(getSpaceQuota(), getSpaceConsumed())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 56ca55f..34f51ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -70,7 +70,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ * A client proxy for Router to NN communication using the NN ClientProtocol.
  * <p>
  * Provides routers to invoke remote ClientProtocol methods and handle
  * retries/failover.
@@ -584,7 +584,7 @@ public class RouterRpcClient {
    * @param block Block used to determine appropriate nameservice.
    * @param method The remote method and parameters to invoke.
    * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
    */
   public Object invokeSingle(final ExtendedBlock block, RemoteMethod method)
       throws IOException {
@@ -602,7 +602,7 @@ public class RouterRpcClient {
    * @param bpId Block pool identifier.
    * @param method The remote method and parameters to invoke.
    * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
    */
   public Object invokeSingleBlockPool(final String bpId, RemoteMethod method)
       throws IOException {
@@ -619,7 +619,7 @@ public class RouterRpcClient {
    * @param nsId Target namespace for the method.
    * @param method The remote method and parameters to invoke.
    * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
    */
   public Object invokeSingle(final String nsId, RemoteMethod method)
       throws IOException {
@@ -639,6 +639,7 @@ public class RouterRpcClient {
    * Re-throws exceptions generated by the remote RPC call as either
    * RemoteException or IOException.
    *
+   * @param <T> The type of the remote method return.
    * @param nsId Target namespace for the method.
    * @param method The remote method and parameters to invoke.
    * @param clazz Class for the return type.
@@ -661,7 +662,7 @@ public class RouterRpcClient {
    * @param location RemoteLocation to invoke.
    * @param remoteMethod The remote method and parameters to invoke.
    * @return The result of invoking the method if successful.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
    */
   public Object invokeSingle(final RemoteLocationContext location,
       RemoteMethod remoteMethod) throws IOException {
@@ -700,6 +701,7 @@ public class RouterRpcClient {
    * If no expected result class/values are specified, the success condition is
    * a call that does not throw a remote exception.
    *
+   * @param <T> The type of the remote method return.
    * @param locations List of locations/nameservices to call concurrently.
    * @param remoteMethod The remote method and parameters to invoke.
+   * @param expectedResultClass In order to be considered a positive result, the
@@ -871,6 +873,8 @@ public class RouterRpcClient {
   /**
    * Invoke method in all locations and return success if any succeeds.
    *
+   * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
    * @param locations List of remote locations to call concurrently.
    * @param method The remote method and parameters to invoke.
    * @return If the call succeeds in any location.
@@ -899,6 +903,7 @@ public class RouterRpcClient {
    * RemoteException or IOException.
    *
    * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
    * @param locations List of remote locations to call concurrently.
    * @param method The remote method and parameters to invoke.
    * @throws IOException If all the calls throw an exception.
@@ -917,9 +922,10 @@ public class RouterRpcClient {
    * RemoteException or IOException.
    *
    * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
    * @param locations List of remote locations to call concurrently.
    * @param method The remote method and parameters to invoke.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
    * @throws IOException If all the calls throw an exception.
    */
   public <T extends RemoteLocationContext, R> Map<T, R> invokeConcurrent(
@@ -936,6 +942,7 @@ public class RouterRpcClient {
    * RemoteException or IOException.
    *
    * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
    * @param locations List of remote locations to call concurrently.
    * @param method The remote method and parameters to invoke.
    * @param requireResponse If true an exception will be thrown if all calls do
@@ -966,7 +973,7 @@ public class RouterRpcClient {
    *          successfully received are returned.
    * @param standby If the requests should go to the standby namenodes too.
    * @param clazz Type of the remote return type.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
    * @throws IOException If requiredResponse=true and any of the calls throw an
    *           exception.
    */
@@ -995,7 +1002,7 @@ public class RouterRpcClient {
    * @param standby If the requests should go to the standby namenodes too.
    * @param timeOutMs Timeout for each individual call.
    * @param clazz Type of the remote return type.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
    * @throws IOException If requiredResponse=true and any of the calls throw an
    *           exception.
    */

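For reference, a sketch of the concurrent invocation pattern the javadoc above describes; the RemoteMethod constructor and the exact invokeConcurrent overload used below are assumed rather than shown in this diff.

    import java.io.IOException;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
    import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
    import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;

    public class RpcClientSketch {
      /** Ask every subcluster for its ClientProtocol#getStats() output. */
      static Map<FederationNamespaceInfo, long[]> statsPerSubcluster(
          RouterRpcClient rpcClient, Set<FederationNamespaceInfo> nss)
          throws IOException {
        // RemoteMethod wraps the ClientProtocol method to call remotely.
        RemoteMethod method = new RemoteMethod("getStats");
        // Fan the call out and collect one entry per nameservice
        // (nsId to result, as the javadoc above puts it).
        return rpcClient.invokeConcurrent(nss, method, true, false, long[].class);
      }
    }
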
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 165b429..525d6d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -354,6 +354,8 @@ public class RouterRpcServer extends AbstractService
 
   /**
    * Get the active namenode resolver
+   *
+   * @return Active namenode resolver.
    */
   public ActiveNamenodeResolver getNamenodeResolver() {
     return namenodeResolver;
@@ -786,8 +788,8 @@ public class RouterRpcServer extends AbstractService
    * Get the list of datanodes per subcluster.
    *
    * @param type Type of the datanodes to get.
-   * @return nsId -> datanode list.
-   * @throws IOException
+   * @return nsId to datanode list.
+   * @throws IOException If the method cannot be invoked remotely.
    */
   public Map<String, DatanodeStorageReport[]> getDatanodeStorageReportMap(
       DatanodeReportType type) throws IOException {
@@ -1414,7 +1416,9 @@ public class RouterRpcServer extends AbstractService
 
   /**
    * Merge the outputs from multiple namespaces.
-   * @param map Namespace -> Output array.
+   *
+   * @param <T> The type of the objects to merge.
+   * @param map Namespace to Output array.
    * @param clazz Class of the values.
    * @return Array with the outputs.
    */
@@ -1434,6 +1438,7 @@ public class RouterRpcServer extends AbstractService
 
   /**
    * Convert a set of values into an array.
+   * @param <T> The type of the return objects.
    * @param set Input set.
    * @param clazz Class of the values.
    * @return Array with the values in set.
@@ -1446,7 +1451,8 @@ public class RouterRpcServer extends AbstractService
   }
 
   /**
-   * Get quota module implement.
+   * Get quota module implementation.
+   * @return Quota module implementation
    */
   public Quota getQuotaModule() {
     return this.quotaCall;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java
index 877e1d4..23ce4fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java
@@ -28,10 +28,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Service to periodically check if the {@link org.apache.hadoop.hdfs.server.
- * federation.store.StateStoreService StateStoreService} cached information in
- * the {@link Router} is up to date. This is for performance and removes the
- * {@link org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * Service to periodically check if the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * StateStoreService} cached information in the {@link Router} is up to date.
+ * This is for performance and removes the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
  * StateStoreService} from the critical path in common operations.
  */
 public class RouterSafemodeService extends PeriodicService {
@@ -45,12 +46,17 @@ public class RouterSafemodeService extends PeriodicService {
   /**
    * If we are in safe mode, fail requests as if a standby NN.
    * Router can enter safe mode in two different ways:
-   *   1. upon start up: router enters this mode after service start, and will
-   *      exit after certain time threshold;
-   *   2. via admin command: router enters this mode via admin command:
-   *        dfsrouteradmin -safemode enter
-   *      and exit after admin command:
-   *        dfsrouteradmin -safemode leave
+   * <ul>
+   * <li>Upon start up: router enters this mode after service start, and will
+   * exit after certain time threshold.
+   * <li>Via admin command:
+   * <ul>
+   * <li>Router enters this mode via admin command:
+   * dfsrouteradmin -safemode enter
+   * <li>And exit after admin command:
+   * dfsrouteradmin -safemode leave
+   * </ul>
+   * </ul>
    */
 
   /** Whether Router is in safe mode */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java
index 527600c..e7a91e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java
@@ -32,18 +32,27 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResp
 public interface RouterStateManager {
   /**
    * Enter safe mode and change Router state to RouterServiceState#SAFEMODE.
+   * @param request Request to enter safe mode.
+   * @return Response to enter safe mode.
+   * @throws IOException If it cannot perform the operation.
    */
   EnterSafeModeResponse enterSafeMode(EnterSafeModeRequest request)
       throws IOException;
 
   /**
    * Leave safe mode and change Router state to RouterServiceState#RUNNING.
+   * @param request Request to leave safe mode.
+   * @return Response to leave safe mode.
+   * @throws IOException If it cannot perform the operation.
    */
   LeaveSafeModeResponse leaveSafeMode(LeaveSafeModeRequest request)
       throws IOException;
 
   /**
    * Verify if current Router state is safe mode.
+   * @param request Request to get the safe mode state.
+   * @return Response to get the safe mode state.
+   * @throws IOException If it cannot perform the operation.
    */
   GetSafeModeResponse getSafeMode(GetSafeModeRequest request)
       throws IOException;

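For reference, a sketch of driving this interface; the newInstance factories and isInSafeMode accessor are assumptions, since only the javadoc appears in this patch.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;

    public class SafeModeSketch {
      /** Enter safe mode and confirm that the Router reports it. */
      static boolean enterAndVerify(RouterStateManager manager) throws IOException {
        // The newInstance() factories and isInSafeMode() accessor are assumed;
        // only the interface javadoc is shown in this patch.
        manager.enterSafeMode(EnterSafeModeRequest.newInstance());
        GetSafeModeResponse status =
            manager.getSafeMode(GetSafeModeRequest.newInstance());
        return status.isInSafeMode();
      }
    }
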
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
index cdd4449..5cfb521 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
@@ -167,7 +167,7 @@ public abstract class CachedRecordStore<R extends BaseRecord>
    * expired state.
    *
    * @param query RecordQueryResult containing the data to be inspected.
-   * @throws IOException
+   * @throws IOException If the values cannot be updated.
    */
   public void overrideExpiredRecords(QueryResult<R> query) throws IOException {
     List<R> commitRecords = new ArrayList<>();
@@ -194,7 +194,7 @@ public abstract class CachedRecordStore<R extends BaseRecord>
    * expired state.
    *
    * @param record Record record to be updated.
-   * @throws IOException
+   * @throws IOException If the values cannot be updated.
    */
   public void overrideExpiredRecord(R record) throws IOException {
     List<R> newRecords = Collections.singletonList(record);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java
index 3e8ba6b..4352ae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java
@@ -33,12 +33,13 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeReg
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 
 /**
- * Management API for NameNode registrations stored in
- * {@link org.apache.hadoop.hdfs.server.federation.store.records.MembershipState
- * MembershipState} records. The {@link org.apache.hadoop.hdfs.server.
- * federation.router.RouterHeartbeatService RouterHeartbeatService} periodically
- * polls each NN to update the NameNode metadata(addresses, operational) and HA
- * state(active, standby). Each NameNode may be polled by multiple
+ * Management API for NameNode registrations stored in {@link
+ * org.apache.hadoop.hdfs.server.federation.store.records.MembershipState
+ * MembershipState} records. The {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterHeartbeatService
+ * RouterHeartbeatService} periodically polls each NN to update the NameNode
+ * metadata(addresses, operational) and HA state(active, standby). Each
+ * NameNode may be polled by multiple
  * {@link org.apache.hadoop.hdfs.server.federation.router.Router Router}
  * instances.
  * <p>
@@ -90,6 +91,7 @@ public abstract class MembershipStore
   /**
    * Get the expired registrations from the registration cache.
    *
+   * @param request Request to get the expired registrations.
    * @return Expired registrations or zero-length list if none are found.
    * @throws StateStoreUnavailableException Throws exception if the data store
    *           is not initialized.
@@ -103,7 +105,7 @@ public abstract class MembershipStore
   /**
    * Retrieves a list of registered nameservices and their associated info.
    *
-   * @param request
+   * @param request Request to get the name spaces.
    * @return Collection of information for each registered nameservice.
    * @throws IOException if the data store could not be queried or the query is
    *           invalid.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java
index c6a0dad..2a05ece 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java
@@ -31,11 +31,11 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RouterHeartbeatRe
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
 
 /**
- * Management API for
- * {@link org.apache.hadoop.hdfs.server.federation.store.records.RouterState
- *  RouterState} records in the state store. Accesses the data store via the
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.
- * StateStoreDriver StateStoreDriver} interface. No data is cached.
+ * Management API for {@link
+ * org.apache.hadoop.hdfs.server.federation.store.records.RouterState
+ * RouterState} records in the state store. Accesses the data store via the
+ * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
+ * StateStoreDriver} interface. No data is cached.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
index 64c22ae..c55f4cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
@@ -60,12 +60,12 @@ import com.google.common.annotations.VisibleForTesting;
  * StateStoreDriver} and maintain the connection to the data store. There are
  * multiple state store driver connections supported:
  * <ul>
- * <li>File
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.impl.
- * StateStoreFileImpl StateStoreFileImpl}
- * <li>ZooKeeper
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.impl.
- * StateStoreZooKeeperImpl StateStoreZooKeeperImpl}
+ * <li>File {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl
+ * StateStoreFileImpl}
+ * <li>ZooKeeper {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl
+ * StateStoreZooKeeperImpl}
  * </ul>
  * <p>
  * The service also supports the dynamic registration of record stores like:
@@ -74,10 +74,8 @@ import com.google.common.annotations.VisibleForTesting;
  * federation.
  * <li>{@link MountTableStore}: Mount table between to subclusters.
  * See {@link org.apache.hadoop.fs.viewfs.ViewFs ViewFs}.
- * <li>{@link RebalancerStore}: Log of the rebalancing operations.
  * <li>{@link RouterStore}: Router state in the federation.
  * <li>{@link DisabledNameserviceStore}: Disabled name services.
- * <li>{@link TokenStore}: Tokens in the federation.
  * </ul>
  */
 @InterfaceAudience.Private
@@ -130,10 +128,10 @@ public class StateStoreService extends CompositeService {
   }
 
   /**
-   * Initialize the State Store and the connection to the backend.
+   * Initialize the State Store and the connection to the back-end.
    *
    * @param config Configuration for the State Store.
-   * @throws IOException
+   * @throws IOException Cannot create driver for the State Store.
    */
   @Override
   protected void serviceInit(Configuration config) throws Exception {
@@ -214,6 +212,7 @@ public class StateStoreService extends CompositeService {
    * Add a record store to the State Store. It includes adding the store, the
    * supported record and the cache management.
    *
+   * @param <T> Type of the records stored.
    * @param clazz Class of the record store to track.
    * @return New record store.
    * @throws ReflectiveOperationException

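For reference, a sketch of selecting one of the drivers listed above through configuration; the configuration key name is an assumption and should be checked against RBFConfigKeys for the release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
    import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;

    public class DriverConfigSketch {
      /** Build a configuration that selects the ZooKeeper-backed driver. */
      static Configuration zkBackedStateStore() {
        Configuration conf = new Configuration();
        // The key name below is assumed (RBF state store driver class);
        // verify it in RBFConfigKeys before relying on it.
        conf.setClass("dfs.federation.router.store.driver.class",
            StateStoreZooKeeperImpl.class, StateStoreDriver.class);
        return conf;
      }
    }
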
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
index 0a36619..924c96a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
@@ -42,6 +42,7 @@ public final class StateStoreUtils {
    * Get the base class for a record class. If we get an implementation of a
    * record we will return the real parent record class.
    *
+   * @param <T> Type of the class of the data record to check.
    * @param clazz Class of the data record to check.
    * @return Base class for the record.
    */
@@ -67,6 +68,7 @@ public final class StateStoreUtils {
    * Get the base class for a record. If we get an implementation of a record we
    * will return the real parent record class.
    *
+   * @param <T> Type of the class of the data record.
    * @param record Record to check its main class.
    * @return Base class for the record.
    */
@@ -79,6 +81,7 @@ public final class StateStoreUtils {
    * Get the base class name for a record. If we get an implementation of a
    * record we will return the real parent record class.
    *
+   * @param <T> Type of the class of the data record.
    * @param clazz Class of the data record to check.
    * @return Name of the base class for the record.
    */
@@ -90,6 +93,7 @@ public final class StateStoreUtils {
   /**
    * Filters a list of records to find all records matching the query.
    *
+   * @param <T> Type of the class of the data record.
    * @param query Map of field names and objects to use to filter results.
    * @param records List of data records to filter.
    * @return List of all records matching the query (or empty list if none

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index 443d46e..b5ce8f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -41,8 +41,9 @@ public interface StateStoreRecordOperations {
    * of the records on each call. It is recommended to override the default
    * implementations for better performance.
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
-   * @return List of all records that match the clazz.
+   * @return List of all records that match the class.
    * @throws IOException Throws exception if unable to query the data store.
    */
   @Idempotent
@@ -51,6 +52,7 @@ public interface StateStoreRecordOperations {
   /**
    * Get a single record from the store that matches the query.
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
    * @param query Query to filter results.
    * @return A single record matching the query. Null if there are no matching
@@ -67,10 +69,11 @@ public interface StateStoreRecordOperations {
    * assumes the underlying driver does not support filtering. If the driver
    * supports filtering it should overwrite this method.
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
    * @param query Query to filter results.
-   * @return Records of type clazz that match the query or empty list if none
-   *         are found.
+   * @return Records of type class that match the query or empty list if none
+   * are found.
    * @throws IOException Throws exception if unable to query the data store.
    */
   @Idempotent
@@ -81,6 +84,7 @@ public interface StateStoreRecordOperations {
    * Creates a single record. Optionally updates an existing record with same
    * primary key.
    *
+   * @param <T> Record class of the records.
    * @param record The record to insert or update.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param errorIfExists True if an error should be returned when inserting
@@ -97,9 +101,9 @@ public interface StateStoreRecordOperations {
    * Creates multiple records. Optionally updates existing records that have
    * the same primary key.
    *
+   * @param <T> Record class of the records.
    * @param records List of data records to update or create. All records must
    *                be of class clazz.
-   * @param clazz Record class of records.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param errorIfExists True if an error should be returned when inserting
    *          an existing record. Only used if allowUpdate = false.
@@ -115,6 +119,7 @@ public interface StateStoreRecordOperations {
   /**
    * Remove a single record.
    *
+   * @param <T> Record class of the records.
    * @param record Record to be removed.
    * @return true If the record was successfully removed. False if the record
    *              could not be removed or not stored.
@@ -126,6 +131,7 @@ public interface StateStoreRecordOperations {
   /**
    * Remove all records of this class from the store.
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of records to remove.
    * @return True if successful.
    * @throws IOException Throws exception if unable to query the data store.
@@ -137,6 +143,7 @@ public interface StateStoreRecordOperations {
    * Remove multiple records of a specific class that match a query. Requires
    * the getAll implementation to fetch fresh records on each call.
    *
+   * @param <T> Record class of the records.
    * @param query Query to filter what to remove.
    * @return The number of records removed.
    * @throws IOException Throws exception if unable to query the data store.
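
For context, a rough usage sketch of the record operations documented above. This is
illustrative only: the driver handle, the setNameserviceId setter used to build the
partial record, and the exact method signatures are assumptions, not part of this patch.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
    import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
    import org.apache.hadoop.hdfs.server.federation.store.records.Query;

    public class RecordOperationsSketch {
      /** Fetch the memberships of one nameservice and re-insert the first one. */
      static void example(StateStoreDriver driver) throws IOException {
        // Build a partial record; only the fields that are set are matched.
        MembershipState partial = MembershipState.newInstance();
        partial.setNameserviceId("ns0");
        Query<MembershipState> query = new Query<>(partial);

        // Records of the requested class that match the query (may be empty).
        List<MembershipState> matches =
            driver.getMultiple(MembershipState.class, query);

        if (!matches.isEmpty()) {
          // allowUpdate=true permits replacing an existing record with the same key.
          driver.put(matches.get(0), true, false);
        }
      }
    }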

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java
index 1bd35f2..30686f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
  * Drivers may optionally override additional routines for performance
  * optimization, such as custom get/put/remove queries, depending on the
  * capabilities of the data store.
- * <p>
  */
 public abstract class StateStoreBaseImpl extends StateStoreDriver {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java
index 949ec7c..6b3e55f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java
@@ -29,15 +29,14 @@
  * The state store uses a modular data storage
  * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
  * StateStoreDriver} to handle querying, updating and deleting data records. The
- * data storage driver is initialized and maintained by the
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * StateStoreService FederationStateStoreService}. The state store
+ * data storage driver is initialized and maintained by the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * FederationStateStoreService}. The state store
  * supports fetching all records of a type, filtering by column values or
  * fetching a single record by its primary key.
  * <p>
  * The state store contains several API interfaces, one for each data records
  * type.
- * <p>
  * <ul>
  * <li>FederationMembershipStateStore: state of all Namenodes in the federation.
  * Uses the MembershipState record.
@@ -46,10 +45,9 @@
  * <li>RouterStateStore: State of all routers in the federation. Uses the
  * RouterState record.
  * </ul>
- * <p>
  * Each API is defined in a separate interface. The implementations of these
- * interfaces are responsible for accessing the
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
+ * interfaces are responsible for accessing the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
  * StateStoreDriver} to query, update and delete data records.
  */
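
To make the relationship between the service, the per-record API interfaces and the
driver concrete, a hedged sketch follows; the MountTableStore request/response helpers
named here are assumptions drawn from the surrounding store API and are not changed by
this patch.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
    import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
    import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
    import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

    public class StateStoreClientSketch {
      /** List the mount entries under "/" through the MountTableStore API. */
      static List<MountTable> listMounts(StateStoreService stateStore)
          throws IOException {
        // Each record type has its own API interface registered with the service.
        MountTableStore mountStore =
            stateStore.getRegisteredRecordStore(MountTableStore.class);
        GetMountTableEntriesRequest request =
            GetMountTableEntriesRequest.newInstance("/");
        // The store delegates to the configured StateStoreDriver underneath.
        return mountStore.getMountTableEntries(request).getEntries();
      }
    }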
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index 64ecc1e..7212f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -75,8 +75,8 @@ public abstract class BaseRecord implements Comparable<BaseRecord> {
   public abstract long getExpirationMs();
 
   /**
-   * Map of primary key names->values for the record. The primary key can be a
-   * combination of 1-n different State Store serialized values.
+   * Map of primary key names to values for the record. The primary key can be
+   * a combination of 1-n different State Store serialized values.
    *
    * @return Map of key/value pairs that constitute this object's primary key.
    */
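
A minimal sketch of the shape such a primary-key map usually takes; the field names
used here are illustrative only and are not taken from any real record schema.

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class PrimaryKeySketch {
      /**
       * Typical shape of a getPrimaryKeys() implementation in a record class.
       * The key names ("nameserviceId", "namenodeId") are made up for illustration.
       */
      static SortedMap<String, String> primaryKeysFor(String nsId, String nnId) {
        SortedMap<String, String> keys = new TreeMap<>();
        keys.put("nameserviceId", nsId);
        keys.put("namenodeId", nnId);
        return keys;   // 1-n serialized values form the composite primary key
      }
    }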

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
index e33dedf..642c72b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNameno
 import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.EXPIRED;
 import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.UNAVAILABLE;
 
-import java.io.IOException;
 import java.util.Comparator;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -69,7 +68,6 @@ public abstract class MembershipState extends BaseRecord
   /**
    * Create a new membership instance.
    * @return Membership instance.
-   * @throws IOException
    */
   public static MembershipState newInstance() {
     MembershipState record =
@@ -93,7 +91,6 @@ public abstract class MembershipState extends BaseRecord
    * @param state State of the federation.
    * @param safemode If the safe mode is enabled.
    * @return Membership instance.
-   * @throws IOException If we cannot create the instance.
    */
   public static MembershipState newInstance(String router, String nameservice,
       String namenode, String clusterId, String blockPoolId, String rpcAddress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
index 0e2e868..c1585b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -39,12 +39,11 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * Data schema for
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * MountTableStore FederationMountTableStore} data stored in the
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * StateStoreService FederationStateStoreService}. Supports string
- * serialization.
+ * Data schema for {@link
+ * org.apache.hadoop.hdfs.server.federation.store.MountTableStore
+ * FederationMountTableStore} data stored in the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * FederationStateStoreService}. Supports string serialization.
  */
 public abstract class MountTable extends BaseRecord {
 
@@ -100,10 +99,11 @@ public abstract class MountTable extends BaseRecord {
    * Constructor for a mount table entry with a single destinations.
    *
    * @param src Source path in the mount entry.
-   * @param destinations Nameservice destination of the mount point.
+   * @param destinations Name service destination of the mount point.
    * @param dateCreated Created date.
    * @param dateModified Modified date.
-   * @throws IOException
+   * @return New mount table instance.
+   * @throws IOException If it cannot be created.
    */
   public static MountTable newInstance(final String src,
       final Map<String, String> destinations,
@@ -119,8 +119,8 @@ public abstract class MountTable extends BaseRecord {
    * Constructor for a mount table entry with multiple destinations.
    *
    * @param src Source path in the mount entry.
-   * @param destinations Nameservice destinations of the mount point.
-   * @throws IOException
+   * @param destinations Name service destinations of the mount point.
+   * @throws IOException If it cannot be created.
    */
   public static MountTable newInstance(final String src,
       final Map<String, String> destinations) throws IOException {
@@ -187,12 +187,16 @@ public abstract class MountTable extends BaseRecord {
   /**
    * Set the destination paths.
    *
-   * @param paths Destination paths.
+   * @param dests Destination paths.
    */
   public abstract void setDestinations(List<RemoteLocation> dests);
 
   /**
    * Add a new destination to this mount table entry.
+   *
+   * @param nsId Name service identifier.
+   * @param path Path in the remote name service.
+   * @return If the destination was added.
    */
   public abstract boolean addDestination(String nsId, String path);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java
index 3c59abf..16f150b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java
@@ -31,7 +31,7 @@ public class Query<T extends BaseRecord> {
   /**
    * Create a query to search for a partial record.
    *
-   * @param partial It defines the attributes to search.
+   * @param part It defines the attributes to search.
    */
   public Query(final T part) {
     this.partial = part;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
index 89273db..fc3e49f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
@@ -103,7 +103,7 @@ public class ConsistentHashRing {
 
   /**
    * Return location (owner) of specified item. Owner is the next
-   * entry on the hash ring (with a hash value > hash value of item).
+   * entry on the hash ring (with a hash value &gt; hash value of item).
    * @param item Item to look for.
    * @return The location of the item.
    */
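
A self-contained sketch of the lookup rule described above (next entry clockwise with a
larger hash, wrapping around); this illustrates the technique only and is not the
class's actual implementation.

    import java.util.Map;
    import java.util.TreeMap;

    public class HashRingSketch {
      // Ring keyed by item hash, mapping to the owning location.
      private final TreeMap<String, String> ring = new TreeMap<>();

      void addLocation(String hash, String location) {
        ring.put(hash, location);
      }

      /** Owner is the next entry on the ring with a hash greater than the item's. */
      String getOwner(String itemHash) {
        Map.Entry<String, String> next = ring.higherEntry(itemHash);
        if (next == null) {
          next = ring.firstEntry();   // wrap around the ring
        }
        return next == null ? null : next.getValue();
      }
    }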

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7f7078/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 0a681e9..1aefe4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -340,6 +340,8 @@ public class RouterAdmin extends Configured implements Tool {
    *
    * @param parameters Parameters for the mount point.
    * @param i Index in the parameters.
+   * @return If it was successful.
+   * @throws IOException If it cannot add the mount point.
    */
   public boolean addMount(String[] parameters, int i) throws IOException {
     // Mandatory parameters
@@ -495,6 +497,8 @@ public class RouterAdmin extends Configured implements Tool {
    *
    * @param parameters Parameters for the mount point.
    * @param i Index in the parameters.
+   * @return If it updated the mount point successfully.
+   * @throws IOException If there is an error.
    */
   public boolean updateMount(String[] parameters, int i) throws IOException {
     // Mandatory parameters
@@ -599,6 +603,7 @@ public class RouterAdmin extends Configured implements Tool {
    * Remove mount point.
    *
    * @param path Path to remove.
+   * @return If the mount point was removed successfully.
    * @throws IOException If it cannot be removed.
    */
   public boolean removeMount(String path) throws IOException {
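
These handlers back the hdfs dfsrouteradmin commands. A hedged driver sketch follows;
the RouterAdmin(Configuration) constructor and the example mount arguments are
assumptions, not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class RouterAdminSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Roughly equivalent to: hdfs dfsrouteradmin -add /data ns0 /user/data
        int rc = ToolRunner.run(conf, new RouterAdmin(conf),
            new String[] {"-add", "/data", "ns0", "/user/data"});
        System.exit(rc);
      }
    }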

