hadoop git commit: HDDS-353. Multiple delete Blocks tests are failing consistently. Contributed by Lokesh Jain.

2018-08-20 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6425ed27e -> e3d73bbc2


HDDS-353. Multiple delete Blocks tests are failing consistently. Contributed by 
Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3d73bbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3d73bbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3d73bbc

Branch: refs/heads/trunk
Commit: e3d73bbc24eea8d539811ba07c695b1d0c139b61
Parents: 6425ed2
Author: Mukul Kumar Singh 
Authored: Mon Aug 20 13:37:58 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Mon Aug 20 13:37:58 2018 +0530

--
 .../apache/hadoop/hdds/scm/HddsServerUtil.java   |  6 +++---
 .../report/CommandStatusReportPublisher.java |  2 +-
 .../common/report/ContainerReportPublisher.java  |  2 +-
 .../common/report/NodeReportPublisher.java   |  2 +-
 .../statemachine/DatanodeStateMachine.java   |  3 +--
 .../statemachine/EndpointStateMachine.java   |  9 +
 .../hdds/scm/container/ContainerMapping.java |  7 ---
 .../scm/container/closer/ContainerCloser.java|  9 -
 .../ozone/TestStorageContainerManager.java   | 19 ++-
 .../commandhandler/TestBlockDeletion.java| 10 +++---
 .../apache/hadoop/ozone/web/client/TestKeys.java | 10 +-
 11 files changed, 50 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d73bbc/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index a8b919d..580d027 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -180,11 +180,11 @@ public final class HddsServerUtil {
* SCM.
*
* @param conf - Ozone Config
-   * @return - HB interval in seconds.
+   * @return - HB interval in milli seconds.
*/
   public static long getScmHeartbeatInterval(Configuration conf) {
 return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
+HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
   }
 
   /**
@@ -202,7 +202,7 @@ public final class HddsServerUtil {
 
 long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
 
-long heartbeatIntervalMs = getScmHeartbeatInterval(conf) * 1000;
+long heartbeatIntervalMs = getScmHeartbeatInterval(conf);
 
 
 // Make sure that StaleNodeInterval is configured way above the frequency
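
For illustration, a minimal self-contained sketch (not part of the commit) of how Configuration.getTimeDuration handles units: it converts the parsed value to the requested TimeUnit, which is why asking for MILLISECONDS directly makes the manual "* 1000" above unnecessary:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class TimeDurationExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("hdds.heartbeat.interval", "30s");
        // Same stored value, two units: the unit argument drives the conversion.
        long ms = conf.getTimeDuration("hdds.heartbeat.interval",
            60000, TimeUnit.MILLISECONDS);   // 30000
        long s = conf.getTimeDuration("hdds.heartbeat.interval",
            60, TimeUnit.SECONDS);           // 30
        System.out.println(ms + " " + s);
      }
    }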

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d73bbc/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
index 3898d15..4cf6321 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java
@@ -58,7 +58,7 @@ public class CommandStatusReportPublisher extends
   getConf());
 
   Preconditions.checkState(
-  heartbeatFrequency < cmdStatusReportInterval,
+  heartbeatFrequency <= cmdStatusReportInterval,
   HDDS_COMMAND_STATUS_REPORT_INTERVAL +
   " cannot be configured lower than heartbeat frequency.");
 }
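
A small hedged example of what the relaxed check changes: configuring the command-status report interval equal to the heartbeat frequency now passes the precondition instead of throwing:

    import com.google.common.base.Preconditions;

    public class BoundaryCheckExample {
      public static void main(String[] args) {
        long heartbeatFrequency = 30000L;
        long cmdStatusReportInterval = 30000L;  // equal to the heartbeat
        // "<" rejected this boundary case; "<=" accepts it.
        Preconditions.checkState(heartbeatFrequency <= cmdStatusReportInterval,
            "report interval cannot be configured lower than heartbeat frequency");
        System.out.println("check passed");
      }
    }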

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d73bbc/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
index 3e73bb4..ccb9a9a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/repor

hadoop git commit: HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by Takanobu Asanuma.

2018-08-20 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk e3d73bbc2 -> 01ff81781


HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by 
Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01ff8178
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01ff8178
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01ff8178

Branch: refs/heads/trunk
Commit: 01ff8178148790f7b0112058cf08d23d031b6868
Parents: e3d73bb
Author: Yiqun Lin 
Authored: Mon Aug 20 23:01:59 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Aug 20 23:01:59 2018 +0800

--
 .../server/federation/router/RouterRpcClient.java | 18 ++
 .../server/federation/router/RouterRpcServer.java |  3 ++-
 .../hdfs/server/federation/MockResolver.java  |  3 +++
 .../hdfs/server/federation/router/TestRouter.java | 18 ++
 4 files changed, 33 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01ff8178/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 3eb7241..56ca55f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -92,8 +92,8 @@ public class RouterRpcClient {
   LoggerFactory.getLogger(RouterRpcClient.class);
 
 
-  /** Router identifier. */
-  private final String routerId;
+  /** Router using this RPC client. */
+  private final Router router;
 
   /** Interface to identify the active NN for a nameservice or blockpool ID. */
   private final ActiveNamenodeResolver namenodeResolver;
@@ -116,12 +116,13 @@ public class RouterRpcClient {
* Create a router RPC client to manage remote procedure calls to NNs.
*
* @param conf Hdfs Configuation.
+   * @param router A router using this RPC client.
* @param resolver A NN resolver to determine the currently active NN in HA.
* @param monitor Optional performance monitor.
*/
-  public RouterRpcClient(Configuration conf, String identifier,
+  public RouterRpcClient(Configuration conf, Router router,
   ActiveNamenodeResolver resolver, RouterRpcMonitor monitor) {
-this.routerId = identifier;
+this.router = router;
 
 this.namenodeResolver = resolver;
 
@@ -343,7 +344,8 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("No namenodes to invoke " + method.getName() +
-  " with params " + Arrays.toString(params) + " from " + 
this.routerId);
+  " with params " + Arrays.toString(params) + " from "
+  + router.getRouterId());
 }
 
 Object ret = null;
@@ -1126,7 +1128,7 @@ public class RouterRpcClient {
   String msg = "Not enough client threads " + active + "/" + total;
   LOG.error(msg);
   throw new StandbyException(
-  "Router " + routerId + " is overloaded: " + msg);
+  "Router " + router.getRouterId() + " is overloaded: " + msg);
 } catch (InterruptedException ex) {
   LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
   throw new IOException(
@@ -1150,7 +1152,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + nsId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
@@ -1171,7 +1173,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + bpId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
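
The shape of the fix, as a minimal sketch with a stand-in Router interface (the real class is org.apache.hadoop.hdfs.server.federation.router.Router): holding the Router object and reading getRouterId() at call time means an ID assigned after the RPC client is constructed is still observed, whereas the old String field captured null once and kept it:

    // Stand-in for the real Router; only the lazily assigned ID matters here.
    interface Router {
      String getRouterId();
    }

    class RpcClientSketch {
      private final Router router;  // keep the object, not a copied String

      RpcClientSketch(Router router) {
        this.router = router;
      }

      String overloadMessage() {
        // Resolved on every call, so a router ID set during startup shows up.
        return "Router " + router.getRouterId() + " is overloaded";
      }
    }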

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01ff8178/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index fe54993..2deda9f 100644
--- 
a/hadoop-hdfs-project/hado

hadoop git commit: HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by Takanobu Asanuma.

2018-08-20 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 675aa2bbc -> 5e2e68d4b


HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by 
Takanobu Asanuma.

(cherry picked from commit 01ff8178148790f7b0112058cf08d23d031b6868)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e2e68d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e2e68d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e2e68d4

Branch: refs/heads/branch-3.0
Commit: 5e2e68d4bb3af43edc6f45d5ca0672fb1ec0d7f2
Parents: 675aa2b
Author: Yiqun Lin 
Authored: Mon Aug 20 23:01:59 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Aug 20 23:03:19 2018 +0800

--
 .../server/federation/router/RouterRpcClient.java | 18 ++
 .../server/federation/router/RouterRpcServer.java |  3 ++-
 .../hdfs/server/federation/MockResolver.java  |  3 +++
 .../hdfs/server/federation/router/TestRouter.java | 18 ++
 4 files changed, 33 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e2e68d4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 3eb7241..56ca55f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -92,8 +92,8 @@ public class RouterRpcClient {
   LoggerFactory.getLogger(RouterRpcClient.class);
 
 
-  /** Router identifier. */
-  private final String routerId;
+  /** Router using this RPC client. */
+  private final Router router;
 
   /** Interface to identify the active NN for a nameservice or blockpool ID. */
   private final ActiveNamenodeResolver namenodeResolver;
@@ -116,12 +116,13 @@ public class RouterRpcClient {
* Create a router RPC client to manage remote procedure calls to NNs.
*
* @param conf Hdfs Configuation.
+   * @param router A router using this RPC client.
* @param resolver A NN resolver to determine the currently active NN in HA.
* @param monitor Optional performance monitor.
*/
-  public RouterRpcClient(Configuration conf, String identifier,
+  public RouterRpcClient(Configuration conf, Router router,
   ActiveNamenodeResolver resolver, RouterRpcMonitor monitor) {
-this.routerId = identifier;
+this.router = router;
 
 this.namenodeResolver = resolver;
 
@@ -343,7 +344,8 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("No namenodes to invoke " + method.getName() +
-  " with params " + Arrays.toString(params) + " from " + 
this.routerId);
+  " with params " + Arrays.toString(params) + " from "
+  + router.getRouterId());
 }
 
 Object ret = null;
@@ -1126,7 +1128,7 @@ public class RouterRpcClient {
   String msg = "Not enough client threads " + active + "/" + total;
   LOG.error(msg);
   throw new StandbyException(
-  "Router " + routerId + " is overloaded: " + msg);
+  "Router " + router.getRouterId() + " is overloaded: " + msg);
 } catch (InterruptedException ex) {
   LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
   throw new IOException(
@@ -1150,7 +1152,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + nsId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
@@ -1171,7 +1173,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + bpId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e2e68d4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Ro

hadoop git commit: HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by Takanobu Asanuma.

2018-08-20 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a3d4a25bb -> d7442c244


HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by 
Takanobu Asanuma.

(cherry picked from commit 01ff8178148790f7b0112058cf08d23d031b6868)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7442c24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7442c24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7442c24

Branch: refs/heads/branch-3.1
Commit: d7442c244f5490e66e89d3a4e787c170807ce338
Parents: a3d4a25
Author: Yiqun Lin 
Authored: Mon Aug 20 23:01:59 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Aug 20 23:05:55 2018 +0800

--
 .../server/federation/router/RouterRpcClient.java | 18 ++
 .../server/federation/router/RouterRpcServer.java |  3 ++-
 .../hdfs/server/federation/MockResolver.java  |  3 +++
 .../hdfs/server/federation/router/TestRouter.java | 18 ++
 4 files changed, 33 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7442c24/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 3eb7241..56ca55f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -92,8 +92,8 @@ public class RouterRpcClient {
   LoggerFactory.getLogger(RouterRpcClient.class);
 
 
-  /** Router identifier. */
-  private final String routerId;
+  /** Router using this RPC client. */
+  private final Router router;
 
   /** Interface to identify the active NN for a nameservice or blockpool ID. */
   private final ActiveNamenodeResolver namenodeResolver;
@@ -116,12 +116,13 @@ public class RouterRpcClient {
* Create a router RPC client to manage remote procedure calls to NNs.
*
* @param conf Hdfs Configuation.
+   * @param router A router using this RPC client.
* @param resolver A NN resolver to determine the currently active NN in HA.
* @param monitor Optional performance monitor.
*/
-  public RouterRpcClient(Configuration conf, String identifier,
+  public RouterRpcClient(Configuration conf, Router router,
   ActiveNamenodeResolver resolver, RouterRpcMonitor monitor) {
-this.routerId = identifier;
+this.router = router;
 
 this.namenodeResolver = resolver;
 
@@ -343,7 +344,8 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("No namenodes to invoke " + method.getName() +
-  " with params " + Arrays.toString(params) + " from " + 
this.routerId);
+  " with params " + Arrays.toString(params) + " from "
+  + router.getRouterId());
 }
 
 Object ret = null;
@@ -1126,7 +1128,7 @@ public class RouterRpcClient {
   String msg = "Not enough client threads " + active + "/" + total;
   LOG.error(msg);
   throw new StandbyException(
-  "Router " + routerId + " is overloaded: " + msg);
+  "Router " + router.getRouterId() + " is overloaded: " + msg);
 } catch (InterruptedException ex) {
   LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
   throw new IOException(
@@ -1150,7 +1152,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + nsId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
@@ -1171,7 +1173,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + bpId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7442c24/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Ro

hadoop git commit: HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by Takanobu Asanuma.

2018-08-20 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c68d1d49c -> 18ebe1830


HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by 
Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18ebe183
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18ebe183
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18ebe183

Branch: refs/heads/branch-2
Commit: 18ebe183052ee37ce041432f3b32a3b9a531c3e6
Parents: c68d1d4
Author: Yiqun Lin 
Authored: Mon Aug 20 23:08:26 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Aug 20 23:08:26 2018 +0800

--
 .../federation/router/RouterRpcClient.java  | 18 +---
 .../federation/router/RouterRpcServer.java  |  3 ++-
 .../hdfs/server/federation/MockResolver.java|  3 +++
 .../server/federation/router/TestRouter.java| 22 
 4 files changed, 37 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18ebe183/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index e4d304d..0161900 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -92,8 +92,8 @@ public class RouterRpcClient {
   LoggerFactory.getLogger(RouterRpcClient.class);
 
 
-  /** Router identifier. */
-  private final String routerId;
+  /** Router using this RPC client. */
+  private final Router router;
 
   /** Interface to identify the active NN for a nameservice or blockpool ID. */
   private final ActiveNamenodeResolver namenodeResolver;
@@ -116,12 +116,13 @@ public class RouterRpcClient {
* Create a router RPC client to manage remote procedure calls to NNs.
*
* @param conf Hdfs Configuation.
+   * @param router A router using this RPC client.
* @param resolver A NN resolver to determine the currently active NN in HA.
* @param monitor Optional performance monitor.
*/
-  public RouterRpcClient(Configuration conf, String identifier,
+  public RouterRpcClient(Configuration conf, Router router,
   ActiveNamenodeResolver resolver, RouterRpcMonitor monitor) {
-this.routerId = identifier;
+this.router = router;
 
 this.namenodeResolver = resolver;
 
@@ -335,7 +336,8 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("No namenodes to invoke " + method.getName() +
-  " with params " + Arrays.toString(params) + " from " + 
this.routerId);
+  " with params " + Arrays.toString(params) + " from "
+  + router.getRouterId());
 }
 
 Object ret = null;
@@ -1118,7 +1120,7 @@ public class RouterRpcClient {
   String msg = "Not enough client threads " + active + "/" + total;
   LOG.error(msg);
   throw new StandbyException(
-  "Router " + routerId + " is overloaded: " + msg);
+  "Router " + router.getRouterId() + " is overloaded: " + msg);
 } catch (InterruptedException ex) {
   LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
   throw new IOException(
@@ -1142,7 +1144,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + nsId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
@@ -1163,7 +1165,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + bpId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18ebe183/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 7dff226..9c07a9a 100644
--- 
a/hadoop-hdfs-project/hadoop

hadoop git commit: HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by Takanobu Asanuma.

2018-08-20 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 42c47971d -> 9c8a79806


HDFS-13750. RBF: Router ID in RouterRpcClient is always null. Contributed by 
Takanobu Asanuma.

(cherry picked from commit 18ebe183052ee37ce041432f3b32a3b9a531c3e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c8a7980
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c8a7980
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c8a7980

Branch: refs/heads/branch-2.9
Commit: 9c8a79806d2218c6dfd79d373030074ea981d0c4
Parents: 42c4797
Author: Yiqun Lin 
Authored: Mon Aug 20 23:08:26 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Aug 20 23:10:02 2018 +0800

--
 .../federation/router/RouterRpcClient.java  | 18 +---
 .../federation/router/RouterRpcServer.java  |  3 ++-
 .../hdfs/server/federation/MockResolver.java|  3 +++
 .../server/federation/router/TestRouter.java| 22 
 4 files changed, 37 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c8a7980/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index e4d304d..0161900 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -92,8 +92,8 @@ public class RouterRpcClient {
   LoggerFactory.getLogger(RouterRpcClient.class);
 
 
-  /** Router identifier. */
-  private final String routerId;
+  /** Router using this RPC client. */
+  private final Router router;
 
   /** Interface to identify the active NN for a nameservice or blockpool ID. */
   private final ActiveNamenodeResolver namenodeResolver;
@@ -116,12 +116,13 @@ public class RouterRpcClient {
* Create a router RPC client to manage remote procedure calls to NNs.
*
* @param conf Hdfs Configuation.
+   * @param router A router using this RPC client.
* @param resolver A NN resolver to determine the currently active NN in HA.
* @param monitor Optional performance monitor.
*/
-  public RouterRpcClient(Configuration conf, String identifier,
+  public RouterRpcClient(Configuration conf, Router router,
   ActiveNamenodeResolver resolver, RouterRpcMonitor monitor) {
-this.routerId = identifier;
+this.router = router;
 
 this.namenodeResolver = resolver;
 
@@ -335,7 +336,8 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("No namenodes to invoke " + method.getName() +
-  " with params " + Arrays.toString(params) + " from " + 
this.routerId);
+  " with params " + Arrays.toString(params) + " from "
+  + router.getRouterId());
 }
 
 Object ret = null;
@@ -1118,7 +1120,7 @@ public class RouterRpcClient {
   String msg = "Not enough client threads " + active + "/" + total;
   LOG.error(msg);
   throw new StandbyException(
-  "Router " + routerId + " is overloaded: " + msg);
+  "Router " + router.getRouterId() + " is overloaded: " + msg);
 } catch (InterruptedException ex) {
   LOG.error("Unexpected error while invoking API: {}", ex.getMessage());
   throw new IOException(
@@ -1142,7 +1144,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + nsId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }
@@ -1163,7 +1165,7 @@ public class RouterRpcClient {
 
 if (namenodes == null || namenodes.isEmpty()) {
   throw new IOException("Cannot locate a registered namenode for " + bpId +
-  " from " + this.routerId);
+  " from " + router.getRouterId());
 }
 return namenodes;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c8a7980/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcS

hadoop git commit: YARN-8242. YARN NM: OOM error while reading back the state store on recovery. Contributed by Pradeep Ambati and Kanwaljeet Sachdev

2018-08-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 01ff81781 -> 65e746971


YARN-8242. YARN NM: OOM error while reading back the state store on recovery. 
Contributed by Pradeep Ambati and Kanwaljeet Sachdev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65e74697
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65e74697
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65e74697

Branch: refs/heads/trunk
Commit: 65e7469712be6cf393e29ef73cc94727eec81227
Parents: 01ff817
Author: Jason Lowe 
Authored: Mon Aug 20 10:14:40 2018 -0500
Committer: Jason Lowe 
Committed: Mon Aug 20 10:14:40 2018 -0500

--
 .../server/nodemanager/DeletionService.java |  25 +-
 .../containermanager/ContainerManagerImpl.java  |  26 +-
 .../localizer/ResourceLocalizationService.java  |  56 +--
 .../recovery/NMLeveldbStateStoreService.java| 412 ---
 .../recovery/NMNullStateStoreService.java   |   2 +-
 .../recovery/NMStateStoreService.java   |  55 +--
 .../nodemanager/recovery/RecoveryIterator.java  |  41 ++
 .../security/NMContainerTokenSecretManager.java |  27 +-
 .../security/NMTokenSecretManagerInNM.java  |  15 +-
 .../recovery/NMMemoryStateStoreService.java |  82 +++-
 .../TestNMLeveldbStateStoreService.java | 216 +++---
 11 files changed, 647 insertions(+), 310 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e74697/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index ae81dc1..e665c5a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
+
+import org.apache.hadoop.yarn.server.nodemanager.recovery.RecoveryIterator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -96,16 +97,20 @@ public class DeletionService extends AbstractService {
 
   private void recover(NMStateStoreService.RecoveredDeletionServiceState state)
   throws IOException {
-List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks();
 Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap =
-new HashMap<>(taskProtos.size());
-Set<Integer> successorTasks = new HashSet<>();
-for (DeletionServiceDeleteTaskProto proto : taskProtos) {
-  DeletionTaskRecoveryInfo info =
-  NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
-  idToInfoMap.put(info.getTask().getTaskId(), info);
-  nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
-  successorTasks.addAll(info.getSuccessorTaskIds());
+new HashMap<Integer, DeletionTaskRecoveryInfo>();
+Set<Integer> successorTasks = new HashSet<Integer>();
+
+try (RecoveryIterator<DeletionServiceDeleteTaskProto> it =
+ state.getIterator()) {
+  while (it.hasNext()) {
+DeletionServiceDeleteTaskProto proto = it.next();
+DeletionTaskRecoveryInfo info =
+NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
+idToInfoMap.put(info.getTask().getTaskId(), info);
+nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
+successorTasks.addAll(info.getSuccessorTaskIds());
+  }
 }
 
 // restore the task dependencies and schedule the deletion tasks that
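
A hedged sketch of the streaming pattern this patch introduces (the sketch types are simplified, not the NM's exact API): instead of materializing every recovered record into one list, which is what exhausted the heap on large state stores, recovery walks a closeable iterator and holds one record at a time:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.function.Consumer;

    // Simplified stand-in for the new RecoveryIterator.
    interface RecoveryIteratorSketch<T> extends Closeable {
      boolean hasNext() throws IOException;
      T next() throws IOException;
    }

    final class RecoverySketch {
      // Memory use no longer scales with the number of stored records.
      static <T> void recoverAll(RecoveryIteratorSketch<T> it,
          Consumer<T> handler) throws IOException {
        try (RecoveryIteratorSketch<T> scoped = it) {  // close() frees the
          while (scoped.hasNext()) {                   // underlying leveldb iterator
            handler.accept(scoped.next());
          }
        }
      }
    }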

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e74697/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/ha

hadoop git commit: YARN-8242. YARN NM: OOM error while reading back the state store on recovery. Contributed by Pradeep Ambati and Kanwaljeet Sachdev

2018-08-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d7442c244 -> 44c4928b6


YARN-8242. YARN NM: OOM error while reading back the state store on recovery. 
Contributed by Pradeep Ambati and Kanwaljeet Sachdev

(cherry picked from commit 65e7469712be6cf393e29ef73cc94727eec81227)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44c4928b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44c4928b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44c4928b

Branch: refs/heads/branch-3.1
Commit: 44c4928b64498c76fc5dffe288d9e959960282df
Parents: d7442c2
Author: Jason Lowe 
Authored: Mon Aug 20 10:14:40 2018 -0500
Committer: Jason Lowe 
Committed: Mon Aug 20 10:21:57 2018 -0500

--
 .../server/nodemanager/DeletionService.java |  25 +-
 .../containermanager/ContainerManagerImpl.java  |  26 +-
 .../localizer/ResourceLocalizationService.java  |  56 +--
 .../recovery/NMLeveldbStateStoreService.java| 412 ---
 .../recovery/NMNullStateStoreService.java   |   2 +-
 .../recovery/NMStateStoreService.java   |  55 +--
 .../nodemanager/recovery/RecoveryIterator.java  |  41 ++
 .../security/NMContainerTokenSecretManager.java |  27 +-
 .../security/NMTokenSecretManagerInNM.java  |  15 +-
 .../recovery/NMMemoryStateStoreService.java |  82 +++-
 .../TestNMLeveldbStateStoreService.java | 216 +++---
 11 files changed, 647 insertions(+), 310 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44c4928b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index ae81dc1..e665c5a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -19,13 +19,14 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
+
+import org.apache.hadoop.yarn.server.nodemanager.recovery.RecoveryIterator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -96,16 +97,20 @@ public class DeletionService extends AbstractService {
 
   private void recover(NMStateStoreService.RecoveredDeletionServiceState state)
   throws IOException {
-List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks();
 Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap =
-new HashMap<>(taskProtos.size());
-Set<Integer> successorTasks = new HashSet<>();
-for (DeletionServiceDeleteTaskProto proto : taskProtos) {
-  DeletionTaskRecoveryInfo info =
-  NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
-  idToInfoMap.put(info.getTask().getTaskId(), info);
-  nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
-  successorTasks.addAll(info.getSuccessorTaskIds());
+new HashMap<Integer, DeletionTaskRecoveryInfo>();
+Set<Integer> successorTasks = new HashSet<Integer>();
+
+try (RecoveryIterator<DeletionServiceDeleteTaskProto> it =
+ state.getIterator()) {
+  while (it.hasNext()) {
+DeletionServiceDeleteTaskProto proto = it.next();
+DeletionTaskRecoveryInfo info =
+NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
+idToInfoMap.put(info.getTask().getTaskId(), info);
+nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
+successorTasks.addAll(info.getSuccessorTaskIds());
+  }
 }
 
 // restore the task dependencies and schedule the deletion tasks that

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44c4928b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-ya

hadoop git commit: YARN-8673. [AMRMProxy] More robust responseId resync after an YarnRM master slave switch. Contributed by Botong Huang.

2018-08-20 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 65e746971 -> 8736fc39a


YARN-8673. [AMRMProxy] More robust responseId resync after an YarnRM master 
slave switch. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8736fc39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8736fc39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8736fc39

Branch: refs/heads/trunk
Commit: 8736fc39ac3b3de168d2c216f3d1c0edb48fb3f9
Parents: 65e7469
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 20 12:22:36 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 20 12:22:36 2018 -0700

--
 .../hadoop/yarn/client/AMRMClientUtils.java |  47 +++
 .../hadoop/yarn/server/AMRMClientRelayer.java   | 130 +--
 .../server/uam/UnmanagedApplicationManager.java |   2 +-
 .../yarn/server/TestAMRMClientRelayer.java  |  53 +++-
 .../amrmproxy/FederationInterceptor.java|   6 +-
 .../ApplicationMasterService.java   |   9 +-
 6 files changed, 190 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8736fc39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
index 5d4ab4a6..b8319cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -54,6 +55,10 @@ public final class AMRMClientUtils {
   public static final String APP_ALREADY_REGISTERED_MESSAGE =
   "Application Master is already registered : ";
 
+  public static final String EXPECTED_HB_RESPONSEID_MESSAGE =
+  " expect responseId to be ";
+  public static final String RECEIVED_HB_RESPONSEID_MESSAGE = " but get ";
+
   private AMRMClientUtils() {
   }
 
@@ -105,6 +110,48 @@ public final class AMRMClientUtils {
 SaslRpcServer.AuthMethod.TOKEN.toString());
   }
 
+  /**
+   * Generate the exception message when RM receives an AM heartbeat with
+   * invalid responseId.
+   *
+   * @param appAttemptId the app attempt
+   * @param expected the expected responseId value
+   * @param received the received responseId value
+   * @return the assembled exception message
+   */
+  public static String assembleInvalidResponseIdExceptionMessage(
+  ApplicationAttemptId appAttemptId, int expected, int received) {
+return "Invalid responseId in AllocateRequest from application attempt: "
++ appAttemptId + EXPECTED_HB_RESPONSEID_MESSAGE + expected
++ RECEIVED_HB_RESPONSEID_MESSAGE + received;
+  }
+
+  /**
+   * Parse the expected responseId from the exception generated by RM when
+   * processing AM heartbeat.
+   *
+   * @param exceptionMessage the exception message thrown by RM
+   * @return the parsed expected responseId, -1 if failed
+   */
+  public static int parseExpectedResponseIdFromException(
+  String exceptionMessage) {
+if (exceptionMessage == null) {
+  return -1;
+}
+int start = exceptionMessage.indexOf(EXPECTED_HB_RESPONSEID_MESSAGE);
+int end = exceptionMessage.indexOf(RECEIVED_HB_RESPONSEID_MESSAGE);
+if (start == -1 || end == -1) {
+  return -1;
+}
+start += EXPECTED_HB_RESPONSEID_MESSAGE.length();
+
+try {
+  return Integer.parseInt(exceptionMessage.substring(start, end));
+} catch (NumberFormatException ex) {
+  return -1;
+}
+  }
+
   public static void addToOutstandingSchedulingRequests(
   Collection<SchedulingRequest> requests,
   Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests) {
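
A hedged round-trip of the two helpers above, with the attempt ID built purely for illustration:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.client.AMRMClientUtils;

    public class ResponseIdRoundTrip {
      public static void main(String[] args) {
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(1534700000L, 1), 1);
        // RM side: build the exception text for an out-of-sync heartbeat.
        String msg = AMRMClientUtils
            .assembleInvalidResponseIdExceptionMessage(attemptId, 7, 3);
        // AMRMProxy side: recover the expected responseId and resync to it.
        System.out.println(
            AMRMClientUtils.parseExpectedResponseIdFromException(msg));  // 7
      }
    }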

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8736fc39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/had

hadoop git commit: YARN-8581. [AMRMProxy] Add sub-cluster timeout in LocalityMulticastAMRMProxyPolicy. Contributed by Botong Huang.

2018-08-20 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8736fc39a -> e0f6ffdba


YARN-8581. [AMRMProxy] Add sub-cluster timeout in 
LocalityMulticastAMRMProxyPolicy. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0f6ffdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0f6ffdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0f6ffdb

Branch: refs/heads/trunk
Commit: e0f6ffdbad6f43fd43ec57fb68ebf5275b8b9ba0
Parents: 8736fc3
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 20 14:33:16 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 20 14:33:16 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  8 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../LocalityMulticastAMRMProxyPolicy.java   | 64 --
 .../utils/FederationStateStoreFacade.java   |  9 ++
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 91 +---
 .../utils/FederationPoliciesTestUtil.java   |  7 +-
 6 files changed, 162 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f6ffdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 78e28f7..148edb9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3209,8 +3209,14 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.federation.resolver."
   + "DefaultSubClusterResolverImpl";
 
-  public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
+  // AMRMProxy split-merge timeout for active sub-clusters. We will not route
+  // new asks to expired sub-clusters.
+  public static final String FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT =
+  FEDERATION_PREFIX + "amrmproxy.subcluster.timeout.ms";
+  public static final long DEFAULT_FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT =
+  60000; // one minute
 
+  public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
   public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX
   + "policy-manager";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f6ffdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 9249ed4..d63933c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -105,6 +105,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT);
 
 // Federation StateStore ZK implementation configs to be ignored
 configurationPropsToSkipCompare.add(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f6ffdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 1481f34..1ccd61c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn

[04/14] hadoop git commit: HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contributed by

2018-08-20 Thread arp
HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to 
missing synchronization
between rollEditsRpcExecutor and tailerThread shutdown. Contributed 
by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75406990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75406990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75406990

Branch: refs/heads/branch-3.1
Commit: 754069906bda1d961bce9f584d0457ee10db6762
Parents: e4b75ad
Author: Xiao Chen 
Authored: Tue Aug 7 16:11:37 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:51:07 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75406990/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 2003f94..b306b8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -234,7 +234,6 @@ public class EditLogTailer {
   }
   
   public void stop() throws IOException {
-rollEditsRpcExecutor.shutdown();
 tailerThread.setShouldRun(false);
 tailerThread.interrupt();
 try {
@@ -242,6 +241,8 @@ public class EditLogTailer {
 } catch (InterruptedException e) {
   LOG.warn("Edit log tailer thread exited with an exception");
   throw new IOException(e);
+} finally {
+  rollEditsRpcExecutor.shutdown();
 }
   }
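
A minimal stand-alone repro (hypothetical, not from the patch) of the race the reordering closes: shutting the executor down while the worker thread can still submit work raises RejectedExecutionException, which is why shutdown() now happens in the finally block after the tailer thread is joined:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    public class ShutdownOrderSketch {
      public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Thread tailer = new Thread(() -> {
          try {
            executor.submit(() -> { /* triggerActiveLogRoll() stand-in */ });
          } catch (RejectedExecutionException e) {
            System.out.println("submit after shutdown: " + e);
          }
        });
        executor.shutdown();  // old order: executor terminated first...
        tailer.start();       // ...while the tailer may still submit work
        tailer.join();
      }
    }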
   





[10/14] hadoop git commit: HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. Contributed by liaoyuxiangqin.

2018-08-20 Thread arp
HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. 
Contributed by liaoyuxiangqin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81fbfe50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81fbfe50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81fbfe50

Branch: refs/heads/branch-3.1
Commit: 81fbfe500e4ac1c57c681db20f620336a072898e
Parents: c15853f
Author: Xiao Chen 
Authored: Wed Aug 15 09:22:24 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:56:47 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81fbfe50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5eef12b..f6c8c94 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7475,9 +7475,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   ErasureCodingPolicy[] policies, final boolean logRetryCache)
   throws IOException {
 final String operationName = "addErasureCodingPolicies";
-String addECPolicyName = "";
+List<String> addECPolicyNames = new ArrayList<>(policies.length);
 checkOperation(OperationCategory.WRITE);
-List<AddErasureCodingPolicyResponse> responses = new ArrayList<>();
+List<AddErasureCodingPolicyResponse> responses =
+new ArrayList<>(policies.length);
 boolean success = false;
 writeLock();
 try {
@@ -7488,7 +7489,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   ErasureCodingPolicy newPolicy =
   FSDirErasureCodingOp.addErasureCodingPolicy(this, policy,
   logRetryCache);
-  addECPolicyName = newPolicy.getName();
+  addECPolicyNames.add(newPolicy.getName());
   responses.add(new AddErasureCodingPolicyResponse(newPolicy));
 } catch (HadoopIllegalArgumentException e) {
   responses.add(new AddErasureCodingPolicyResponse(policy, e));
@@ -7501,7 +7502,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   if (success) {
 getEditLog().logSync();
   }
-  logAuditEvent(success, operationName, addECPolicyName, null, null);
+  logAuditEvent(success, operationName, addECPolicyNames.toString(),
+  null, null);
 }
   }
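
Trivially, the audited value is now the list's own toString(), so a multi-policy request logs every name rather than only the last successful one, for example (policy names illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class AuditNamesExample {
      public static void main(String[] args) {
        List<String> addECPolicyNames = new ArrayList<>();
        addECPolicyNames.add("RS-6-3-1024k");
        addECPolicyNames.add("XOR-2-1-1024k");
        // Previously a single String kept just the last added policy's name.
        System.out.println(addECPolicyNames);  // [RS-6-3-1024k, XOR-2-1-1024k]
      }
    }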
 





[06/14] hadoop git commit: HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.

2018-08-20 Thread arp
HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/366517ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/366517ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/366517ba

Branch: refs/heads/branch-3.1
Commit: 366517ba994c9a075d220a03ae6b829e66449b55
Parents: a517ee4
Author: Márton Elek 
Authored: Wed Aug 8 17:27:57 2018 +0200
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:52:04 2018 -0700

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/366517ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index e2a9c55..ac20e6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -67,11 +67,11 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  .withInitial(() -> new HashMap<NodeNotChosenReason, Integer>());
 
   private enum NodeNotChosenReason {
-NOT_IN_SERVICE("the node isn't in service"),
+NOT_IN_SERVICE("the node is not in service"),
 NODE_STALE("the node is stale"),
 NODE_TOO_BUSY("the node is too busy"),
 TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
-NOT_ENOUGH_STORAGE_SPACE("no enough storage space to place the block");
+NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
 
 private final String text;
 





[14/14] hadoop git commit: HADOOP-9214. Create a new touch command to allow modifying atime and mtime. Contributed by Hrishikesh Gadre.

2018-08-20 Thread arp
HADOOP-9214. Create a new touch command to allow modifying atime and mtime. 
Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3712b79b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3712b79b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3712b79b

Branch: refs/heads/branch-3.1
Commit: 3712b79b38754f5e1710d29ffc3bb3576bacf02e
Parents: a630a27
Author: Xiao Chen 
Authored: Fri Aug 17 10:53:22 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:59:14 2018 -0700

--
 .../org/apache/hadoop/fs/shell/FsCommand.java   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |  85 
 .../apache/hadoop/fs/shell/TouchCommands.java   | 198 +++
 .../src/site/markdown/FileSystemShell.md|  32 +++
 .../org/apache/hadoop/fs/TestFsShellTouch.java  | 103 ++
 .../src/test/resources/testConf.xml |  51 +
 6 files changed, 385 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 4a13414..784bbf3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -66,7 +66,7 @@ abstract public class FsCommand extends Command {
 factory.registerCommands(Tail.class);
 factory.registerCommands(Head.class);
 factory.registerCommands(Test.class);
-factory.registerCommands(Touch.class);
+factory.registerCommands(TouchCommands.class);
 factory.registerCommands(Truncate.class);
 factory.registerCommands(SnapshotCommands.class);
 factory.registerCommands(XAttrCommands.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
deleted file mode 100644
index a6c751e..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
-  public static void registerCommands(CommandFactory factory) {
-factory.addClass(Touchz.class, "-touchz");
-  }
-
-  /**
-   * (Re)create zero-length file at the specified path.
-   * This will be replaced by a more UNIX-like touch when files may be
-   * modified.
-   */
-  public static class Touchz extends Touch {
-public static final String NAME = "touchz";
-public static final String USAGE = "<path> ...";
-public static final String DESCRIPTION =
-  "Creates a file of zero length " +
-  "at  with current time as the timestamp of that . " +
-  "An error is returned if the file exists with non-zero length\n";
-
-@Override
-protected void processOptions(LinkedList args) {
-  CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
-  cf.parse

[12/14] hadoop git commit: HDFS-10240. Race between close/recoverLease leads to missing block. Contributed by Jinglun, zhouyingchao and Wei-Chiu Chuang.

2018-08-20 Thread arp
HDFS-10240. Race between close/recoverLease leads to missing block. Contributed 
by Jinglun, zhouyingchao and Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86565005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86565005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86565005

Branch: refs/heads/branch-3.1
Commit: 865650052b07c8a20d51306202354ac770ed36d5
Parents: 0424715
Author: Wei-Chiu Chuang 
Authored: Thu Aug 16 16:29:38 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:57:40 2018 -0700

--
 .../hdfs/server/blockmanagement/BlockInfo.java  |  4 ++
 .../server/blockmanagement/BlockManager.java|  4 ++
 .../hdfs/server/datanode/BPServiceActor.java|  3 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 65 
 .../hdfs/server/datanode/DataNodeTestUtils.java |  3 +
 6 files changed, 88 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 111ade1..43f4f47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -262,6 +262,10 @@ public abstract class BlockInfo extends Block
 return getBlockUCState().equals(BlockUCState.COMPLETE);
   }
 
+  public boolean isUnderRecovery() {
+return getBlockUCState().equals(BlockUCState.UNDER_RECOVERY);
+  }
+
   public final boolean isCompleteOrCommitted() {
 final BlockUCState state = getBlockUCState();
 return state.equals(BlockUCState.COMPLETE) ||

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bac89bf..6ab237f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -971,6 +971,10 @@ public class BlockManager implements BlockStatsMXBean {
   return false; // no blocks in file yet
 if(lastBlock.isComplete())
   return false; // already completed (e.g. by syncBlock)
+if(lastBlock.isUnderRecovery()) {
+  throw new IOException("Commit or complete block " + commitBlock +
+  ", whereas it is under recovery.");
+}
 
 final boolean committed = commitBlock(lastBlock, commitBlock);
 if (committed && lastBlock.isStriped()) {
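
The new guard makes commitOrCompleteLastBlock fail fast while the last block is still UNDER_RECOVERY, instead of racing a concurrent close(). For context, a hedged sketch of the client-side lease-recovery pattern involved; the polling interval is illustrative:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  // Trigger lease recovery and poll until the file is actually closed;
  // recoverLease() returns true immediately if no recovery was needed.
  static void recoverAndWait(DistributedFileSystem dfs, Path file)
      throws Exception {
    boolean closed = dfs.recoverLease(file);
    while (!closed) {
      Thread.sleep(1000);
      closed = dfs.isFileClosed(file);
    }
  }
}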

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a94d2df..6c167f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -682,7 +682,8 @@ class BPServiceActor implements Runnable {
 }
   }
 }
-if (ibrManager.sendImmediately() || sendHeartbeat) {
+if (!dn.areIBRDisabledForTests() &&
+(ibrManager.sendImmediately() || sendHeartbeat)) {
   ibrManager.sendIBRs(bpNamenode, bpRegistration,
   bpos.getBlockPoolId());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b

[09/14] hadoop git commit: HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

2018-08-20 Thread arp
HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

Change-Id: I1cea6e67fcec72702ad202775dee3373261ac5cd


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c15853f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c15853f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c15853f8

Branch: refs/heads/branch-3.1
Commit: c15853f87ae1ce4a474494f88407786b3a644068
Parents: 975d606
Author: Daniel Templeton 
Authored: Tue Aug 14 17:03:10 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:54:58 2018 -0700

--
 .../server/datanode/TestDirectoryScanner.java   | 42 +++-
 1 file changed, 32 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c15853f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f792523..893fe20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -40,6 +40,7 @@ import java.util.Random;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.io.FileUtils;
@@ -312,18 +313,29 @@ public class TestDirectoryScanner {
 return id;
   }
 
-  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long 
missingBlockFile,
-  long missingMemoryBlocks, long mismatchBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+  long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks)
+  throws IOException, InterruptedException, TimeoutException {
 scan(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
  missingMemoryBlocks, mismatchBlocks, 0);
   }
 
   private void scan(long totalBlocks, int diffsize, long missingMetaFile,
   long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
-  long duplicateBlocks) throws IOException {
+  long duplicateBlocks)
+  throws IOException, InterruptedException, TimeoutException {
 scanner.reconcile();
-verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
-missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+
+GenericTestUtils.waitFor(() -> {
+  try {
+verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  } catch (AssertionError ex) {
+return false;
+  }
+
+  return true;
+}, 50, 2000);
   }
 
   private void verifyStats(long totalBlocks, int diffsize, long 
missingMetaFile,
@@ -785,7 +797,8 @@ public class TestDirectoryScanner {
 }
   }
 
-  private float runThrottleTest(int blocks) throws IOException {
+  private float runThrottleTest(int blocks)
+  throws IOException, InterruptedException, TimeoutException {
 scanner.setRetainDiffs(true);
 scan(blocks, 0, 0, 0, 0, 0);
 scanner.shutdown();
@@ -1069,10 +1082,19 @@ public class TestDirectoryScanner {
   scanner.setRetainDiffs(true);
   scanner.reconcile();
   //Check blocks in corresponding BP
-  bpid = cluster.getNamesystem(1).getBlockPoolId();
-  verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
-  bpid = cluster.getNamesystem(3).getBlockPoolId();
-  verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+
+  GenericTestUtils.waitFor(() -> {
+try {
+  bpid = cluster.getNamesystem(1).getBlockPoolId();
+  verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+  bpid = cluster.getNamesystem(3).getBlockPoolId();
+  verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+} catch (AssertionError ex) {
+  return false;
+}
+
+return true;
+  }, 50, 2000);
 } finally {
   if (scanner != null) {
 scanner.shutdown();
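
The de-flaking technique used in both hunks above - retrying the assertions via GenericTestUtils.waitFor instead of asserting once against possibly stale state - generalizes to a small helper. A sketch, reusing the 50 ms / 2 s values from this patch:

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.test.GenericTestUtils;

final class RetryAssert {
  // Poll the assertion every 50 ms, giving up after 2 seconds.
  static void assertEventually(Runnable assertion)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(() -> {
      try {
        assertion.run();
        return true;   // assertion finally held
      } catch (AssertionError e) {
        return false;  // state not settled yet; poll again
      }
    }, 50, 2000);
  }
}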





[08/14] hadoop git commit: HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission. Contributed by He Xiaoqiao.

2018-08-20 Thread arp
HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission.
Contributed by He Xiaoqiao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/975d6068
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/975d6068
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/975d6068

Branch: refs/heads/branch-3.1
Commit: 975d60685eaf9961bdbd3547600b3e38bb088835
Parents: c0ac0a5
Author: drankye 
Authored: Mon Aug 13 17:32:56 2018 +0800
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:53:32 2018 -0700

--
 .../server/namenode/FSPermissionChecker.java|  2 +-
 .../namenode/TestINodeAttributeProvider.java| 43 ++--
 2 files changed, 41 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/975d6068/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 354b4e3..f70963c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -409,7 +409,7 @@ public class FSPermissionChecker implements 
AccessControlEnforcer {
 }
 final FsPermission mode = inode.getFsPermission();
 final AclFeature aclFeature = inode.getAclFeature();
-if (aclFeature != null) {
+if (aclFeature != null && aclFeature.getEntriesSize() > 0) {
   // It's possible that the inode has a default ACL but no access ACL.
   int firstEntry = aclFeature.getEntryAt(0);
   if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/975d6068/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 9c7dcd3..b3bab06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -57,6 +57,11 @@ public class TestINodeAttributeProvider {
   public static class MyAuthorizationProvider extends INodeAttributeProvider {
 
 public static class MyAccessControlEnforcer implements 
AccessControlEnforcer {
+  AccessControlEnforcer ace;
+
+  public MyAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
+this.ace = defaultEnforcer;
+  }
 
   @Override
   public void checkPermission(String fsOwner, String supergroup,
@@ -65,6 +70,13 @@ public class TestINodeAttributeProvider {
   int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
   FsAction parentAccess, FsAction access, FsAction subAccess,
   boolean ignoreEmptyDir) throws AccessControlException {
+if (ancestorIndex > 1
+&& inodes[1].getLocalName().equals("user")
+&& inodes[2].getLocalName().equals("acl")) {
+  this.ace.checkPermission(fsOwner, supergroup, ugi, inodeAttrs, 
inodes,
+  pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
+  ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
+}
 CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + 
"|" + access);
   }
 }
@@ -84,6 +96,7 @@ public class TestINodeAttributeProvider {
 final INodeAttributes inode) {
   CALLED.add("getAttributes");
   final boolean useDefault = useDefault(pathElements);
+  final boolean useNullAcl = useNullAclFeature(pathElements);
   return new INodeAttributes() {
 @Override
 public boolean isDirectory() {
@@ -126,7 +139,10 @@ public class TestINodeAttributeProvider {
 @Override
 public AclFeature getAclFeature() {
   AclFeature f;
-  if (useDefault) {
+  if (useNullAcl) {
+int[] entries = new int[0];
+f = new AclFeature(entries);
+  } else if (useDefault) {
 f = inode.getAclFeature();
   } else {
 AclEntry acl = new

[07/14] hadoop git commit: HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed by Kitti Nanasi.

2018-08-20 Thread arp
HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed 
by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0ac0a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0ac0a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0ac0a53

Branch: refs/heads/branch-3.1
Commit: c0ac0a533701a56b09b302a660c3233971512168
Parents: 366517b
Author: Xiao Chen 
Authored: Wed Aug 8 10:36:44 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:52:28 2018 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  2 +
 .../hadoop/hdfs/protocol/ECBlockGroupStats.java | 27 +++-
 .../hdfs/protocol/ReplicatedBlockStats.java | 28 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 21 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 10 +
 .../server/federation/router/ErasureCoding.java | 13 ++
 .../server/blockmanagement/BlockManager.java|  8 
 .../blockmanagement/LowRedundancyBlocks.java| 28 +
 .../hdfs/server/namenode/FSNamesystem.java  | 20 -
 .../hdfs/server/namenode/NameNodeMXBean.java| 18 
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 10 +
 .../TestLowRedundancyBlockQueues.java   | 43 +---
 .../namenode/metrics/TestNameNodeMetrics.java   | 12 ++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 32 +++
 15 files changed, 247 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index f1dbc50..4d59c6e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -240,6 +240,8 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `NumStaleStorages` | Number of storages marked as content stale (after 
NameNode restart/failover before first block report is received) |
 | `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
+| `HighestPriorityLowRedundancyReplicatedBlocks` | Current number of 
non-corrupt, low redundancy replicated blocks with the highest risk of loss 
(have 0 or 1 replica). Will be recovered with the highest priority. |
+| `HighestPriorityLowRedundancyECBlocks` | Current number of non-corrupt, low 
redundancy EC blocks with the highest risk of loss. Will be recovered with the 
highest priority. |
 | `NumFilesUnderConstruction` | Current number of files under construction |
 | `NumActiveClients` | Current number of active clients holding lease |
 | `HAState` | (HA-only) Current state of the NameNode: initializing or active 
or standby or stopping state |
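
A hedged sketch of reading the two new gauges remotely over JMX; the RMI port and the FSNamesystemState bean name are assumptions and should be verified against the NameNode's /jmx output:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class LowRedundancyGauges {
  public static void main(String[] args) throws Exception {
    // Hypothetical host/port; remote JMX must be enabled on the NameNode.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = jmxc.getMBeanServerConnection();
      ObjectName fsn =
          new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      System.out.println("Replicated: " + mbs.getAttribute(fsn,
          "HighestPriorityLowRedundancyReplicatedBlocks"));
      System.out.println("EC: " + mbs.getAttribute(fsn,
          "HighestPriorityLowRedundancyECBlocks"));
    }
  }
}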

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
index 9a8ad8c..3dde604 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -34,15 +34,26 @@ public final class ECBlockGroupStats {
   private final long missingBlockGroups;
   private final long bytesInFutureBlockGroups;
   private final long pendingDeletionBlocks;
+  private final Long highestPriorityLowRedundancyBlocks;
 
   public ECBlockGroupStats(long lowRedundancyBlockGroups,
   long corruptBlockGroups, long missingBlockGroups,
   long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+this(lowRedundancyBlockGroups, corruptBlockGroups, missingBlockGroups,
+bytesInFutureBlockGroups, pendingDeletionBlocks, null);
+  }
+
+  public ECBlockGroupStats(long lowRedundancyBlockGroups,
+  long corruptBlockGroups, long missingBlockGroups,
+  long bytesInFutureBlockGroups, long pendingDeletionBlocks,
+  Long highestPriorityLowRedundancyBlocks) {
 this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
 this.corruptBlockGroups = corruptBlockGrou

[13/14] hadoop git commit: HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.

2018-08-20 Thread arp
HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a630a27c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a630a27c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a630a27c

Branch: refs/heads/branch-3.1
Commit: a630a27c53107322a72f9b76e395c4537b09c3fc
Parents: 8656500
Author: Xiao Chen 
Authored: Thu Aug 16 22:32:32 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:57:51 2018 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java |  43 ++---
 .../kms/TestLoadBalancingKMSClientProvider.java | 181 ++-
 2 files changed, 193 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a630a27c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 9677b0d..e0ffdb1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -113,8 +113,8 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 return providers;
   }
 
-  private  T doOp(ProviderCallable op, int currPos)
-  throws IOException {
+  private  T doOp(ProviderCallable op, int currPos,
+  boolean isIdempotent) throws IOException {
 if (providers.length == 0) {
   throw new IOException("No providers configured !");
 }
@@ -143,7 +143,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 }
 RetryAction action = null;
 try {
-  action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+  action = retryPolicy.shouldRetry(ioe, 0, numFailovers, isIdempotent);
 } catch (Exception e) {
   if (e instanceof IOException) {
 throw (IOException)e;
@@ -201,7 +201,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   public Token[] call(KMSClientProvider provider) throws IOException {
 return provider.addDelegationTokens(renewer, credentials);
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   @Override
@@ -211,7 +211,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   public Long call(KMSClientProvider provider) throws IOException {
 return provider.renewDelegationToken(token);
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   @Override
@@ -222,7 +222,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 provider.cancelDelegationToken(token);
 return null;
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   // This request is sent to all providers in the load-balancing group
@@ -275,7 +275,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.generateEncryptedKey(encryptionKeyName);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -295,7 +295,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.decryptEncryptedKey(encryptedKeyVersion);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -315,7 +315,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.reencryptEncryptedKey(ekv);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -335,7 +335,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   provider.reencryptEncryptedKeys(ekvs);
   return null;
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch
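
The thread of this change: doOp now carries an isIdempotent flag into RetryPolicy.shouldRetry, so encrypt/decrypt/re-encrypt calls (safe to repeat) fail over across KMS instances, while delegation-token operations (which mutate server state) do not. A minimal sketch of the same decision, with an illustrative failover limit:

import java.io.IOException;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

final class IdempotencyAwareRetry {
  // Fail over up to 3 times (illustrative), then give up.
  private final RetryPolicy policy = RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, 3);

  RetryPolicy.RetryAction decide(IOException ioe, int failovers,
      boolean isIdempotent) throws Exception {
    // Non-idempotent ops (e.g. the delegation token calls above) pass
    // false, so they are not blindly replayed against another provider.
    return policy.shouldRetry(ioe, 0, failovers, isIdempotent);
  }
}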

[03/14] hadoop git commit: HDFS-13792. Fix FSN read/write lock metrics name. Contributed by Chao Sun.

2018-08-20 Thread arp
HDFS-13792. Fix FSN read/write lock metrics name. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4b75ad5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4b75ad5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4b75ad5

Branch: refs/heads/branch-3.1
Commit: e4b75ad583fe7d78a529288992f3721f29213f89
Parents: 0d155de
Author: Yiqun Lin 
Authored: Tue Aug 7 09:32:51 2018 +0800
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:50:47 2018 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4b75ad5/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 676ab0b..f1dbc50 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -252,8 +252,10 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in 
maintenance state |
 | `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in 
maintenance state |
 | `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering 
the maintenance state |
-| `FSN(Read/Write)Lock`*OperationName*`NumOps` | Total number of acquiring 
lock by operations |
-| `FSN(Read/Write)Lock`*OperationName*`AvgTime` | Average time of holding the 
lock by operations in milliseconds |
+| `FSN(Read/Write)Lock`*OperationName*`NanosNumOps` | Total number of 
acquiring lock by operations |
+| `FSN(Read/Write)Lock`*OperationName*`NanosAvgTime` | Average time of holding 
the lock by operations in nanoseconds |
+| `FSN(Read/Write)LockOverallNanosNumOps`  | Total number of acquiring lock by 
all operations |
+| `FSN(Read/Write)LockOverallNanosAvgTime` | Average time of holding the lock 
by all operations in nanoseconds |
 
 JournalNode
 ---





[01/14] hadoop git commit: HADOOP-14212. Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.

2018-08-20 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 44c4928b6 -> 3712b79b3


HADOOP-14212. Expose SecurityEnabled boolean field in JMX for other services 
besides NameNode. Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78fb14ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78fb14ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78fb14ba

Branch: refs/heads/branch-3.1
Commit: 78fb14ba4908cc9222bc5c1239f63f7c203564a0
Parents: 44c4928
Author: Wei-Chiu Chuang 
Authored: Tue Aug 14 17:19:00 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:49:24 2018 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 ++
 .../hdfs/server/datanode/DataNodeMXBean.java|  7 +++
 .../hdfs/server/namenode/SecondaryNameNode.java |  5 ++
 .../namenode/SecondaryNameNodeInfoMXBean.java   |  7 +++
 .../server/datanode/TestDataNodeMXBean.java | 47 +-
 .../server/namenode/TestSecureNameNode.java | 52 +++-
 .../yarn/server/nodemanager/NodeManager.java| 20 +++-
 .../server/resourcemanager/ResourceManager.java | 18 ++-
 8 files changed, 156 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1e9c57a..4823358 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3142,6 +3142,11 @@ public class DataNode extends ReconfigurableBase
 }
   }
 
+  @Override
+  public boolean isSecurityEnabled() {
+return UserGroupInformation.isSecurityEnabled();
+  }
+
   public void refreshNamenodes(Configuration conf) throws IOException {
 blockPoolManager.refreshNamenodes(conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index b5f0cd0..9d11e14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -146,4 +146,11 @@ public interface DataNodeMXBean {
* @return list of slow disks
*/
   String getSlowDisks();
+
+  /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   */
+  boolean isSecurityEnabled();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index ff83e34..4d7b747 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -722,6 +722,11 @@ public class SecondaryNameNode implements Runnable,
 return NetUtils.getHostPortString(nameNodeAddr);
   }
 
+  @Override
+  public boolean isSecurityEnabled() {
+return UserGroupInformation.isSecurityEnabled();
+  }
+
   @Override // SecondaryNameNodeInfoMXBean
   public long getStartTime() {
 return starttime;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
index 785c5ee..a042dc2

[05/14] hadoop git commit: HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code policies. Contributed by Ayush Saxena

2018-08-20 Thread arp
HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code 
policies. Contributed by Ayush Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a517ee4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a517ee4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a517ee4f

Branch: refs/heads/branch-3.1
Commit: a517ee4f5be39828bf18795462457fdf1bb0db00
Parents: 7540699
Author: Vinayakumar B 
Authored: Wed Aug 8 12:42:20 2018 +0530
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:51:53 2018 -0700

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  4 ++--
 .../src/site/markdown/HDFSErasureCoding.md  |  4 ++--
 .../test/resources/testErasureCodingConf.xml| 22 +++-
 3 files changed, 25 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 9b9fe14..56706b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -154,7 +154,7 @@ public class ECAdmin extends Configured implements Tool {
   listing.addRow("",
   "The path of the xml file which defines the EC policies to add");
   return getShortUsage() + "\n" +
-  "Add a list of erasure coding policies.\n" +
+  "Add a list of user defined erasure coding policies.\n" +
   listing.toString();
 }
 
@@ -268,7 +268,7 @@ public class ECAdmin extends Configured implements Tool {
   TableListing listing = AdminHelper.getOptionDescriptionListing();
   listing.addRow("", "The name of the erasure coding policy");
   return getShortUsage() + "\n" +
-  "Remove an erasure coding policy.\n" +
+  "Remove an user defined erasure coding policy.\n" +
   listing.toString();
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index f3b920f..2e8cbbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -203,7 +203,7 @@ Below are the details about each command.
 
 *  `[-addPolicies -policyFile <file>]`
 
- Add a list of erasure coding policies. Please refer 
etc/hadoop/user_ec_policies.xml.template for the example policy file. The 
maximum cell size is defined in property 
'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB. Currently 
HDFS allows the user to add 64 policies in total, and the added policy ID is in 
range of 64 to 127. Adding policy will fail if there are already 64 policies 
added.
+ Add a list of user defined erasure coding policies. Please refer 
etc/hadoop/user_ec_policies.xml.template for the example policy file. The 
maximum cell size is defined in property 
'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB. Currently 
HDFS allows the user to add 64 policies in total, and the added policy ID is in 
range of 64 to 127. Adding policy will fail if there are already 64 policies 
added.
 
  *  `[-listCodecs]`
 
@@ -211,7 +211,7 @@ Below are the details about each command.
 
*  `[-removePolicy -policy <policy>]`
 
- Remove an erasure coding policy.
+ Remove an user defined erasure coding policy.
 
*  `[-enablePolicy -policy <policy>]`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 2f7a6a7..9070367 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -154,7 +154,7 @@
   
 
   SubstringComparator
-  Add a list of erasure coding 
policies
+  Add a list of user defined erasure coding 
policies
 
 
   SubstringComparator
@@ -164,6 

[11/14] hadoop git commit: HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel.

2018-08-20 Thread arp
HDFS-13732. ECAdmin should print the policy name when an EC policy is set. 
Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04247152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04247152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04247152

Branch: refs/heads/branch-3.1
Commit: 0424715207cd07debeee5c624973e9db90d36fb6
Parents: 81fbfe5
Author: Xiao Chen 
Authored: Wed Aug 15 13:51:14 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:57:07 2018 -0700

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04247152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 56706b2..56d453b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -357,16 +357,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-if (ecPolicyName == null){
-  ecPolicyName = "default";
-}
-System.out.println("Set " + ecPolicyName + " erasure coding policy on" 
+
-" " + path);
+
+String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
+
+System.out.println("Set " + actualECPolicyName +
+" erasure coding policy on "+ path);
 RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + ecPolicyName + " erasure coding policy");
+  "files to " + actualECPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));
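
The fix queries the filesystem for the policy actually applied rather than echoing the (possibly null) requested name. A hedged sketch of the underlying API pair, with an illustrative directory path:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class EcPolicyNameSketch {
  static String setAndReport(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("/ecdir");           // illustrative path
    dfs.setErasureCodingPolicy(dir, null);   // null selects the default
    // Ask back what was really applied, e.g. "RS-6-3-1024k".
    return dfs.getErasureCodingPolicy(dir).getName();
  }
}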

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04247152/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 9070367..b47d50f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -710,7 +710,7 @@
   
 
   SubstringComparator
-  Set default erasure coding policy on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -728,11 +728,11 @@
   
 
   SubstringComparator
-  Set default erasure coding policy on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
 
   
 





[02/14] hadoop git commit: HADOOP-14212. Addendum patch: Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.

2018-08-20 Thread arp
HADOOP-14212. Addendum patch: Expose SecurityEnabled boolean field in JMX for 
other services besides NameNode. Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d155de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d155de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d155de1

Branch: refs/heads/branch-3.1
Commit: 0d155de1591781ab374803ad79c12cf1e91f1692
Parents: 78fb14b
Author: Wei-Chiu Chuang 
Authored: Tue Aug 14 18:24:32 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 20 14:49:28 2018 -0700

--
 .../server/nodemanager/NodeManagerMXBean.java   | 38 +
 .../nodemanager/TestNodeManagerMXBean.java  | 56 
 .../resourcemanager/ResourceManagerMXBean.java  | 38 +
 .../TestResourceManagerMXBean.java  | 56 
 4 files changed, 188 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
new file mode 100644
index 000..b4ab0aa
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for NodeManager.
+ * End users shouldn't be implementing these interfaces, and instead
+ * access this information through the JMX APIs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface NodeManagerMXBean {
+  /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   * */
+  boolean isSecurityEnabled();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
new file mode 100644
index 000..80b915c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli

[1/2] hadoop git commit: HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & extended. Contributed by Steve Loughran.

2018-08-20 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3712b79b3 -> 95fcdc043
  refs/heads/trunk e0f6ffdba -> 34577d2c2


HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & 
extended.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34577d2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34577d2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34577d2c

Branch: refs/heads/trunk
Commit: 34577d2c21096046861d2deefdbb2638b411c687
Parents: e0f6ffd
Author: Steve Loughran 
Authored: Mon Aug 20 18:36:24 2018 -0700
Committer: Steve Loughran 
Committed: Mon Aug 20 18:36:24 2018 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |   9 +
 .../apache/hadoop/util/ShutdownHookManager.java | 169 --
 .../src/main/resources/core-default.xml |  16 +
 .../hadoop/util/TestShutdownHookManager.java| 328 +++
 4 files changed, 418 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34577d2c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index c7f32f9..b101b3b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -905,5 +905,14 @@ public class CommonConfigurationKeysPublic {
 
   public static final String HADOOP_TAGS_SYSTEM = "hadoop.tags.system";
   public static final String HADOOP_TAGS_CUSTOM = "hadoop.tags.custom";
+
+  /** Configuration option for the shutdown hook manager shutdown time:
+   *  {@value}. */
+  public static final String SERVICE_SHUTDOWN_TIMEOUT =
+  "hadoop.service.shutdown.timeout";
+
+  /** Default shutdown hook timeout: {@value} seconds. */
+  public static final long SERVICE_SHUTDOWN_TIMEOUT_DEFAULT = 30;
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34577d2c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 153f92b..2ca8e55 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.util;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -34,6 +40,9 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT_DEFAULT;
+
 /**
  * The ShutdownHookManager enables running shutdownHook
  * in a deterministic order, higher priority first.
@@ -42,53 +51,55 @@ import java.util.concurrent.atomic.AtomicBoolean;
  * This class registers a single JVM shutdownHook and run all the
  * shutdownHooks registered to it (to this class) in order based on their
  * priority.
+ *
+ * Unless a hook was registered with a shutdown explicitly set through
+ * {@link #addShutdownHook(Runnable, int, long, TimeUnit)},
+ * the shutdown time allocated to it is set by the configuration option
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT} in
+ * {@code core-site.xml}, with a default value of
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT_DEFAULT}
+ * seconds.
  */
-public class ShutdownHookManager {
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ShutdownHo
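
Per the new javadoc above, a hook that needs longer than the hadoop.service.shutdown.timeout default can carry its own timeout via the four-argument overload; a minimal sketch, with illustrative priority and timeout values:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.ShutdownHookManager;

public class SlowHookRegistration {
  public static void main(String[] args) {
    ShutdownHookManager.get().addShutdownHook(
        () -> System.out.println("slow cleanup would run here"),
        10,                      // priority: higher-priority hooks run first
        60, TimeUnit.SECONDS);   // explicit per-hook timeout from this patch
  }
}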

[2/2] hadoop git commit: HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & extended. Contributed by Steve Loughran.

2018-08-20 Thread stevel
HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & 
extended. Contributed by Steve Loughran.

Change-Id: Ifd4a6e3f796b4dc88e97f63066289e2534c77a29


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95fcdc04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95fcdc04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95fcdc04

Branch: refs/heads/branch-3.1
Commit: 95fcdc04355920697337db1136ffe463d4794616
Parents: 3712b79
Author: Steve Loughran 
Authored: Mon Aug 20 18:44:14 2018 -0700
Committer: Steve Loughran 
Committed: Mon Aug 20 18:48:49 2018 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |   9 +
 .../apache/hadoop/util/ShutdownHookManager.java | 169 --
 .../src/main/resources/core-default.xml |  16 +
 .../hadoop/util/TestShutdownHookManager.java| 328 +++
 4 files changed, 418 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95fcdc04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 132c9bf..7a91993 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -890,5 +890,14 @@ public class CommonConfigurationKeysPublic {
   HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
   public static final String HADOOP_SYSTEM_TAGS = "hadoop.system.tags";
   public static final String HADOOP_CUSTOM_TAGS = "hadoop.custom.tags";
+
+  /** Configuration option for the shutdown hook manager shutdown time:
+   *  {@value}. */
+  public static final String SERVICE_SHUTDOWN_TIMEOUT =
+  "hadoop.service.shutdown.timeout";
+
+  /** Default shutdown hook timeout: {@value} seconds. */
+  public static final long SERVICE_SHUTDOWN_TIMEOUT_DEFAULT = 30;
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95fcdc04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 153f92b..2ca8e55 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.util;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -34,6 +40,9 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT_DEFAULT;
+
 /**
  * The ShutdownHookManager enables running shutdownHook
  * in a deterministic order, higher priority first.
@@ -42,53 +51,55 @@ import java.util.concurrent.atomic.AtomicBoolean;
  * This class registers a single JVM shutdownHook and run all the
  * shutdownHooks registered to it (to this class) in order based on their
  * priority.
+ *
+ * Unless a hook was registered with a shutdown explicitly set through
+ * {@link #addShutdownHook(Runnable, int, long, TimeUnit)},
+ * the shutdown time allocated to it is set by the configuration option
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT} in
+ * {@code core-site.xml}, with a default value of
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT_DEFAULT}
+ * seconds.
  */
-public class ShutdownHookManager {
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ShutdownHookManager {
 
   pri

hadoop git commit: HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & extended. Contributed by Steve Loughran.

2018-08-20 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5e2e68d4b -> 53ce37ad2


HADOOP-15679. ShutdownHookManager shutdown time needs to be configurable & 
extended. Contributed by Steve Loughran.

Change-Id: Ifd4a6e3f796b4dc88e97f63066289e2534c77a29
(cherry picked from commit 95fcdc04355920697337db1136ffe463d4794616)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53ce37ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53ce37ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53ce37ad

Branch: refs/heads/branch-3.0
Commit: 53ce37ad29b4fe83a5e73a4fd685348e8d13e762
Parents: 5e2e68d
Author: Steve Loughran 
Authored: Mon Aug 20 19:18:37 2018 -0700
Committer: Steve Loughran 
Committed: Mon Aug 20 19:18:37 2018 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |   9 +
 .../apache/hadoop/util/ShutdownHookManager.java | 169 --
 .../src/main/resources/core-default.xml |  16 +
 .../hadoop/util/TestShutdownHookManager.java| 328 +++
 4 files changed, 418 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53ce37ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 63803c9..30d2f31 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -888,5 +888,14 @@ public class CommonConfigurationKeysPublic {
   "credential$",
   "oauth.*token$",
   HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
+
+  /** Configuration option for the shutdown hook manager shutdown time:
+   *  {@value}. */
+  public static final String SERVICE_SHUTDOWN_TIMEOUT =
+  "hadoop.service.shutdown.timeout";
+
+  /** Default shutdown hook timeout: {@value} seconds. */
+  public static final long SERVICE_SHUTDOWN_TIMEOUT_DEFAULT = 30;
+
 }
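
As a quick illustration of the two new keys, here is a minimal,
self-contained sketch (not part of the patch; the class name is made up)
that resolves the effective shutdown timeout the same way a service would.
Configuration.getTimeDuration() also accepts suffixed values such as "60s"
or "1m" in core-site.xml:

  import java.util.concurrent.TimeUnit;

  import org.apache.hadoop.conf.Configuration;

  import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT;
  import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT_DEFAULT;

  public class ShutdownTimeoutLookup {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Falls back to the 30-second default when the key is absent.
      long timeoutSecs = conf.getTimeDuration(SERVICE_SHUTDOWN_TIMEOUT,
          SERVICE_SHUTDOWN_TIMEOUT_DEFAULT, TimeUnit.SECONDS);
      System.out.println("effective shutdown timeout = " + timeoutSecs + "s");
    }
  }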
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53ce37ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 153f92b..2ca8e55 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.util;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -34,6 +40,9 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.SERVICE_SHUTDOWN_TIMEOUT_DEFAULT;
+
 /**
  * The ShutdownHookManager enables running shutdownHook
  * in a deterministic order, higher priority first.
@@ -42,53 +51,55 @@ import java.util.concurrent.atomic.AtomicBoolean;
 * This class registers a single JVM shutdownHook and runs all the
 * shutdownHooks registered to it (to this class) in order based on their
 * priority.
+ *
+ * Unless a hook was registered with a shutdown timeout explicitly set through
+ * {@link #addShutdownHook(Runnable, int, long, TimeUnit)},
+ * the shutdown time allocated to it is set by the configuration option
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT} in
+ * {@code core-site.xml}, with a default value of
+ * {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT_DEFAULT}
+ * seconds.
  */
-public class ShutdownHookManager {
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ShutdownHookManager {
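
For reference, a minimal usage sketch (assumed caller code, not from this
commit) of the per-hook timeout overload referenced in the javadoc above;
the priority and the 30-second budget are illustrative values:

  import java.util.concurrent.TimeUnit;

  import org.apache.hadoop.util.ShutdownHookManager;

  public class HookWithTimeout {
    public static void main(String[] args) {
      // Hooks added without a timeout keep the configured default; this
      // four-argument overload pins an explicit budget for one hook.
      ShutdownHookManager.get().addShutdownHook(
          () -> System.out.println("flushing buffers"), // illustrative hook body
          10,                     // priority: higher-priority hooks run first
          30, TimeUnit.SECONDS);  // per-hook shutdown timeout
    }
  }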

[2/2] hadoop git commit: HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for Enabling/Disabling Erasure coding policies which are already enabled/disabled. Contributed by Ayush Saxena

2018-08-20 Thread vinayakumarb
HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for 
Enabling/Disabling Erasure coding policies which are already enabled/disabled. 
Contributed by Ayush Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8df2eb81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8df2eb81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8df2eb81

Branch: refs/heads/branch-3.1
Commit: 8df2eb8119188b8e5515295523afc23046e1db81
Parents: 95fcdc0
Author: Vinayakumar B 
Authored: Tue Aug 21 09:33:19 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Aug 21 09:34:57 2018 +0530

--
 .../namenode/ErasureCodingPolicyManager.java| 15 +++
 .../server/namenode/FSDirErasureCodingOp.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 26 +++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +++---
 .../server/namenode/TestNamenodeRetryCache.java |  2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  2 +-
 6 files changed, 47 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df2eb81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a310da..e7de05b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -356,7 +356,7 @@ public final class ErasureCodingPolicyManager {
   /**
    * Disable an erasure coding policy by policyName.
    */
-  public synchronized void disablePolicy(String name) {
+  public synchronized boolean disablePolicy(String name) {
 ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
@@ -367,27 +367,32 @@ public final class ErasureCodingPolicyManager {
   enabledPoliciesByName.remove(name);
   enabledPolicies =
   enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+  info.setState(ErasureCodingPolicyState.DISABLED);
+  LOG.info("Disable the erasure coding policy " + name);
+  return true;
 }
-info.setState(ErasureCodingPolicyState.DISABLED);
-LOG.info("Disable the erasure coding policy " + name);
+return false;
   }
 
   /**
    * Enable an erasure coding policy by policyName.
    */
-  public synchronized void enablePolicy(String name) {
+  public synchronized boolean enablePolicy(String name) {
 final ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
   name + " does not exist");
 }
-
+if (enabledPoliciesByName.containsKey(name)) {
+  return false;
+}
 final ErasureCodingPolicy ecPolicy = info.getPolicy();
 enabledPoliciesByName.put(name, ecPolicy);
 info.setState(ErasureCodingPolicyState.ENABLED);
 enabledPolicies =
 enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
 LOG.info("Enable the erasure coding policy " + name);
+return true;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df2eb81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 3a32db4..f6a4093 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -252,11 +252,16 @@ final class FSDirErasureCodingOp {
    *  rebuilding
    * @throws IOException
    */
-  static void enableErasureCodingPolicy(final FSNamesystem fsn,
+  static boolean enableErasureCodingPolicy(final FSNamesystem fsn,
   String ecPolicyName, final boolean logRetryCache) throws IOException {
 Preconditions.checkNotNull(ecPolicyName);
-fsn.getErasureCodingPolicyManager().enablePolicy(ecPolicyName);
-fsn.getEditLog().lo

[1/2] hadoop git commit: HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for Enabling/Disabling Erasure coding policies which are already enabled/disabled. Contributed by Ayush Saxena

2018-08-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 95fcdc043 -> 8df2eb811
  refs/heads/trunk 34577d2c2 -> 770d9d9bb


HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for 
Enabling/Disabling Erasure coding policies which are already enabled/disabled. 
Contributed by Ayush Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/770d9d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/770d9d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/770d9d9b

Branch: refs/heads/trunk
Commit: 770d9d9bb686bacef601ec8c4f884dc5afa9d4e2
Parents: 34577d2
Author: Vinayakumar B 
Authored: Tue Aug 21 09:33:19 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Aug 21 09:33:19 2018 +0530

--
 .../namenode/ErasureCodingPolicyManager.java| 15 +++
 .../server/namenode/FSDirErasureCodingOp.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 26 +++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +++---
 .../server/namenode/TestNamenodeRetryCache.java |  2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  2 +-
 6 files changed, 47 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770d9d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a310da..e7de05b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -356,7 +356,7 @@ public final class ErasureCodingPolicyManager {
   /**
    * Disable an erasure coding policy by policyName.
    */
-  public synchronized void disablePolicy(String name) {
+  public synchronized boolean disablePolicy(String name) {
 ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
@@ -367,27 +367,32 @@ public final class ErasureCodingPolicyManager {
   enabledPoliciesByName.remove(name);
   enabledPolicies =
   enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+  info.setState(ErasureCodingPolicyState.DISABLED);
+  LOG.info("Disable the erasure coding policy " + name);
+  return true;
 }
-info.setState(ErasureCodingPolicyState.DISABLED);
-LOG.info("Disable the erasure coding policy " + name);
+return false;
   }
 
   /**
    * Enable an erasure coding policy by policyName.
    */
-  public synchronized void enablePolicy(String name) {
+  public synchronized boolean enablePolicy(String name) {
 final ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
   name + " does not exist");
 }
-
+if (enabledPoliciesByName.containsKey(name)) {
+  return false;
+}
 final ErasureCodingPolicy ecPolicy = info.getPolicy();
 enabledPoliciesByName.put(name, ecPolicy);
 info.setState(ErasureCodingPolicyState.ENABLED);
 enabledPolicies =
 enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
 LOG.info("Enable the erasure coding policy " + name);
+return true;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770d9d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 769c137..2ba840e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -253,11 +253,16 @@ final class FSDirErasureCodingOp {
    *  rebuilding
    * @throws IOException
    */
-  static void enableErasureCodingPolicy(final FSNamesystem fsn,
+  static boolean enableErasureCodingPolicy(final FSNamesystem fsn,
   String ecPolicyName, final boolean logRetryCache) throws IOException {
 Preconditions.checkNotNull(ecPolicyName);
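
The behavioural change is easiest to see in isolation. Below is a
self-contained sketch (illustrative names, not the actual HDFS classes) of
the pattern the patch introduces: enable/disable report whether the state
actually changed, so callers can skip the redundant log line:

  import java.util.HashMap;
  import java.util.Map;

  public class PolicyToggleSketch {
    private final Map<String, Boolean> enabled = new HashMap<>();

    synchronized boolean enablePolicy(String name) {
      if (Boolean.TRUE.equals(enabled.get(name))) {
        return false;           // already enabled: no state change, no log
      }
      enabled.put(name, true);
      System.out.println("Enable the erasure coding policy " + name);
      return true;
    }

    public static void main(String[] args) {
      PolicyToggleSketch m = new PolicyToggleSketch();
      m.enablePolicy("RS-6-3-1024k");   // logs once, returns true
      m.enablePolicy("RS-6-3-1024k");   // returns false, logs nothing
    }
  }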

hadoop git commit: YARN-8129. Improve error message for invalid value in fields attribute. Contributed by Abhishek Modi.

2018-08-20 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 770d9d9bb -> d3fef7a5c


YARN-8129. Improve error message for invalid value in fields attribute. 
Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fef7a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fef7a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fef7a5

Branch: refs/heads/trunk
Commit: d3fef7a5c5b83d27e87b5e49928254a7d1b935e5
Parents: 770d9d9
Author: Rohith Sharma K S 
Authored: Tue Aug 21 11:58:07 2018 +0530
Committer: Rohith Sharma K S 
Committed: Tue Aug 21 11:58:07 2018 +0530

--
 .../timelineservice/reader/TimelineReaderWebServicesUtils.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fef7a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index efaecd2..63529a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -214,7 +214,11 @@ public final class TimelineReaderWebServicesUtils {
 String[] strs = str.split(delimiter);
 EnumSet<Field> fieldList = EnumSet.noneOf(Field.class);
 for (String s : strs) {
-  fieldList.add(Field.valueOf(s.trim().toUpperCase()));
+  try {
+fieldList.add(Field.valueOf(s.trim().toUpperCase()));
+  } catch (IllegalArgumentException e) {
+throw new IllegalArgumentException(s + " is not a valid field.");
+  }
 }
 return fieldList;
   }
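
A standalone sketch of the same parse-and-rethrow pattern (stand-in enum;
the real code parses the timeline reader's "fields" query parameter):

  import java.util.EnumSet;

  public class FieldParseSketch {
    enum Field { ALL, EVENTS, INFO, METRICS }

    static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
      EnumSet<Field> fieldList = EnumSet.noneOf(Field.class);
      for (String s : str.split(delimiter)) {
        try {
          fieldList.add(Field.valueOf(s.trim().toUpperCase()));
        } catch (IllegalArgumentException e) {
          // Name the offending token instead of the bare enum-constant
          // error, which is the whole point of YARN-8129.
          throw new IllegalArgumentException(s + " is not a valid field.");
        }
      }
      return fieldList;
    }

    public static void main(String[] args) {
      System.out.println(parseFieldsStr("events,metrics", ","));  // [EVENTS, METRICS]
      parseFieldsStr("events,bogus", ",");  // throws: bogus is not a valid field.
    }
  }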

