[hadoop] branch branch-3.2 updated: YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph

2019-09-30 Thread sunilg
This is an automated email from the ASF dual-hosted git repository.

sunilg pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 52f815d  YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph
52f815d is described below

commit 52f815d39d9e5585770ae0a62cb1c328fea96e94
Author: Sunil G 
AuthorDate: Tue Oct 1 09:08:41 2019 +0530

YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph

(cherry picked from commit 137546a78a45adbfb5ef338c1c2984a97b3cecc2)
---
 .../hadoop-yarn/hadoop-yarn-client/pom.xml |  10 ++
 .../hadoop/yarn/client/cli/SchedConfCLI.java   |  89 +-
 .../hadoop/yarn/client/cli/TestSchedConfCLI.java   | 187 -
 .../scheduler/MutableConfigurationProvider.java|   2 +
 .../conf/FSSchedulerConfigurationStore.java|   5 +
 .../capacity/conf/InMemoryConfigurationStore.java  |   5 +
 .../capacity/conf/LeveldbConfigurationStore.java   |   7 +
 .../conf/MutableCSConfigurationProvider.java   |  46 -
 .../capacity/conf/YarnConfigurationStore.java  |   7 +
 .../capacity/conf/ZKConfigurationStore.java|   5 +
 .../server/resourcemanager/webapp/RMWSConsts.java  |   3 +
 .../resourcemanager/webapp/RMWebServices.java  |  31 
 .../conf/TestFSSchedulerConfigurationStore.java|  16 ++
 .../conf/TestMutableCSConfigurationProvider.java   |   8 +
 .../capacity/conf/TestZKConfigurationStore.java|   9 +
 .../TestRMWebServicesConfigurationMutation.java|  13 ++
 16 files changed, 431 insertions(+), 12 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index c21c381..5568734 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -65,6 +65,16 @@
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index be54553..daf4add 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.client.cli;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.WebResource.Builder;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
@@ -30,6 +31,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
@@ -56,6 +58,7 @@ public class SchedConfCLI extends Configured implements Tool {
   private static final String REMOVE_QUEUES_OPTION = "removeQueues";
   private static final String UPDATE_QUEUES_OPTION = "updateQueues";
   private static final String GLOBAL_OPTIONS = "globalUpdates";
+  private static final String FORMAT_CONF = "formatConfig";
   private static final String HELP_CMD = "help";
 
   private static final String CONF_ERR_MSG = "Specify configuration key " +
@@ -83,6 +86,9 @@ public class SchedConfCLI extends Configured implements Tool {
 "Update queue configurations");
 opts.addOption("global", GLOBAL_OPTIONS, true,
 "Update global scheduler configurations");
+opts.addOption("format", FORMAT_CONF, false,
+"Format Scheduler Configuration and reload from" +
+" capacity-scheduler.xml");
 opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
 
 int exitCode = -1;
@@ -101,6 +107,7 @@ public class SchedConfCLI extends Configured implements 
Tool {
 }
 
 boolean hasOption = false;
+boolean format = false;
 SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
 try {
   if (parsedCli.hasOption(ADD_QUEUES_OPTION)) {
@@ -121,6 +128,11 @@ public class SchedConfCLI 

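The new option follows the existing commons-cli pattern in SchedConfCLI: register a boolean flag, then test for it after parsing. A minimal standalone sketch of that pattern, taking only the option names from the diff (the surrounding class is illustrative, not the real CLI):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;

public class FormatOptionSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    // Boolean flag (hasArg = false), mirroring the added "format" option.
    opts.addOption("format", "formatConfig", false,
        "Format Scheduler Configuration and reload from capacity-scheduler.xml");
    CommandLine parsed = new GnuParser().parse(opts, args);
    if (parsed.hasOption("formatConfig")) {
      // The real CLI would call out to the ResourceManager here.
      System.out.println("scheduler configuration format requested");
    }
  }
}
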
[hadoop] branch trunk updated: YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph

2019-09-30 Thread sunilg
This is an automated email from the ASF dual-hosted git repository.

sunilg pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 137546a  YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph
137546a is described below

commit 137546a78a45adbfb5ef338c1c2984a97b3cecc2
Author: Sunil G 
AuthorDate: Tue Oct 1 09:08:41 2019 +0530

YARN-9864. Format CS Configuration present in Configuration Store. Contributed by Prabhu Joseph
---
 .../hadoop-yarn/hadoop-yarn-client/pom.xml |  10 ++
 .../hadoop/yarn/client/cli/SchedConfCLI.java   |  89 +-
 .../hadoop/yarn/client/cli/TestSchedConfCLI.java   | 187 -
 .../scheduler/MutableConfigurationProvider.java|   2 +
 .../conf/FSSchedulerConfigurationStore.java|   5 +
 .../capacity/conf/InMemoryConfigurationStore.java  |   5 +
 .../capacity/conf/LeveldbConfigurationStore.java   |   7 +
 .../conf/MutableCSConfigurationProvider.java   |  46 -
 .../capacity/conf/YarnConfigurationStore.java  |   7 +
 .../capacity/conf/ZKConfigurationStore.java|   5 +
 .../server/resourcemanager/webapp/RMWSConsts.java  |   3 +
 .../resourcemanager/webapp/RMWebServices.java  |  31 
 .../conf/TestFSSchedulerConfigurationStore.java|  16 ++
 .../conf/TestMutableCSConfigurationProvider.java   |   8 +
 .../capacity/conf/TestZKConfigurationStore.java|   9 +
 .../TestRMWebServicesConfigurationMutation.java|  13 ++
 16 files changed, 431 insertions(+), 12 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 3efbddd..88e5e24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -65,6 +65,16 @@
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index be54553..daf4add 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.client.cli;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.WebResource.Builder;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
@@ -30,6 +31,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
@@ -56,6 +58,7 @@ public class SchedConfCLI extends Configured implements Tool {
   private static final String REMOVE_QUEUES_OPTION = "removeQueues";
   private static final String UPDATE_QUEUES_OPTION = "updateQueues";
   private static final String GLOBAL_OPTIONS = "globalUpdates";
+  private static final String FORMAT_CONF = "formatConfig";
   private static final String HELP_CMD = "help";
 
   private static final String CONF_ERR_MSG = "Specify configuration key " +
@@ -83,6 +86,9 @@ public class SchedConfCLI extends Configured implements Tool {
 "Update queue configurations");
 opts.addOption("global", GLOBAL_OPTIONS, true,
 "Update global scheduler configurations");
+opts.addOption("format", FORMAT_CONF, false,
+"Format Scheduler Configuration and reload from" +
+" capacity-scheduler.xml");
 opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
 
 int exitCode = -1;
@@ -101,6 +107,7 @@ public class SchedConfCLI extends Configured implements 
Tool {
 }
 
 boolean hasOption = false;
+boolean format = false;
 SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
 try {
   if (parsedCli.hasOption(ADD_QUEUES_OPTION)) {
@@ -121,6 +128,11 @@ public class SchedConfCLI extends Configured implements 
Tool {
 hasOption = true;
 

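The trunk patch adds Jersey test-framework dependencies and imports WebResource and UserGroupInformation, which suggests the format action is driven through the ResourceManager's REST interface. A hedged Jersey 1.x client sketch of such a call; the host, port, and endpoint path below are assumptions for illustration (the real constant lives in RMWSConsts in the patch):

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class SchedConfFormatClientSketch {
  public static void main(String[] args) {
    Client client = Client.create();
    // "scheduler-conf/format" is a hypothetical path used for illustration.
    WebResource resource = client
        .resource("http://rm-host:8088")
        .path("ws").path("v1").path("cluster")
        .path("scheduler-conf").path("format");
    ClientResponse response = resource.get(ClientResponse.class);
    System.out.println("HTTP " + response.getStatus());
    client.destroy();
  }
}
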
[hadoop] branch branch-2 updated: HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new dc2b838  HDFS-14305. Fix serial number calculation in 
BlockTokenSecretManager to avoid token key ID overlap between NameNodes. 
Contributed by Konstantin V Shvachko.
dc2b838 is described below

commit dc2b838a8e6dfe58598cac8ec37546332eeedeb2
Author: Konstantin V Shvachko 
AuthorDate: Mon Sep 30 18:04:16 2019 -0700

HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to 
avoid token key ID overlap between NameNodes. Contributed by Konstantin V 
Shvachko.
---
 .../token/block/BlockTokenSecretManager.java   | 12 +++
 .../hdfs/security/token/block/TestBlockToken.java  | 24 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java |  5 ++---
 3 files changed, 34 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index a934232..dae89c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -116,8 +116,6 @@ public class BlockTokenSecretManager extends
 encryptionAlgorithm, nnIndex, numNNs, shouldWrapQOP);
 Preconditions.checkArgument(nnIndex >= 0);
 Preconditions.checkArgument(numNNs > 0);
-setSerialNo(new SecureRandom().nextInt());
-generateKeys();
   }
 
   public BlockTokenSecretManager(long keyUpdateInterval,
@@ -140,13 +138,19 @@ public class BlockTokenSecretManager extends
 this.encryptionAlgorithm = encryptionAlgorithm;
 this.shouldWrapQOP = shouldWrapQOP;
 this.timer = new Timer();
+setSerialNo(new SecureRandom().nextInt(Integer.MAX_VALUE));
+LOG.info("Block token key range: [" +
+nnRangeStart + ", " + (nnRangeStart + intRange) + ")");
 generateKeys();
   }
   
   @VisibleForTesting
-  public synchronized void setSerialNo(int serialNo) {
+  public synchronized void setSerialNo(int nextNo) {
 // we mod the serial number by the range and then add that times the index
-this.serialNo = (serialNo % intRange) + (nnRangeStart);
+this.serialNo = (nextNo % intRange) + (nnRangeStart);
+assert serialNo >= nnRangeStart && serialNo < (nnRangeStart + intRange) :
+  "serialNo " + serialNo + " is not in the designated range: [" +
+  nnRangeStart + ", " + (nnRangeStart + intRange) + ")";
   }
   
   public void setBlockPoolId(String blockPoolId) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 55e9d30..7d0c90f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -411,4 +411,28 @@ public class TestBlockToken {
   cluster.shutdown();
 }
   }
+
+  /**
+   * Verify that block token serialNo is always within the range designated
+   * to the NameNode.
+   */
+  @Test
+  public void testBlockTokenRanges() throws IOException {
+final int interval = 1024;
+final int numNNs = Integer.MAX_VALUE / interval;
+for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
+  BlockTokenSecretManager sm = new BlockTokenSecretManager(
+  blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
+  "fake-pool", null, false);
+  int rangeStart = nnIdx * interval;
+  for(int i = 0; i < interval * 3; i++) {
+int serialNo = sm.getSerialNoForTesting();
+assertTrue(
+"serialNo " + serialNo + " is not in the designated range: [" +
+rangeStart + ", " + (rangeStart + interval) + ")",
+serialNo >= rangeStart && serialNo < (rangeStart + interval));
+sm.updateKeys();
+  }
+}
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 43ab69d..ff90121 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ 

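The fix partitions the non-negative int space into one contiguous slice per NameNode: intRange = Integer.MAX_VALUE / numNNs, and NameNode i owns [i * intRange, (i + 1) * intRange). Bounding the random seed with nextInt(Integer.MAX_VALUE) keeps nextNo non-negative, so the modulo always lands inside the slice. A small self-contained sketch of that arithmetic (names other than intRange and nnRangeStart are illustrative):

import java.security.SecureRandom;

public class SerialRangeSketch {
  public static void main(String[] args) {
    int numNNs = 3;
    int intRange = Integer.MAX_VALUE / numNNs;   // width of each NN's slice
    SecureRandom random = new SecureRandom();
    for (int nnIndex = 0; nnIndex < numNNs; nnIndex++) {
      int nnRangeStart = intRange * nnIndex;
      int nextNo = random.nextInt(Integer.MAX_VALUE);  // always non-negative
      int serialNo = (nextNo % intRange) + nnRangeStart;
      System.out.printf("NN %d owns [%d, %d): serialNo=%d%n",
          nnIndex, nnRangeStart, nnRangeStart + intRange, serialNo);
    }
  }
}

With a plain nextInt(), a negative nextNo would make nextNo % intRange negative and push serialNo below the slice, which is exactly what the new assert in setSerialNo guards against.
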
[hadoop] branch branch-3.2 updated: HADOOP-16531. Log more timing information for slow RPCs. Contributed by Chen Zhang.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f5fb7d8  HADOOP-16531. Log more timing information for slow RPCs. 
Contributed by Chen Zhang.
f5fb7d8 is described below

commit f5fb7d8c188dde5653e9386c23251bd99a2cfb49
Author: Erik Krogen 
AuthorDate: Fri Sep 6 10:28:21 2019 -0700

HADOOP-16531. Log more timing information for slow RPCs. Contributed by 
Chen Zhang.

(cherry picked from commit a23417533e1ee052893baf207ec636c4993c5994)
---
 .../src/main/java/org/apache/hadoop/ipc/Server.java  | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index afc8379..92395a9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -506,7 +506,7 @@ public abstract class Server {
* Logs a Slow RPC Request.
*
* @param methodName - RPC Request method name
-   * @param processingTime - Processing Time.
+   * @param details - Processing Detail.
*
* if this request took too much time relative to other requests
* we consider that as a slow RPC. 3 is a magic number that comes
@@ -515,7 +515,8 @@ public abstract class Server {
* if and only if it falls above 99.7% of requests. We start this logic
* only once we have enough sample size.
*/
-  void logSlowRpcCalls(String methodName, Call call, long processingTime) {
+  void logSlowRpcCalls(String methodName, Call call,
+  ProcessingDetails details) {
 final int deviation = 3;
 
 // 1024 for minSampleSize just a guess -- not a number computed based on
@@ -526,10 +527,15 @@ public abstract class Server {
 final double threeSigma = rpcMetrics.getProcessingMean() +
 (rpcMetrics.getProcessingStdDev() * deviation);
 
+long processingTime =
+details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT);
 if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
 (processingTime > threeSigma)) {
-  LOG.warn("Slow RPC : {} took {} {} to process from client {}",
-  methodName, processingTime, RpcMetrics.TIMEUNIT, call);
+  LOG.warn(
+  "Slow RPC : {} took {} {} to process from client {},"
+  + " the processing detail is {}",
+  methodName, processingTime, RpcMetrics.TIMEUNIT, call,
+  details.toString());
   rpcMetrics.incrSlowRpc();
 }
   }
@@ -568,7 +574,7 @@ public abstract class Server {
 rpcDetailedMetrics.addProcessingTime(name, processingTime);
 callQueue.addResponseTime(name, call, details);
 if (isLogSlowRPC()) {
-  logSlowRpcCalls(name, call, processingTime);
+  logSlowRpcCalls(name, call, details);
 }
   }
 



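The heuristic flags a call as slow once at least 1024 samples exist and its processing time exceeds the mean plus three standard deviations (roughly the slowest 0.3% under a normal distribution). A minimal sketch of that check, with the metric values passed in directly instead of read from RpcMetrics:

public class SlowRpcSketch {
  static final int DEVIATION = 3;          // "3 is a magic number": ~99.7%
  static final int MIN_SAMPLE_SIZE = 1024;

  static boolean isSlow(double mean, double stdDev, long sampleCount,
      long processingTime) {
    double threeSigma = mean + stdDev * DEVIATION;
    return sampleCount > MIN_SAMPLE_SIZE && processingTime > threeSigma;
  }

  public static void main(String[] args) {
    // Mean 10ms, stddev 4ms: anything above 22ms is flagged once enough
    // samples have accumulated.
    System.out.println(isSlow(10.0, 4.0, 5000, 25));  // true
    System.out.println(isSlow(10.0, 4.0, 500, 25));   // false: too few samples
  }
}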


[hadoop] branch branch-3.1 updated: HADOOP-16531. Log more timing information for slow RPCs. Contributed by Chen Zhang.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 5e56914  HADOOP-16531. Log more timing information for slow RPCs. 
Contributed by Chen Zhang.
5e56914 is described below

commit 5e56914650a3ecbc39c3d4c19a59eb52f78fba55
Author: Erik Krogen 
AuthorDate: Fri Sep 6 10:28:21 2019 -0700

HADOOP-16531. Log more timing information for slow RPCs. Contributed by 
Chen Zhang.

(cherry picked from commit a23417533e1ee052893baf207ec636c4993c5994)
---
 .../src/main/java/org/apache/hadoop/ipc/Server.java  | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 4cdeefa..e34ce92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -507,7 +507,7 @@ public abstract class Server {
* Logs a Slow RPC Request.
*
* @param methodName - RPC Request method name
-   * @param processingTime - Processing Time.
+   * @param details - Processing Detail.
*
* if this request took too much time relative to other requests
* we consider that as a slow RPC. 3 is a magic number that comes
@@ -516,7 +516,8 @@ public abstract class Server {
* if and only if it falls above 99.7% of requests. We start this logic
* only once we have enough sample size.
*/
-  void logSlowRpcCalls(String methodName, Call call, long processingTime) {
+  void logSlowRpcCalls(String methodName, Call call,
+  ProcessingDetails details) {
 final int deviation = 3;
 
 // 1024 for minSampleSize just a guess -- not a number computed based on
@@ -527,10 +528,15 @@ public abstract class Server {
 final double threeSigma = rpcMetrics.getProcessingMean() +
 (rpcMetrics.getProcessingStdDev() * deviation);
 
+long processingTime =
+details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT);
 if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
 (processingTime > threeSigma)) {
-  LOG.warn("Slow RPC : {} took {} {} to process from client {}",
-  methodName, processingTime, RpcMetrics.TIMEUNIT, call);
+  LOG.warn(
+  "Slow RPC : {} took {} {} to process from client {},"
+  + " the processing detail is {}",
+  methodName, processingTime, RpcMetrics.TIMEUNIT, call,
+  details.toString());
   rpcMetrics.incrSlowRpc();
 }
   }
@@ -569,7 +575,7 @@ public abstract class Server {
 rpcDetailedMetrics.addProcessingTime(name, processingTime);
 callQueue.addResponseTime(name, call, details);
 if (isLogSlowRPC()) {
-  logSlowRpcCalls(name, call, processingTime);
+  logSlowRpcCalls(name, call, details);
 }
   }
 





[hadoop] branch branch-3.2 updated: HADOOP-15865. ConcurrentModificationException in Configuration.overlay() method. Contributed by Oleksandr Shevchenko.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 46e7277  HADOOP-15865. ConcurrentModificationException in 
Configuration.overlay() method. Contributed by Oleksandr Shevchenko.
46e7277 is described below

commit 46e72775f5b2156f49b15beac8b9cd5aad7f
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 1 19:56:51 2019 -0700

HADOOP-15865. ConcurrentModificationException in Configuration.overlay() 
method. Contributed by Oleksandr Shevchenko.

(cherry picked from commit e872ceb810a343da7fce7185dca78d3b9aad9b7b)
---
 .../src/main/java/org/apache/hadoop/conf/Configuration.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 3ab4f53..c6ceea5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3406,8 +3406,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   private void overlay(Properties to, Properties from) {
-    for (Entry<Object,Object> entry: from.entrySet()) {
-      to.put(entry.getKey(), entry.getValue());
+    synchronized (from) {
+      for (Entry<Object,Object> entry : from.entrySet()) {
+        to.put(entry.getKey(), entry.getValue());
+      }
     }
   }
 



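Properties extends Hashtable, whose synchronized methods lock the instance itself; iterating its entrySet while another thread mutates it throws ConcurrentModificationException. Synchronizing on the source Properties therefore blocks writers for the duration of the copy. A self-contained sketch of the fixed overlay (the surrounding class is illustrative):

import java.util.Map.Entry;
import java.util.Properties;

public class OverlaySketch {
  static void overlay(Properties to, Properties from) {
    // Hold from's monitor so Hashtable-synchronized writers must wait.
    synchronized (from) {
      for (Entry<Object, Object> entry : from.entrySet()) {
        to.put(entry.getKey(), entry.getValue());
      }
    }
  }

  public static void main(String[] args) {
    Properties from = new Properties();
    from.setProperty("fs.defaultFS", "hdfs://nn:8020");
    Properties to = new Properties();
    overlay(to, from);
    System.out.println(to.getProperty("fs.defaultFS"));  // hdfs://nn:8020
  }
}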


[hadoop] branch branch-3.1 updated: HADOOP-15865. ConcurrentModificationException in Configuration.overlay() method. Contributed by Oleksandr Shevchenko.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 72ef752  HADOOP-15865. ConcurrentModificationException in 
Configuration.overlay() method. Contributed by Oleksandr Shevchenko.
72ef752 is described below

commit 72ef752876347dead099ba31f8bbf63ff0f60c6e
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 1 19:56:51 2019 -0700

HADOOP-15865. ConcurrentModificationException in Configuration.overlay() 
method. Contributed by Oleksandr Shevchenko.

(cherry picked from commit e872ceb810a343da7fce7185dca78d3b9aad9b7b)
(cherry picked from commit 46e72775f5b2156f49b15beac8b9cd5aad7f)
---
 .../src/main/java/org/apache/hadoop/conf/Configuration.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 8a9aee6..72beb92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3395,8 +3395,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   private void overlay(Properties to, Properties from) {
-    for (Entry<Object,Object> entry: from.entrySet()) {
-      to.put(entry.getKey(), entry.getValue());
+    synchronized (from) {
+      for (Entry<Object,Object> entry : from.entrySet()) {
+        to.put(entry.getKey(), entry.getValue());
+      }
     }
   }
 





[hadoop] 02/02: HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8fc4e406ad52649753f463cee02631f1c0ebb23b
Author: Konstantin V Shvachko 
AuthorDate: Mon Sep 30 16:48:10 2019 -0700

HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to 
avoid token key ID overlap between NameNodes. Contributed by Konstantin V 
Shvachko.
---
 .../token/block/BlockTokenSecretManager.java   | 12 +++
 .../hdfs/security/token/block/TestBlockToken.java  | 23 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java |  5 ++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index a541976..1c5c19b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -121,8 +121,6 @@ public class BlockTokenSecretManager extends
 encryptionAlgorithm, nnIndex, numNNs, useProto, shouldWrapQOP);
 Preconditions.checkArgument(nnIndex >= 0);
 Preconditions.checkArgument(numNNs > 0);
-setSerialNo(new SecureRandom().nextInt());
-generateKeys();
   }
 
   /**
@@ -151,13 +149,19 @@ public class BlockTokenSecretManager extends
 this.useProto = useProto;
 this.shouldWrapQOP = shouldWrapQOP;
 this.timer = new Timer();
+setSerialNo(new SecureRandom().nextInt(Integer.MAX_VALUE));
+LOG.info("Block token key range: [" + 
+nnRangeStart + ", " + (nnRangeStart + intRange) + ")");
 generateKeys();
   }
 
   @VisibleForTesting
-  public synchronized void setSerialNo(int serialNo) {
+  public synchronized void setSerialNo(int nextNo) {
 // we mod the serial number by the range and then add that times the index
-this.serialNo = (serialNo % intRange) + (nnRangeStart);
+this.serialNo = (nextNo % intRange) + (nnRangeStart);
+assert serialNo >= nnRangeStart && serialNo < (nnRangeStart + intRange) :
+  "serialNo " + serialNo + " is not in the designated range: [" +
+  nnRangeStart + ", " + (nnRangeStart + intRange) + ")";
   }
 
   public void setBlockPoolId(String blockPoolId) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index c16b471..6f62042 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -814,4 +814,27 @@ public class TestBlockToken {
 testBadStorageIDCheckAccess(true);
   }
 
+  /**
+   * Verify that block token serialNo is always within the range designated
+   * to the NameNode.
+   */
+  @Test
+  public void testBlockTokenRanges() throws IOException {
+final int interval = 1024;
+final int numNNs = Integer.MAX_VALUE / interval;
+for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
+  BlockTokenSecretManager sm = new BlockTokenSecretManager(
+  blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
+  "fake-pool", null, false);
+  int rangeStart = nnIdx * interval;
+  for(int i = 0; i < interval * 3; i++) {
+int serialNo = sm.getSerialNoForTesting();
+assertTrue(
+"serialNo " + serialNo + " is not in the designated range: [" +
+rangeStart + ", " + (rangeStart + interval) + ")",
+serialNo >= rangeStart && serialNo < (rangeStart + interval));
+sm.updateKeys();
+  }
+}
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 43ab69d..ff90121 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -92,11 +92,10 @@ public class TestFailoverWithBlockTokensEnabled {
 
 setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
 setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
-setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);
 setAndCheckSerialNumber(Integer.MAX_VALUE / 2, btsm1, btsm2, btsm3);
-   

[hadoop] 01/02: Revert "HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by He Xiaoqiao."

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit bece1a1f70a66891d32c0fe44a1eec9a6f7998ea
Author: Konstantin V Shvachko 
AuthorDate: Sun Sep 29 13:11:31 2019 -0700

Revert "HDFS-14305. Fix serial number calculation in 
BlockTokenSecretManager to avoid token key ID overlap between NameNodes. 
Contributed by He Xiaoqiao."

This reverts commit 0feba4396f6e96c332743a39f965de7995b67bde.
---
 .../token/block/BlockTokenSecretManager.java   | 21 +--
 .../ha/TestFailoverWithBlockTokensEnabled.java | 31 +-
 2 files changed, 7 insertions(+), 45 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 20ad2bb..a541976 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -64,17 +64,6 @@ public class BlockTokenSecretManager extends
 
   public static final Token<BlockTokenIdentifier> DUMMY_TOKEN = new Token<BlockTokenIdentifier>();
 
-  /**
-   * In order to prevent serial No. of different NameNode from overlapping,
-   * Using 6 bits (identify 64=2^6 namenodes, and presuppose that no scenario
-   * where deploy more than 64 namenodes (include ANN, SBN, Observers, etc.)
-   * in one namespace) to identify index of NameNode, and the remainder 26 bits
-   * auto-incr to change the serial No.
-   */
-  @VisibleForTesting
-  public static final int NUM_VALID_BITS = 26;
-  private static final int LOW_MASK = (1 << NUM_VALID_BITS) - 1;
-
   private final boolean isMaster;
 
   /**
@@ -91,8 +80,8 @@ public class BlockTokenSecretManager extends
   private String blockPoolId;
   private final String encryptionAlgorithm;
 
-  private final int nnIndex;
-
+  private final int intRange;
+  private final int nnRangeStart;
   private final boolean useProto;
 
   private final boolean shouldWrapQOP;
@@ -151,7 +140,8 @@ public class BlockTokenSecretManager extends
   private BlockTokenSecretManager(boolean isMaster, long keyUpdateInterval,
   long tokenLifetime, String blockPoolId, String encryptionAlgorithm,
   int nnIndex, int numNNs, boolean useProto, boolean shouldWrapQOP) {
-this.nnIndex = nnIndex;
+this.intRange = Integer.MAX_VALUE / numNNs;
+this.nnRangeStart = intRange * nnIndex;
 this.isMaster = isMaster;
 this.keyUpdateInterval = keyUpdateInterval;
 this.tokenLifetime = tokenLifetime;
@@ -166,7 +156,8 @@ public class BlockTokenSecretManager extends
 
   @VisibleForTesting
   public synchronized void setSerialNo(int serialNo) {
-this.serialNo = (serialNo & LOW_MASK) | (nnIndex << NUM_VALID_BITS);
+// we mod the serial number by the range and then add that times the index
+this.serialNo = (serialNo % intRange) + (nnRangeStart);
   }
 
   public void setBlockPoolId(String blockPoolId) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 850b961..43ab69d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -116,36 +116,7 @@ public class TestFailoverWithBlockTokensEnabled {
   }
 }
   }
-
-  @Test
-  public void testSerialNumberMaskMatchIndex() {
-BlockTokenSecretManager btsm1 = cluster.getNamesystem(0).getBlockManager()
-.getBlockTokenSecretManager();
-BlockTokenSecretManager btsm2 = cluster.getNamesystem(1).getBlockManager()
-.getBlockTokenSecretManager();
-BlockTokenSecretManager btsm3 = cluster.getNamesystem(2).getBlockManager()
-.getBlockTokenSecretManager();
-int[] testSet = {0, Integer.MAX_VALUE, Integer.MIN_VALUE,
-Integer.MAX_VALUE / 2, Integer.MIN_VALUE / 2,
-Integer.MAX_VALUE / 3, Integer.MIN_VALUE / 3};
-for (int i = 0; i < testSet.length; i++) {
-  setAndCheckHighBitsSerialNumber(testSet[i], btsm1, 0);
-  setAndCheckHighBitsSerialNumber(testSet[i], btsm2, 1);
-  setAndCheckHighBitsSerialNumber(testSet[i], btsm3, 2);
-}
-  }
-
-  /**
-   * Check mask of serial number if equal to index of NameNode.
-   */
-  private void setAndCheckHighBitsSerialNumber(int serialNumber,
-  BlockTokenSecretManager btsm, int nnIndex) {
-btsm.setSerialNo(serialNumber);
-int serialNo = 

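For contrast with the modulo scheme that replaces it, the reverted approach packed the NameNode index into the high bits: the low NUM_VALID_BITS auto-increment and the bits above carry nnIndex. A small sketch of that encoding (the encode helper and main are illustrative):

public class BitmaskSchemeSketch {
  static final int NUM_VALID_BITS = 26;
  static final int LOW_MASK = (1 << NUM_VALID_BITS) - 1;

  static int encode(int serialNo, int nnIndex) {
    return (serialNo & LOW_MASK) | (nnIndex << NUM_VALID_BITS);
  }

  public static void main(String[] args) {
    int encoded = encode(12345, 2);
    System.out.println(encoded >>> NUM_VALID_BITS);  // 2: the NN index bits
    System.out.println(encoded & LOW_MASK);          // 12345: the counter bits
  }
}

The bit layout caps a namespace at 2^6 = 64 NameNodes, whereas the replacement scheme sizes each range from the actual numNNs.
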
[hadoop] branch branch-3.1 updated (db5b835 -> 8fc4e40)

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from db5b835  HADOOP-15014. Addendum: KMS should log the IP address of the 
clients. Contributed by Zsombor Gegesy.
 new bece1a1  Revert "HDFS-14305. Fix serial number calculation in 
BlockTokenSecretManager to avoid token key ID overlap between NameNodes. 
Contributed by He Xiaoqiao."
 new 8fc4e40  HDFS-14305. Fix serial number calculation in 
BlockTokenSecretManager to avoid token key ID overlap between NameNodes. 
Contributed by Konstantin V Shvachko.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../token/block/BlockTokenSecretManager.java   | 31 ---
 .../hdfs/security/token/block/TestBlockToken.java  | 23 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java | 36 ++
 3 files changed, 39 insertions(+), 51 deletions(-)





[hadoop] 01/02: HADOOP-15014. KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 85aff9faccd4252f73994c79020b3590e323ed7c
Author: Zsombor Gegesy 
AuthorDate: Tue Apr 16 05:27:29 2019 -0700

HADOOP-15014. KMS should log the IP address of the clients. Contributed by 
Zsombor Gegesy.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 008766c119d9ed9d568f9458ed0c02136962da5b)
---
 .../key/kms/server/KMSExceptionsProvider.java  |  5 +-
 .../hadoop/crypto/key/kms/server/KMSMDCFilter.java | 58 +-
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
index 3d97753..ceaa8bc 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
@@ -111,9 +111,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
 UserGroupInformation ugi = KMSMDCFilter.getUgi();
 String method = KMSMDCFilter.getMethod();
 String url = KMSMDCFilter.getURL();
+String remoteClientAddress = KMSMDCFilter.getRemoteClientAddress();
 String msg = getOneLineMessage(ex);
-LOG.warn("User:'{}' Method:{} URL:{} Response:{}-{}", ugi, method, url,
-status, msg, ex);
+LOG.warn("User:'{}' Method:{} URL:{} From:{} Response:{}-{}", ugi, method,
+url, remoteClientAddress, status, msg, ex);
   }
 
 }
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
index 81591e5..f3c0bbd 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
 import 
org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -38,29 +40,40 @@ import java.io.IOException;
 public class KMSMDCFilter implements Filter {
 
   private static class Data {
-private UserGroupInformation ugi;
-private String method;
-private StringBuffer url;
+private final UserGroupInformation ugi;
+private final String method;
+private final String url;
+private final String remoteClientAddress;
 
-private Data(UserGroupInformation ugi, String method, StringBuffer url) {
+private Data(UserGroupInformation ugi, String method, String url,
+String remoteClientAddress) {
   this.ugi = ugi;
   this.method = method;
   this.url = url;
+  this.remoteClientAddress = remoteClientAddress;
 }
   }
 
   private static final ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
 
   public static UserGroupInformation getUgi() {
-return DATA_TL.get().ugi;
+Data data = DATA_TL.get();
+return data != null ? data.ugi : null;
   }
 
   public static String getMethod() {
-return DATA_TL.get().method;
+Data data = DATA_TL.get();
+return data != null ? data.method : null;
   }
 
   public static String getURL() {
-return DATA_TL.get().url.toString();
+Data data = DATA_TL.get();
+return data != null ? data.url : null;
+  }
+
+  public static String getRemoteClientAddress() {
+Data data = DATA_TL.get();
+return data != null ? data.remoteClientAddress : null;
   }
 
   @Override
@@ -72,22 +85,41 @@ public class KMSMDCFilter implements Filter {
   FilterChain chain)
   throws IOException, ServletException {
 try {
-  DATA_TL.remove();
+  clearContext();
   UserGroupInformation ugi = HttpUserGroupInformation.get();
-  String method = ((HttpServletRequest) request).getMethod();
-  StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
-  String queryString = ((HttpServletRequest) request).getQueryString();
+  HttpServletRequest httpServletRequest = (HttpServletRequest) request;
+  String method = httpServletRequest.getMethod();
+  StringBuffer requestURL = httpServletRequest.getRequestURL();
+  String queryString = httpServletRequest.getQueryString();
   if (queryString != null) {
 requestURL.append("?").append(queryString);
   }
-  DATA_TL.set(new Data(ugi, method, 

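The filter stores per-request data in a ThreadLocal and makes every getter null-safe, so lookups outside a request return null instead of throwing NullPointerException on DATA_TL.get(). A stripped-down, self-contained sketch of that pattern (names mirror KMSMDCFilter, but this is not the filter itself):

public class RequestContextSketch {
  private static final class Data {
    private final String method;
    private final String url;
    Data(String method, String url) {
      this.method = method;
      this.url = url;
    }
  }

  private static final ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();

  static void setContext(String method, String url) {
    DATA_TL.set(new Data(method, url));
  }

  static void clearContext() {
    DATA_TL.remove();
  }

  // Null-safe getter: no NPE when nothing has been set on this thread.
  static String getMethod() {
    Data data = DATA_TL.get();
    return data != null ? data.method : null;
  }

  public static void main(String[] args) {
    System.out.println(getMethod());  // null: nothing set yet
    setContext("GET", "/admin");
    System.out.println(getMethod());  // GET
    clearContext();
    System.out.println(getMethod());  // null again
  }
}
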
[hadoop] branch branch-3.2 updated (79359e2 -> f42c8a4)

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 79359e2  HADOOP-16461. Regression: FileSystem cache lock parses XML 
within the lock.
 new 85aff9f  HADOOP-15014. KMS should log the IP address of the clients. 
Contributed by Zsombor Gegesy.
 new f42c8a4  HADOOP-15014. Addendum: KMS should log the IP address of the 
clients. Contributed by Zsombor Gegesy.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../key/kms/server/KMSExceptionsProvider.java  |  5 +-
 .../hadoop/crypto/key/kms/server/KMSMDCFilter.java | 58 ++
 .../crypto/key/kms/server/TestKMSMDCFilter.java| 88 ++
 3 files changed, 136 insertions(+), 15 deletions(-)
 create mode 100644 
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java





[hadoop] 01/02: HADOOP-15014. KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b59911d841ec016c0b0ee3115c8efea4a84a6d1d
Author: Zsombor Gegesy 
AuthorDate: Tue Apr 16 05:27:29 2019 -0700

HADOOP-15014. KMS should log the IP address of the clients. Contributed by 
Zsombor Gegesy.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 008766c119d9ed9d568f9458ed0c02136962da5b)
---
 .../key/kms/server/KMSExceptionsProvider.java  |  5 +-
 .../hadoop/crypto/key/kms/server/KMSMDCFilter.java | 58 +-
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
index 3d97753..ceaa8bc 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
@@ -111,9 +111,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
 UserGroupInformation ugi = KMSMDCFilter.getUgi();
 String method = KMSMDCFilter.getMethod();
 String url = KMSMDCFilter.getURL();
+String remoteClientAddress = KMSMDCFilter.getRemoteClientAddress();
 String msg = getOneLineMessage(ex);
-LOG.warn("User:'{}' Method:{} URL:{} Response:{}-{}", ugi, method, url,
-status, msg, ex);
+LOG.warn("User:'{}' Method:{} URL:{} From:{} Response:{}-{}", ugi, method,
+url, remoteClientAddress, status, msg, ex);
   }
 
 }
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
index 81591e5..f3c0bbd 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
 import 
org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -38,29 +40,40 @@ import java.io.IOException;
 public class KMSMDCFilter implements Filter {
 
   private static class Data {
-private UserGroupInformation ugi;
-private String method;
-private StringBuffer url;
+private final UserGroupInformation ugi;
+private final String method;
+private final String url;
+private final String remoteClientAddress;
 
-private Data(UserGroupInformation ugi, String method, StringBuffer url) {
+private Data(UserGroupInformation ugi, String method, String url,
+String remoteClientAddress) {
   this.ugi = ugi;
   this.method = method;
   this.url = url;
+  this.remoteClientAddress = remoteClientAddress;
 }
   }
 
   private static final ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
 
   public static UserGroupInformation getUgi() {
-return DATA_TL.get().ugi;
+Data data = DATA_TL.get();
+return data != null ? data.ugi : null;
   }
 
   public static String getMethod() {
-return DATA_TL.get().method;
+Data data = DATA_TL.get();
+return data != null ? data.method : null;
   }
 
   public static String getURL() {
-return DATA_TL.get().url.toString();
+Data data = DATA_TL.get();
+return data != null ? data.url : null;
+  }
+
+  public static String getRemoteClientAddress() {
+Data data = DATA_TL.get();
+return data != null ? data.remoteClientAddress : null;
   }
 
   @Override
@@ -72,22 +85,41 @@ public class KMSMDCFilter implements Filter {
   FilterChain chain)
   throws IOException, ServletException {
 try {
-  DATA_TL.remove();
+  clearContext();
   UserGroupInformation ugi = HttpUserGroupInformation.get();
-  String method = ((HttpServletRequest) request).getMethod();
-  StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
-  String queryString = ((HttpServletRequest) request).getQueryString();
+  HttpServletRequest httpServletRequest = (HttpServletRequest) request;
+  String method = httpServletRequest.getMethod();
+  StringBuffer requestURL = httpServletRequest.getRequestURL();
+  String queryString = httpServletRequest.getQueryString();
   if (queryString != null) {
 requestURL.append("?").append(queryString);
   }
-  DATA_TL.set(new Data(ugi, method, 

[hadoop] 02/02: HADOOP-15014. Addendum: KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit db5b8358cae68ab99464481f36b5f7fa223c8e4a
Author: Zsombor Gegesy 
AuthorDate: Wed Aug 7 20:55:10 2019 -0700

HADOOP-15014. Addendum: KMS should log the IP address of the clients. 
Contributed by Zsombor Gegesy.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit b0131bc265453051820e54908e70d39433c227ab)
---
 .../crypto/key/kms/server/TestKMSMDCFilter.java| 88 ++
 1 file changed, 88 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
new file mode 100644
index 0000000..42d1dc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import javax.servlet.FilterChain;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test for {@link KMSMDCFilter}.
+ *
+ */
+public class TestKMSMDCFilter {
+
+  private static final String REMOTE_ADDRESS = "192.168.100.100";
+  private static final String URL = "/admin";
+  private static final String METHOD = "GET";
+
+  private KMSMDCFilter filter;
+  private HttpServletRequest httpRequest;
+  private HttpServletResponse httpResponse;
+
+  @Before
+  public void setUp() throws IOException {
+filter = new KMSMDCFilter();
+httpRequest = Mockito.mock(HttpServletRequest.class);
+httpResponse = Mockito.mock(HttpServletResponse.class);
+KMSMDCFilter.setContext(null, null, null, null);
+  }
+
+  @Test
+  public void testFilter() throws IOException, ServletException {
+when(httpRequest.getMethod()).thenReturn(METHOD);
+when(httpRequest.getRequestURL()).thenReturn(new StringBuffer(URL));
+when(httpRequest.getRemoteAddr()).thenReturn(REMOTE_ADDRESS);
+
+FilterChain filterChain = new FilterChain() {
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response)
+  throws IOException, ServletException {
+assertEquals("filter.remoteClientAddress", REMOTE_ADDRESS,
+KMSMDCFilter.getRemoteClientAddress());
+assertEquals("filter.method", METHOD, KMSMDCFilter.getMethod());
+assertEquals("filter.url", URL, KMSMDCFilter.getURL());
+  }
+};
+
+checkMDCValuesAreEmpty();
+filter.doFilter(httpRequest, httpResponse, filterChain);
+checkMDCValuesAreEmpty();
+  }
+
+  private void checkMDCValuesAreEmpty() {
+assertNull("getRemoteClientAddress", 
KMSMDCFilter.getRemoteClientAddress());
+assertNull("getMethod", KMSMDCFilter.getMethod());
+assertNull("getURL", KMSMDCFilter.getURL());
+assertNull("getUgi", KMSMDCFilter.getUgi());
+  }
+
+}





[hadoop] 02/02: HADOOP-15014. Addendum: KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f42c8a44ae686590595fc8aef4a2ca853c7be6c4
Author: Zsombor Gegesy 
AuthorDate: Wed Aug 7 20:55:10 2019 -0700

HADOOP-15014. Addendum: KMS should log the IP address of the clients. 
Contributed by Zsombor Gegesy.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit b0131bc265453051820e54908e70d39433c227ab)
---
 .../crypto/key/kms/server/TestKMSMDCFilter.java| 88 ++
 1 file changed, 88 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
new file mode 100644
index 0000000..42d1dc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import javax.servlet.FilterChain;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test for {@link KMSMDCFilter}.
+ *
+ */
+public class TestKMSMDCFilter {
+
+  private static final String REMOTE_ADDRESS = "192.168.100.100";
+  private static final String URL = "/admin";
+  private static final String METHOD = "GET";
+
+  private KMSMDCFilter filter;
+  private HttpServletRequest httpRequest;
+  private HttpServletResponse httpResponse;
+
+  @Before
+  public void setUp() throws IOException {
+filter = new KMSMDCFilter();
+httpRequest = Mockito.mock(HttpServletRequest.class);
+httpResponse = Mockito.mock(HttpServletResponse.class);
+KMSMDCFilter.setContext(null, null, null, null);
+  }
+
+  @Test
+  public void testFilter() throws IOException, ServletException {
+when(httpRequest.getMethod()).thenReturn(METHOD);
+when(httpRequest.getRequestURL()).thenReturn(new StringBuffer(URL));
+when(httpRequest.getRemoteAddr()).thenReturn(REMOTE_ADDRESS);
+
+FilterChain filterChain = new FilterChain() {
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response)
+  throws IOException, ServletException {
+assertEquals("filter.remoteClientAddress", REMOTE_ADDRESS,
+KMSMDCFilter.getRemoteClientAddress());
+assertEquals("filter.method", METHOD, KMSMDCFilter.getMethod());
+assertEquals("filter.url", URL, KMSMDCFilter.getURL());
+  }
+};
+
+checkMDCValuesAreEmpty();
+filter.doFilter(httpRequest, httpResponse, filterChain);
+checkMDCValuesAreEmpty();
+  }
+
+  private void checkMDCValuesAreEmpty() {
+assertNull("getRemoteClientAddress", 
KMSMDCFilter.getRemoteClientAddress());
+assertNull("getMethod", KMSMDCFilter.getMethod());
+assertNull("getURL", KMSMDCFilter.getURL());
+assertNull("getUgi", KMSMDCFilter.getUgi());
+  }
+
+}





[hadoop] branch branch-3.1 updated (b9dc2c1 -> db5b835)

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b9dc2c1  HADOOP-16461. Regression: FileSystem cache lock parses XML 
within the lock.
 new b59911d  HADOOP-15014. KMS should log the IP address of the clients. 
Contributed by Zsombor Gegesy.
 new db5b835  HADOOP-15014. Addendum: KMS should log the IP address of the 
clients. Contributed by Zsombor Gegesy.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../key/kms/server/KMSExceptionsProvider.java  |  5 +-
 .../hadoop/crypto/key/kms/server/KMSMDCFilter.java | 58 ++
 .../crypto/key/kms/server/TestKMSMDCFilter.java| 88 ++
 3 files changed, 136 insertions(+), 15 deletions(-)
 create mode 100644 hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSMDCFilter.java





[hadoop] branch branch-3.2 updated: HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 79359e2  HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.
79359e2 is described below

commit 79359e209110638e56f0d08b7ca635520ea32cc0
Author: Gopal V 
AuthorDate: Fri Jul 26 11:32:13 2019 +0100

HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.

Contributed by Gopal V.

Change-Id: If6654f850e9c24ee0d9519a46fd6269b18e1a7a4
(cherry picked from commit aebac6d2d2e612e400a7d73be67dafb47e239211)
---
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 35d653a..8f9088e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3350,6 +3350,9 @@ public abstract class FileSystem extends Configured
       }
 
       fs = createFileSystem(uri, conf);
+      final long timeout = conf.getTimeDuration(SERVICE_SHUTDOWN_TIMEOUT,
+          SERVICE_SHUTDOWN_TIMEOUT_DEFAULT,
+          ShutdownHookManager.TIME_UNIT_DEFAULT);
       synchronized (this) { // refetch the lock again
         FileSystem oldfs = map.get(key);
         if (oldfs != null) { // a file system is created while lock is releasing
@@ -3360,7 +3363,9 @@ public abstract class FileSystem extends Configured
         // now insert the new file system into the map
         if (map.isEmpty()
             && !ShutdownHookManager.get().isShutdownInProgress()) {
-          ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
+          ShutdownHookManager.get().addShutdownHook(clientFinalizer,
+              SHUTDOWN_HOOK_PRIORITY, timeout,
+              ShutdownHookManager.TIME_UNIT_DEFAULT);
         }
         fs.key = key;
         map.put(key, fs);
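The shape of the fix is worth noting: Configuration.getTimeDuration() can trigger a full reload and parse of the XML configuration, so the patch evaluates it before entering the synchronized block rather than inside it. A hedged sketch of the general pattern (the names below are placeholders, not Hadoop APIs):

import java.util.HashMap;
import java.util.Map;

// Sketch only: hoist expensive, lock-independent work out of a critical
// section so other threads are not serialized behind it.
public class HoistExpensiveWork {

  private static final Map<String, Long> CACHE = new HashMap<String, Long>();

  // Stands in for Configuration.getTimeDuration(), which may parse XML.
  private static long expensiveLookup() {
    return 30_000L;
  }

  static void register(String key) {
    long timeout = expensiveLookup();   // outside the lock: slow but uncontended
    synchronized (CACHE) {
      CACHE.put(key, timeout);          // under the lock: only the cheap part
    }
  }

  public static void main(String[] args) {
    register("service.shutdown.timeout");
    System.out.println(CACHE);
  }
}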





[hadoop] branch branch-3.1 updated: HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b9dc2c1  HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.
b9dc2c1 is described below

commit b9dc2c17de54c1f93cbb7fdb52533071c2918346
Author: Gopal V 
AuthorDate: Fri Jul 26 11:32:13 2019 +0100

HADOOP-16461. Regression: FileSystem cache lock parses XML within the lock.

Contributed by Gopal V.

Change-Id: If6654f850e9c24ee0d9519a46fd6269b18e1a7a4
(cherry picked from commit aebac6d2d2e612e400a7d73be67dafb47e239211)
---
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 043d36b..ad53acb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3350,6 +3350,9 @@ public abstract class FileSystem extends Configured
       }
 
       fs = createFileSystem(uri, conf);
+      final long timeout = conf.getTimeDuration(SERVICE_SHUTDOWN_TIMEOUT,
+          SERVICE_SHUTDOWN_TIMEOUT_DEFAULT,
+          ShutdownHookManager.TIME_UNIT_DEFAULT);
       synchronized (this) { // refetch the lock again
         FileSystem oldfs = map.get(key);
         if (oldfs != null) { // a file system is created while lock is releasing
@@ -3360,7 +3363,9 @@ public abstract class FileSystem extends Configured
         // now insert the new file system into the map
         if (map.isEmpty()
             && !ShutdownHookManager.get().isShutdownInProgress()) {
-          ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
+          ShutdownHookManager.get().addShutdownHook(clientFinalizer,
+              SHUTDOWN_HOOK_PRIORITY, timeout,
+              ShutdownHookManager.TIME_UNIT_DEFAULT);
         }
         fs.key = key;
         map.put(key, fs);





[hadoop] 01/02: Revert "HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by He Xiaoqiao."

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3912a6fc7dc53845bc877aa8e099248ae7b6f79a
Author: Konstantin V Shvachko 
AuthorDate: Sun Sep 29 13:11:31 2019 -0700

Revert "HDFS-14305. Fix serial number calculation in 
BlockTokenSecretManager to avoid token key ID overlap between NameNodes. 
Contributed by He Xiaoqiao."

This reverts commit 0feba4396f6e96c332743a39f965de7995b67bde.
---
 .../token/block/BlockTokenSecretManager.java   | 21 +--
 .../ha/TestFailoverWithBlockTokensEnabled.java | 31 +-
 2 files changed, 7 insertions(+), 45 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 57f84ea..77e175d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -65,17 +65,6 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
 
   public static final Token<BlockTokenIdentifier> DUMMY_TOKEN = new Token<BlockTokenIdentifier>();
 
-  /**
-   * In order to prevent serial No. of different NameNode from overlapping,
-   * Using 6 bits (identify 64=2^6 namenodes, and presuppose that no scenario
-   * where deploy more than 64 namenodes (include ANN, SBN, Observers, etc.)
-   * in one namespace) to identify index of NameNode, and the remainder 26 bits
-   * auto-incr to change the serial No.
-   */
-  @VisibleForTesting
-  public static final int NUM_VALID_BITS = 26;
-  private static final int LOW_MASK = (1 << NUM_VALID_BITS) - 1;
-
   private final boolean isMaster;
 
   /**
@@ -92,8 +81,8 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
   private String blockPoolId;
   private final String encryptionAlgorithm;
 
-  private final int nnIndex;
-
+  private final int intRange;
+  private final int nnRangeStart;
   private final boolean useProto;
 
   private final boolean shouldWrapQOP;
@@ -152,7 +141,8 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
   private BlockTokenSecretManager(boolean isMaster, long keyUpdateInterval,
       long tokenLifetime, String blockPoolId, String encryptionAlgorithm,
       int nnIndex, int numNNs, boolean useProto, boolean shouldWrapQOP) {
-    this.nnIndex = nnIndex;
+    this.intRange = Integer.MAX_VALUE / numNNs;
+    this.nnRangeStart = intRange * nnIndex;
     this.isMaster = isMaster;
     this.keyUpdateInterval = keyUpdateInterval;
     this.tokenLifetime = tokenLifetime;
@@ -167,7 +157,8 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
 
   @VisibleForTesting
   public synchronized void setSerialNo(int serialNo) {
-    this.serialNo = (serialNo & LOW_MASK) | (nnIndex << NUM_VALID_BITS);
+    // we mod the serial number by the range and then add that times the index
+    this.serialNo = (serialNo % intRange) + (nnRangeStart);
   }
 
   public void setBlockPoolId(String blockPoolId) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 850b961..43ab69d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -116,36 +116,7 @@ public class TestFailoverWithBlockTokensEnabled {
       }
     }
   }
-
-  @Test
-  public void testSerialNumberMaskMatchIndex() {
-    BlockTokenSecretManager btsm1 = cluster.getNamesystem(0).getBlockManager()
-        .getBlockTokenSecretManager();
-    BlockTokenSecretManager btsm2 = cluster.getNamesystem(1).getBlockManager()
-        .getBlockTokenSecretManager();
-    BlockTokenSecretManager btsm3 = cluster.getNamesystem(2).getBlockManager()
-        .getBlockTokenSecretManager();
-    int[] testSet = {0, Integer.MAX_VALUE, Integer.MIN_VALUE,
-        Integer.MAX_VALUE / 2, Integer.MIN_VALUE / 2,
-        Integer.MAX_VALUE / 3, Integer.MIN_VALUE / 3};
-    for (int i = 0; i < testSet.length; i++) {
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm1, 0);
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm2, 1);
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm3, 2);
-    }
-  }
-
-  /**
-   * Check mask of serial number if equal to index of NameNode.
-   */
-  private void setAndCheckHighBitsSerialNumber(int serialNumber,
-      BlockTokenSecretManager btsm, int nnIndex) {
-    btsm.setSerialNo(serialNumber);
-    int serialNo = 

[hadoop] 02/02: HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 665360ec6fc80196b3c206f131ff87dfd11b4b67
Author: Konstantin V Shvachko 
AuthorDate: Mon Sep 30 16:48:10 2019 -0700

HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.
---
 .../token/block/BlockTokenSecretManager.java   | 12 +++
 .../hdfs/security/token/block/TestBlockToken.java  | 23 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java |  5 ++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 77e175d..15bdc10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -122,8 +122,6 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
         encryptionAlgorithm, nnIndex, numNNs, useProto, shouldWrapQOP);
     Preconditions.checkArgument(nnIndex >= 0);
     Preconditions.checkArgument(numNNs > 0);
-    setSerialNo(new SecureRandom().nextInt());
-    generateKeys();
   }
 
   /**
@@ -152,13 +150,19 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
     this.useProto = useProto;
     this.shouldWrapQOP = shouldWrapQOP;
     this.timer = new Timer();
+    setSerialNo(new SecureRandom().nextInt(Integer.MAX_VALUE));
+    LOG.info("Block token key range: [{}, {})",
+        nnRangeStart, nnRangeStart + intRange);
     generateKeys();
   }
 
   @VisibleForTesting
-  public synchronized void setSerialNo(int serialNo) {
+  public synchronized void setSerialNo(int nextNo) {
     // we mod the serial number by the range and then add that times the index
-    this.serialNo = (serialNo % intRange) + (nnRangeStart);
+    this.serialNo = (nextNo % intRange) + (nnRangeStart);
+    assert serialNo >= nnRangeStart && serialNo < (nnRangeStart + intRange) :
+      "serialNo " + serialNo + " is not in the designated range: [" +
+      nnRangeStart + ", " + (nnRangeStart + intRange) + ")";
   }
 
   public void setBlockPoolId(String blockPoolId) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 4bdd34c..a89a66f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -815,4 +815,27 @@ public class TestBlockToken {
     testBadStorageIDCheckAccess(true);
   }
 
+  /**
+   * Verify that block token serialNo is always within the range designated
+   * to the NameNode.
+   */
+  @Test
+  public void testBlockTokenRanges() throws IOException {
+    final int interval = 1024;
+    final int numNNs = Integer.MAX_VALUE / interval;
+    for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
+      BlockTokenSecretManager sm = new BlockTokenSecretManager(
+          blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
+          "fake-pool", null, false);
+      int rangeStart = nnIdx * interval;
+      for(int i = 0; i < interval * 3; i++) {
+        int serialNo = sm.getSerialNoForTesting();
+        assertTrue(
+            "serialNo " + serialNo + " is not in the designated range: [" +
+            rangeStart + ", " + (rangeStart + interval) + ")",
+            serialNo >= rangeStart && serialNo < (rangeStart + interval));
+        sm.updateKeys();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 43ab69d..ff90121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -92,11 +92,10 @@ public class TestFailoverWithBlockTokensEnabled {
 
     setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
     setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
-    setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);
     setAndCheckSerialNumber(Integer.MAX_VALUE / 2, btsm1, btsm2, btsm3);
-
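To make the arithmetic of this change concrete: each NameNode now owns a disjoint interval of the int space, intRange = Integer.MAX_VALUE / numNNs wide and starting at nnIndex * intRange, instead of packing the NameNode index into the high bits as the reverted patch did. Since key IDs are derived from serialNo, disjoint intervals mean no overlap between NameNodes. A small self-contained illustration (the values are made up; only the formula comes from the commit):

// Worked example of the range partitioning in this commit.
public class SerialNoRanges {
  public static void main(String[] args) {
    int numNNs = 3;                                  // illustrative cluster size
    int intRange = Integer.MAX_VALUE / numNNs;       // 715827882
    int nextNo = 1234567890;                         // any non-negative seed
    for (int nnIndex = 0; nnIndex < numNNs; nnIndex++) {
      int nnRangeStart = intRange * nnIndex;
      int serialNo = (nextNo % intRange) + nnRangeStart;
      // serialNo always lands in [nnRangeStart, nnRangeStart + intRange)
      System.out.printf("nn%d owns [%d, %d), serialNo=%d%n",
          nnIndex, nnRangeStart, nnRangeStart + intRange, serialNo);
    }
  }
}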

[hadoop] branch branch-3.2 updated (bd5b59f -> 665360e)

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from bd5b59f  HADOOP-15849. Upgrade netty version to 3.10.6.
 new 3912a6f  Revert "HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by He Xiaoqiao."
 new 665360e  HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../token/block/BlockTokenSecretManager.java   | 31 ---
 .../hdfs/security/token/block/TestBlockToken.java  | 23 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java | 36 ++
 3 files changed, 39 insertions(+), 51 deletions(-)





[hadoop] branch trunk updated: HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.

2019-09-30 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b3275ab  HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.
b3275ab is described below

commit b3275ab1f2f4546ba4bdc0e48cfa60b5b05071b9
Author: Konstantin V Shvachko 
AuthorDate: Mon Sep 30 16:48:10 2019 -0700

HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by Konstantin V Shvachko.
---
 .../token/block/BlockTokenSecretManager.java   | 12 +++
 .../hdfs/security/token/block/TestBlockToken.java  | 23 ++
 .../ha/TestFailoverWithBlockTokensEnabled.java |  5 ++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 335bb9f..a56074a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -122,8 +122,6 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
         encryptionAlgorithm, nnIndex, numNNs, useProto, shouldWrapQOP);
     Preconditions.checkArgument(nnIndex >= 0);
     Preconditions.checkArgument(numNNs > 0);
-    setSerialNo(new SecureRandom().nextInt());
-    generateKeys();
   }
 
   /**
@@ -152,13 +150,19 @@ public class BlockTokenSecretManager extends SecretManager<BlockTokenIdentifier>
     this.useProto = useProto;
     this.shouldWrapQOP = shouldWrapQOP;
     this.timer = new Timer();
+    setSerialNo(new SecureRandom().nextInt(Integer.MAX_VALUE));
+    LOG.info("Block token key range: [{}, {})",
+        nnRangeStart, nnRangeStart + intRange);
     generateKeys();
   }
 
   @VisibleForTesting
-  public synchronized void setSerialNo(int serialNo) {
+  public synchronized void setSerialNo(int nextNo) {
     // we mod the serial number by the range and then add that times the index
-    this.serialNo = (serialNo % intRange) + (nnRangeStart);
+    this.serialNo = (nextNo % intRange) + (nnRangeStart);
+    assert serialNo >= nnRangeStart && serialNo < (nnRangeStart + intRange) :
+      "serialNo " + serialNo + " is not in the designated range: [" +
+      nnRangeStart + ", " + (nnRangeStart + intRange) + ")";
   }
 
   public void setBlockPoolId(String blockPoolId) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 20e0d46..d993b66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -819,4 +819,27 @@ public class TestBlockToken {
     testBadStorageIDCheckAccess(true);
   }
 
+  /**
+   * Verify that block token serialNo is always within the range designated
+   * to the NameNode.
+   */
+  @Test
+  public void testBlockTokenRanges() throws IOException {
+    final int interval = 1024;
+    final int numNNs = Integer.MAX_VALUE / interval;
+    for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
+      BlockTokenSecretManager sm = new BlockTokenSecretManager(
+          blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
+          "fake-pool", null, false);
+      int rangeStart = nnIdx * interval;
+      for(int i = 0; i < interval * 3; i++) {
+        int serialNo = sm.getSerialNoForTesting();
+        assertTrue(
+            "serialNo " + serialNo + " is not in the designated range: [" +
+            rangeStart + ", " + (rangeStart + interval) + ")",
+            serialNo >= rangeStart && serialNo < (rangeStart + interval));
+        sm.updateKeys();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 43ab69d..ff90121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -92,11 +92,10 @@ public class TestFailoverWithBlockTokensEnabled {
 
 

[hadoop] branch branch-3.1 updated: HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.

2019-09-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 1030ac8  HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.
1030ac8 is described below

commit 1030ac85bbfa44e775c03ff17e067d1087d7a475
Author: Surendra Singh Lilhore 
AuthorDate: Wed Feb 20 16:56:10 2019 +0530

HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.

(cherry picked from commit 41e18feda3f5ff924c87c4bed5b5cbbaecb19ae1)
(cherry picked from commit b93b127956508072904b44098fdc1c0dfc899606)
---
 .../datanode/metrics/DataNodeDiskMetrics.java  | 78 --
 1 file changed, 43 insertions(+), 35 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
index f2954e8..a8a6c85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
@@ -57,6 +57,10 @@ public class DataNodeDiskMetrics {
   private volatile Map<String, Map<DiskOp, Double>>
       diskOutliersStats = Maps.newHashMap();
 
+  // Adding for test purpose. When addSlowDiskForTesting() called from test
+  // code, status should not be overridden by daemon thread.
+  private boolean overrideStatus = true;
+
   public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
     this.dn = dn;
     this.detectionInterval = diskOutlierDetectionIntervalMs;
@@ -71,41 +75,43 @@ public class DataNodeDiskMetrics {
       @Override
       public void run() {
         while (shouldRun) {
-          Map<String, Double> metadataOpStats = Maps.newHashMap();
-          Map<String, Double> readIoStats = Maps.newHashMap();
-          Map<String, Double> writeIoStats = Maps.newHashMap();
-          FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
-          try {
-            fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
-            Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences
-                .iterator();
-            while (volumeIterator.hasNext()) {
-              FsVolumeSpi volume = volumeIterator.next();
-              DataNodeVolumeMetrics metrics = volumeIterator.next().getMetrics();
-              String volumeName = volume.getBaseURI().getPath();
-
-              metadataOpStats.put(volumeName,
-                  metrics.getMetadataOperationMean());
-              readIoStats.put(volumeName, metrics.getReadIoMean());
-              writeIoStats.put(volumeName, metrics.getWriteIoMean());
-            }
-          } finally {
-            if (fsVolumeReferences != null) {
-              try {
-                fsVolumeReferences.close();
-              } catch (IOException e) {
-                LOG.error("Error in releasing FS Volume references", e);
+          if (dn.getFSDataset() != null) {
+            Map<String, Double> metadataOpStats = Maps.newHashMap();
+            Map<String, Double> readIoStats = Maps.newHashMap();
+            Map<String, Double> writeIoStats = Maps.newHashMap();
+            FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
+            try {
+              fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
+              Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences
+                  .iterator();
+              while (volumeIterator.hasNext()) {
+                FsVolumeSpi volume = volumeIterator.next();
+                DataNodeVolumeMetrics metrics = volume.getMetrics();
+                String volumeName = volume.getBaseURI().getPath();
+
+                metadataOpStats.put(volumeName,
+                    metrics.getMetadataOperationMean());
+                readIoStats.put(volumeName, metrics.getReadIoMean());
+                writeIoStats.put(volumeName, metrics.getWriteIoMean());
+              }
+            } finally {
+              if (fsVolumeReferences != null) {
+                try {
+                  fsVolumeReferences.close();
+                } catch (IOException e) {
+                  LOG.error("Error in releasing FS Volume references", e);
+                }
               }
             }
-          }
-          if (metadataOpStats.isEmpty() && readIoStats.isEmpty() &&
-              writeIoStats.isEmpty()) {
-            LOG.debug("No disk stats available for detecting outliers.");
-            return;
-          }
+            if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
+                && writeIoStats.isEmpty()) {
+              LOG.debug("No disk stats available for detecting outliers.");
+              
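Beyond the re-indentation, the hunk above changes one behavior worth spelling out: the detection loop now checks that dn.getFSDataset() is non-null before touching volumes, so a daemon that starts before the dataset is initialized skips the cycle instead of failing. A hedged sketch of that guard pattern, with invented names (these are not the Hadoop classes):

// Sketch only: a polling daemon that tolerates a dependency which may not
// be initialized yet, instead of crashing on its first iteration.
public class PollingDaemon implements Runnable {

  private volatile boolean shouldRun = true;
  private volatile Object dataset;          // set later by another thread

  @Override
  public void run() {
    while (shouldRun) {
      if (dataset != null) {                // guard: do nothing until ready
        collectAndDetect();
      }
      try {
        Thread.sleep(1000L);                // detection interval
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  private void collectAndDetect() {
    // gather per-volume stats and look for outliers
  }
}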

[hadoop] branch trunk updated: HDDS-2205. checkstyle.sh reports wrong failure count

2019-09-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5bba59  HDDS-2205. checkstyle.sh reports wrong failure count
e5bba59 is described below

commit e5bba592a84a94e0545479b668e6925eb4b8858c
Author: Doroszlai, Attila 
AuthorDate: Mon Sep 30 09:35:14 2019 +0200

HDDS-2205. checkstyle.sh reports wrong failure count

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dev-support/checks/checkstyle.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 7a218a4..685bf14 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -36,7 +36,7 @@ find "." -name checkstyle-errors.xml -print0 \
   | tee "$REPORT_FILE"
 
 ## generate counter
-wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures"
+grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures"
 
 if [[ -s "${REPORT_FILE}" ]]; then
   exit 1





[hadoop] branch branch-3.1 updated: HADOOP-15849. Upgrade netty version to 3.10.6.

2019-09-30 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 5167a29  HADOOP-15849. Upgrade netty version to 3.10.6.
5167a29 is described below

commit 5167a29adb56f1b2bef7f6e59ba1471e8d3b398f
Author: Xiao Chen 
AuthorDate: Sat Oct 13 20:21:36 2018 -0700

HADOOP-15849. Upgrade netty version to 3.10.6.

(cherry picked from commit 8853fc8a55b07ecdc5ce8d85278b822e5675d97a)
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -776,7 +776,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty</artifactId>
-        <version>3.10.5.Final</version>
+        <version>3.10.6.Final</version>
       </dependency>
 
       <dependency>





[hadoop] branch branch-3.2 updated: HADOOP-15849. Upgrade netty version to 3.10.6.

2019-09-30 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new bd5b59f  HADOOP-15849. Upgrade netty version to 3.10.6.
bd5b59f is described below

commit bd5b59feead36079349b529968842e2dc946a6d9
Author: Xiao Chen 
AuthorDate: Sat Oct 13 20:21:36 2018 -0700

HADOOP-15849. Upgrade netty version to 3.10.6.

(cherry picked from commit 8853fc8a55b07ecdc5ce8d85278b822e5675d97a)
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -833,7 +833,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty</artifactId>
-        <version>3.10.5.Final</version>
+        <version>3.10.6.Final</version>
       </dependency>
 
       <dependency>





[hadoop] branch branch-2 updated: HADOOP-16544. update io.netty in branch-2.

2019-09-30 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 9482da7  HADOOP-16544. update io.netty in branch-2.
9482da7 is described below

commit 9482da7053a5a6acc825a9f62a278cfe5bf2ea1d
Author: Masatake Iwasaki 
AuthorDate: Sun Sep 22 08:41:51 2019 +0900

HADOOP-16544. update io.netty in branch-2.

Signed-off-by: Masatake Iwasaki 
---
 .../tools/TestDelegationTokenRemoteFetcher.java|  6 ++--
 .../org/apache/hadoop/mapred/ShuffleHandler.java   | 32 --
 hadoop-project/pom.xml |  4 +--
 3 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
index c75c722..744f07a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
@@ -243,7 +243,7 @@ public class TestDelegationTokenRemoteFetcher {
       ChannelBuffer cbuffer = ChannelBuffers.buffer(fileLength);
       cbuffer.writeBytes(out.getData());
       HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
-      response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
+      response.headers().set(HttpHeaders.Names.CONTENT_LENGTH,
           String.valueOf(fileLength));
       response.setContent(cbuffer);
       channel.write(response).addListener(ChannelFutureListener.CLOSE);
@@ -260,7 +260,7 @@ public class TestDelegationTokenRemoteFetcher {
       ChannelBuffer cbuffer = ChannelBuffers.buffer(bytes.length);
       cbuffer.writeBytes(bytes);
       HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
-      response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
+      response.headers().set(HttpHeaders.Names.CONTENT_LENGTH,
           String.valueOf(bytes.length));
       response.setContent(cbuffer);
       channel.write(response).addListener(ChannelFutureListener.CLOSE);
@@ -316,7 +316,7 @@ public class TestDelegationTokenRemoteFetcher {
         // Mimic SPNEGO authentication
         HttpResponse response = new DefaultHttpResponse(HTTP_1_1,
             HttpResponseStatus.OK);
-        response.addHeader("Set-Cookie", "hadoop-auth=1234");
+        response.headers().set("Set-Cookie", "hadoop-auth=1234");
         e.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
       } else if (request.getMethod() != GET) {
         e.getChannel().close();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 51cfa54..f2431f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -940,9 +940,12 @@ public class ShuffleHandler extends AuxiliaryService {
       }
       // Check whether the shuffle version is compatible
       if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
-          request.getHeader(ShuffleHeader.HTTP_HEADER_NAME))
+          request.headers() != null ?
+              request.headers().get(ShuffleHeader.HTTP_HEADER_NAME) : null)
           || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
-          request.getHeader(ShuffleHeader.HTTP_HEADER_VERSION))) {
+          request.headers() != null ?
+              request.headers()
+                  .get(ShuffleHeader.HTTP_HEADER_VERSION) : null)) {
         sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
       }
       final Map<String, List<String>> q =
@@ -1183,12 +1186,12 @@ public class ShuffleHandler extends AuxiliaryService {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Setting connection close header...");
       }
-      response.setHeader(HttpHeaders.CONNECTION, CONNECTION_CLOSE);
+      response.headers().set(HttpHeaders.CONNECTION, CONNECTION_CLOSE);
     } else {
-      response.setHeader(HttpHeaders.CONTENT_LENGTH,
+      response.headers().set(HttpHeaders.CONTENT_LENGTH,
           String.valueOf(contentLength));
-      response.setHeader(HttpHeaders.CONNECTION, HttpHeaders.KEEP_ALIVE);
-      response.setHeader(HttpHeaders.KEEP_ALIVE, "timeout="
+      response.headers().set(HttpHeaders.CONNECTION, HttpHeaders.KEEP_ALIVE);
+
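The edits in this patch are mechanical: netty 3.10 deprecates the header accessors on the message itself (setHeader/addHeader/getHeader) in favour of the headers() view, which is what HADOOP-16544 switches to throughout. A minimal before/after sketch against the netty 3.x API (simplified; verify the exact HttpHeaders methods against your netty version):

import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpResponse;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;

public class HeaderMigration {
  public static void main(String[] args) {
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    // Before (deprecated in netty 3.10):
    //   response.setHeader(HttpHeaders.Names.CONTENT_LENGTH, "42");
    // After: go through the headers() view instead.
    response.headers().set(HttpHeaders.Names.CONTENT_LENGTH, "42");
    System.out.println(response.headers().get(HttpHeaders.Names.CONTENT_LENGTH));
  }
}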

[hadoop] branch trunk updated: YARN-9859. Refactoring of OpportunisticContainerAllocator. Contributed by Abhishek Modi.

2019-09-30 Thread abmodi
This is an automated email from the ASF dual-hosted git repository.

abmodi pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4d3c580  YARN-9859. Refactoring of OpportunisticContainerAllocator. Contributed by Abhishek Modi.
4d3c580 is described below

commit 4d3c580b03475a6ec9323d11e6875c542f8e3f6d
Author: Abhishek Modi 
AuthorDate: Mon Sep 30 23:40:15 2019 +0530

YARN-9859. Refactoring of OpportunisticContainerAllocator. Contributed by Abhishek Modi.
---
 ...DistributedOpportunisticContainerAllocator.java | 357 +
 .../scheduler/OpportunisticContainerAllocator.java | 347 +++-
 .../TestOpportunisticContainerAllocator.java   |   2 +-
 .../yarn/server/nodemanager/NodeManager.java   |   3 +-
 .../scheduler/TestDistributedScheduler.java|   4 +-
 .../OpportunisticContainerAllocatorAMService.java  |   8 +-
 6 files changed, 416 insertions(+), 305 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java
new file mode 100644
index 0000000..da90167
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java
@@ -0,0 +1,357 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.scheduler;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;
+import org.apache.hadoop.yarn.server.metrics.OpportunisticSchedulerMetrics;
+import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * <p>
+ * The DistributedOpportunisticContainerAllocator allocates containers on a
+ * given list of nodes, after modifying the container sizes to respect the
+ * limits set by the ResourceManager. It tries to distribute the containers
+ * as evenly as possible.
+ * </p>
+ */
+public class DistributedOpportunisticContainerAllocator
+    extends OpportunisticContainerAllocator {
+
+  private static final int NODE_LOCAL_LOOP = 0;
+  private static final int RACK_LOCAL_LOOP = 1;
+  private static final int OFF_SWITCH_LOOP = 2;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DistributedOpportunisticContainerAllocator.class);
+
+  /**
+   * Create a new Opportunistic Container Allocator.
+   * @param tokenSecretManager TokenSecretManager
+   */
+  public DistributedOpportunisticContainerAllocator(
+      BaseContainerTokenSecretManager tokenSecretManager) {
+    super(tokenSecretManager);
+  }
+
+  /**
+   * Create a new Opportunistic Container Allocator.
+   * @param tokenSecretManager TokenSecretManager
+   * @param maxAllocationsPerAMHeartbeat max number of containers to be
+   *     allocated in one AM heartbeat
+   */
+  public DistributedOpportunisticContainerAllocator(
+      BaseContainerTokenSecretManager tokenSecretManager,
+      int maxAllocationsPerAMHeartbeat) {
+    super(tokenSecretManager, maxAllocationsPerAMHeartbeat);
+  }
+
+  @Override
+  public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
+      List<ResourceRequest> 

[hadoop] branch trunk updated (b46d823 -> 98ca07e)

2019-09-30 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b46d823  HDDS-2202. Remove unused import in OmUtils
 add 98ca07e  HDDS-2207. Update Ratis to latest snapshot. Contributed by Shashikant Bannerjee. (#1550)

No new revisions were added by this update.

Summary of changes:
 .../container/common/transport/server/ratis/XceiverServerRatis.java | 2 +-
 hadoop-hdds/pom.xml | 2 +-
 hadoop-ozone/pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)





[hadoop] branch trunk updated (a530ac3 -> b46d823)

2019-09-30 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from a530ac3  HDDS-2153. Add a config to tune max pending requests in Ratis leader
 add b46d823  HDDS-2202. Remove unused import in OmUtils

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java | 1 -
 1 file changed, 1 deletion(-)





[hadoop] branch trunk updated (d6b0a8d -> a530ac3)

2019-09-30 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from d6b0a8d  HDDS-2183. Container and pipline subcommands of scmcli should be grouped
 add a530ac3  HDDS-2153. Add a config to tune max pending requests in Ratis leader

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 5 +
 .../src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java| 5 +
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 8 
 .../common/transport/server/ratis/XceiverServerRatis.java | 6 ++
 4 files changed, 24 insertions(+)





[hadoop] branch trunk updated (760b523 -> d6b0a8d)

2019-09-30 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 760b523  Revert "HDFS-14305. Fix serial number calculation in BlockTokenSecretManager to avoid token key ID overlap between NameNodes. Contributed by He Xiaoqiao."
 add d6b0a8d  HDDS-2183. Container and pipline subcommands of scmcli should be grouped

No new revisions were added by this update.

Summary of changes:
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java | 22 --
 .../hdds/scm/cli/container/CloseSubcommand.java|  7 +++
 .../ContainerCommands.java}| 21 -
 .../hdds/scm/cli/container/CreateSubcommand.java   |  5 ++---
 .../hdds/scm/cli/container/DeleteSubcommand.java   |  7 +++
 .../hdds/scm/cli/container/InfoSubcommand.java |  5 ++---
 .../hdds/scm/cli/container/ListSubcommand.java |  5 ++---
 .../cli/pipeline/ActivatePipelineSubcommand.java   | 11 +--
 .../scm/cli/pipeline/ClosePipelineSubcommand.java  | 11 +--
 .../cli/pipeline/DeactivatePipelineSubcommand.java | 11 +--
 .../scm/cli/pipeline/ListPipelinesSubcommand.java  | 11 +--
 .../PipelineCommands.java} | 20 +++-
 12 files changed, 59 insertions(+), 77 deletions(-)
 copy hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/{ReplicationManagerCommands.java => container/ContainerCommands.java} (73%)
 copy hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/{ReplicationManagerCommands.java => pipeline/PipelineCommands.java} (73%)

