[hadoop] branch trunk updated: HDDS-1404. Fix typos in HDDS. Contributed by bianqi.

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c298415  HDDS-1404. Fix typos in HDDS. Contributed by bianqi.
c298415 is described below

commit c298415eb5c1922f9bd75dd6d0c4d90fb60c0ed3
Author: Nanda kumar 
AuthorDate: Fri Apr 12 14:12:10 2019 +0530

HDDS-1404. Fix typos in HDDS. Contributed by bianqi.
---
 hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto | 4 ++--
 .../common/src/main/proto/StorageContainerLocationProtocol.proto  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 7396eb3..fd572ad 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -462,6 +462,6 @@ service XceiverClientProtocolService {
 }
 
 service IntraDatanodeProtocolService {
-  // An intradatanode service to copy the raw containerdata betwen nodes
+  // An intradatanode service to copy the raw container data between nodes
  rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto);
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index d0f6c13..ade54a4 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -34,7 +34,7 @@ import "hdds.proto";
 * Request send to SCM asking where the container should be created.
 */
 message ContainerRequestProto {
-  // Ozone only support replciation of either 1 or 3.
+  // Ozone only support replication of either 1 or 3.
   required ReplicationFactor replicationFactor = 2;
   required ReplicationType  replicationType = 3;
   required string owner = 4;





[hadoop] branch trunk updated: HDDS-1416. MiniOzoneCluster should set custom value for hdds.datanode.replication.work.dir. Contributed by chencan.

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4003849  HDDS-1416. MiniOzoneCluster should set custom value for hdds.datanode.replication.work.dir. Contributed by chencan.
4003849 is described below

commit 4003849fa48f7ba041c35901272744f9bd089724
Author: Nanda kumar 
AuthorDate: Fri Apr 12 14:45:57 2019 +0530

HDDS-1416. MiniOzoneCluster should set custom value for hdds.datanode.replication.work.dir.
Contributed by chencan.
---
 .../src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 5 +
 1 file changed, 5 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 8018bab..4cfc950 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -540,13 +540,18 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
 Path metaDir = Paths.get(datanodeBaseDir, "meta");
 Path dataDir = Paths.get(datanodeBaseDir, "data", "containers");
 Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis");
+Path wrokDir = Paths.get(datanodeBaseDir, "data", "replication",
+"work");
 Files.createDirectories(metaDir);
 Files.createDirectories(dataDir);
 Files.createDirectories(ratisDir);
+Files.createDirectories(wrokDir);
 dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
 dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
dataDir.toString());
 dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
 ratisDir.toString());
+dnConf.set(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR,
+wrokDir.toString());
 
 hddsDatanodes.add(
 HddsDatanodeService.createHddsDatanodeService(args, dnConf));
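
The diff shows the whole recipe: create every per-datanode scratch directory up front, then point the matching configuration key at it before the datanode starts. A standalone sketch of the same idea, using the key name from the commit subject rather than the OzoneConfigKeys constant (treat the literal key as illustrative):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import org.apache.hadoop.conf.Configuration;

    public class ReplicationWorkDirSketch {
      public static void main(String[] args) throws IOException {
        String datanodeBaseDir = Files.createTempDirectory("dn").toString();
        // Create the replication work dir before the datanode starts,
        // mirroring the MiniOzoneClusterImpl change above.
        Path workDir = Paths.get(datanodeBaseDir, "data", "replication", "work");
        Files.createDirectories(workDir);
        Configuration dnConf = new Configuration();
        dnConf.set("hdds.datanode.replication.work.dir", workDir.toString());
        System.out.println(dnConf.get("hdds.datanode.replication.work.dir"));
      }
    }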





[hadoop] branch trunk updated: HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs (#691)

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3b08ac4  HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs (#691)
3b08ac4 is described below

commit 3b08ac46591e11323d8b1b1be742028bf5502bc0
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Apr 12 11:30:08 2019 +0200

HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs (#691)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java |  24 
 .../org/apache/hadoop/hdds/scm/HddsServerUtil.java |  22 
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   5 +-
 .../common/states/datanode/InitDatanodeState.java  |   4 +-
 .../org/apache/hadoop/hdds/server/ServerUtils.java |  25 ++---
 .../apache/hadoop/hdds/server/TestServerUtils.java | 123 +
 .../java/org/apache/hadoop/hdds/scm/ScmUtils.java  |  20 +---
 .../hadoop/hdds/scm/TestHddsServerUtils.java   |  49 
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  |  10 --
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |   2 +-
 10 files changed, 164 insertions(+), 120 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 2ca42d5..92ed9b6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -24,7 +24,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
-import java.nio.file.Paths;
 import java.util.Calendar;
 import java.util.Collection;
 import java.util.HashSet;
@@ -50,7 +49,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 
-import com.google.common.base.Strings;
 import com.google.common.net.HostAndPort;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
@@ -313,28 +311,6 @@ public final class HddsUtils {
 
 
   /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-if (dataNodeIDPath == null) {
-  String metaPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
-  if (Strings.isNullOrEmpty(metaPath)) {
-// this means meta data is not found, in theory should not happen at
-// this point because should've failed earlier.
-throw new IllegalArgumentException("Unable to locate meta data" +
-"directory when getting datanode id path");
-  }
-  dataNodeIDPath = Paths.get(metaPath,
-  ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
-}
-return dataNodeIDPath;
-  }
-
-  /**
* Returns the hostname for this datanode. If the hostname is not
* explicitly configured in the given config, then it is determined
* via the DNS class.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index 3ff6e66..9d1880c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -372,4 +372,26 @@ public final class HddsServerUtil {
 File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
 return (new File(metaDirPath, "ratis")).getPath();
   }
+
+  /**
+   * Get the path for datanode id file.
+   *
+   * @param conf - Configuration
+   * @return the path of datanode id as string
+   */
+  public static String getDatanodeIdFilePath(Configuration conf) {
+String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+if (dataNodeIDPath == null) {
+  File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+  if (metaDirPath == null) {
+// this means meta data is not found, in theory should not happen at
+// this point because should've failed earlier.
+throw new IllegalArgumentException("Unable to locate meta data" +
+"directory when getting datanode id path");
+  }
+  dataNodeIDPath = new File(metaDirPath,
+  ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
+}
+return dataNodeIDPath;
+  }
 }
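
The relocated helper is a two-step lookup: honor an explicitly configured datanode-id path, otherwise derive one under the Ozone metadata directory. A self-contained sketch of that fallback pattern, with illustrative key and file names standing in for the ScmConfigKeys constants:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;

    public final class IdFilePathResolver {
      // Illustrative key and default; the real ones live in ScmConfigKeys.
      static final String ID_PATH_KEY = "ozone.scm.datanode.id";
      static final String ID_FILE_DEFAULT = "datanode.id";

      static String resolve(Configuration conf, File metaDir) {
        String idPath = conf.get(ID_PATH_KEY);
        if (idPath == null) {
          if (metaDir == null) {
            throw new IllegalArgumentException(
                "Unable to locate meta data directory");
          }
          // Fall back to a well-known file under the metadata directory.
          idPath = new File(metaDir, ID_FILE_DEFAULT).toString();
        }
        return idPath;
      }
    }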
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java

[hadoop] branch trunk updated: HDDS-1426. Minor logging improvements for MiniOzoneChaosCluster

2019-04-12 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fd676e1  HDDS-1426. Minor logging improvements for MiniOzoneChaosCluster
fd676e1 is described below

commit fd676e190a4b575121d55e49357d3d497456f10c
Author: Arpit Agarwal 
AuthorDate: Fri Apr 12 11:34:43 2019 +0200

HDDS-1426. Minor logging improvements for MiniOzoneChaosCluster

Closes #730
---
 hadoop-ozone/integration-test/src/test/bin/start-chaos.sh| 2 +-
 .../test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java | 9 +++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
index 63e4a95..d98e9ca 100755
--- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
+++ b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-date=`date +"%m-%d-%y-%T"`
+date=`date +"%Y-%m-%d--%H-%M-%S-%Z"`
 fileformat=".MiniOzoneChaosCluster.log"
 heapformat=".dump"
 current="/tmp/"
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 52a2d40..8bef479 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -87,11 +87,15 @@ public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
   }
 
   private void failNodes() {
-for (int i = 0; i < getNumberOfNodesToFail(); i++) {
+final int numNodesToFail = getNumberOfNodesToFail();
+LOG.info("Will restart {} nodes to simulate failure", numNodesToFail);
+for (int i = 0; i < numNodesToFail; i++) {
   boolean failureMode = isFastRestart();
   int failedNodeIndex = getNodeToFail();
   try {
+LOG.info("Restarting DataNodeIndex {}", failedNodeIndex);
 restartHddsDatanode(failedNodeIndex, failureMode);
+LOG.info("Completed restarting DataNodeIndex {}", failedNodeIndex);
   } catch (Exception e) {
 
   }
@@ -118,7 +122,8 @@ public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
   }
 
   void startChaos(long initialDelay, long period, TimeUnit timeUnit) {
-LOG.info("Starting Chaos with failure period:{} unit:{}", period, 
timeUnit);
+LOG.info("Starting Chaos with failure period:{} unit:{} numDataNodes:{}",
+period, timeUnit, numDatanodes);
 scheduledFuture = executorService.scheduleAtFixedRate(this::fail,
 initialDelay, period, timeUnit);
   }





[hadoop] branch trunk updated: HDDS-1421. Avoid unnecessary object allocations in TracingUtil

2019-04-12 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a9b46c5  HDDS-1421. Avoid unnecessary object allocations in TracingUtil
a9b46c5 is described below

commit a9b46c58fe101ddad372f3651081dc38e5f06d37
Author: Arpit Agarwal 
AuthorDate: Fri Apr 12 11:51:58 2019 +0200

HDDS-1421. Avoid unnecessary object allocations in TracingUtil

Closes #722
---
 .../java/org/apache/hadoop/hdds/tracing/TracingUtil.java | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
index c274625..8e82a37 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
  */
 public final class TracingUtil {
 
+  private static final String NULL_SPAN_AS_STRING = "";
+
   private TracingUtil() {
   }
 
@@ -59,12 +61,13 @@ public final class TracingUtil {
* @return encoded tracing context.
*/
   public static String exportCurrentSpan() {
-StringBuilder builder = new StringBuilder();
 if (GlobalTracer.get().activeSpan() != null) {
+  StringBuilder builder = new StringBuilder();
   GlobalTracer.get().inject(GlobalTracer.get().activeSpan().context(),
   StringCodec.FORMAT, builder);
+  return builder.toString();
 }
-return builder.toString();
+return NULL_SPAN_AS_STRING;
   }
 
   /**
@@ -73,11 +76,12 @@ public final class TracingUtil {
* @return encoded tracing context.
*/
   public static String exportSpan(Span span) {
-StringBuilder builder = new StringBuilder();
 if (span != null) {
+  StringBuilder builder = new StringBuilder();
   GlobalTracer.get().inject(span.context(), StringCodec.FORMAT, builder);
+  return builder.toString();
 }
-return builder.toString();
+return NULL_SPAN_AS_STRING;
   }
 
   /**
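
The change is a small allocation pattern: build the StringBuilder only on the branch that has a span to serialize, and hand back a shared immutable constant on the empty path. A generic sketch of the same shape outside the tracing code:

    public final class Render {
      private static final String EMPTY = "";

      // Allocate the StringBuilder only when there is real work to do;
      // the empty case returns a shared constant instead of building a
      // fresh (empty) builder on every call.
      static String describe(Object value) {
        if (value != null) {
          StringBuilder sb = new StringBuilder();
          sb.append(value.getClass().getSimpleName())
            .append('@').append(value.hashCode());
          return sb.toString();
        }
        return EMPTY;
      }
    }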





[hadoop] branch trunk updated: HDDS-1419. Fix shellcheck errors in start-chaos.sh

2019-04-12 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 75ba719  HDDS-1419. Fix shellcheck errors in start-chaos.sh
75ba719 is described below

commit 75ba719ddd3655df97130dd294d9536d7654a422
Author: Doroszlai, Attila 
AuthorDate: Thu Apr 11 16:37:17 2019 +0200

HDDS-1419. Fix shellcheck errors in start-chaos.sh

Closes #728
---
 .../integration-test/src/test/bin/start-chaos.sh | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
index d98e9ca..5de6013 100755
--- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
+++ b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
@@ -15,21 +15,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-date=`date +"%Y-%m-%d--%H-%M-%S-%Z"`
+date=$(date +"%Y-%m-%d--%H-%M-%S-%Z")
 fileformat=".MiniOzoneChaosCluster.log"
 heapformat=".dump"
 current="/tmp/"
-filename=$current$date$fileformat
-heapdumpfile=$current$date$heapformat
+filename="${current}${date}${fileformat}"
+heapdumpfile="${current}${date}${heapformat}"
 
-export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={$heapdumpfile}"
+export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heapdumpfile}"
 
-echo "logging to" ${filename}
-echo "heapdump to" ${heapdumpfile}
+echo "logging to ${filename}"
+echo "heapdump to ${heapdumpfile}"
 
 echo "Starting MiniOzoneChaosCluster"
-mvn clean install -DskipTests > ${filename} 2>&1
+mvn clean install -DskipTests > "${filename}" 2>&1
 mvn exec:java \
   -Dexec.mainClass="org.apache.hadoop.ozone.TestMiniChaosOzoneCluster" \
   -Dexec.classpathScope=test \
-  -Dexec.args="$*" >> ${filename} 2>&1
+  -Dexec.args="$*" >> "${filename}" 2>&1





[hadoop] branch trunk updated: HDDS-1420. Tracing exception in DataNode HddsDispatcher

2019-04-12 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new abace70  HDDS-1420. Tracing exception in DataNode HddsDispatcher
abace70 is described below

commit abace709cc0acf25ecd87fba3da5c99e627101b4
Author: Arpit Agarwal 
AuthorDate: Fri Apr 12 12:21:10 2019 +0200

HDDS-1420. Tracing exception in DataNode HddsDispatcher

Closes #723
---
 .../commandhandler/CloseContainerCommandHandler.java |  2 --
 .../ozone/container/common/impl/TestHddsDispatcher.java  |  1 -
 .../commandhandler/TestCloseContainerCommandHandler.java |  1 -
 .../ozone/client/rpc/TestContainerStateMachineFailures.java  |  2 --
 .../apache/hadoop/ozone/container/ContainerTestHelper.java   | 12 
 5 files changed, 18 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index ac9d24e..d4c3ff7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -39,7 +39,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.UUID;
 
 /**
  * Handler for close container command received from SCM.
@@ -138,7 +137,6 @@ public class CloseContainerCommandHandler implements CommandHandler {
 command.setContainerID(containerId);
 command.setCloseContainer(
 ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
-command.setTraceID(UUID.randomUUID().toString());
 command.setDatanodeUuid(datanodeDetails.getUuidString());
 return command.build();
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 6e37b4c..d425820 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -266,7 +266,6 @@ public class TestHddsDispatcher {
 .newBuilder()
 .setContainerID(containerId)
 .setCmdType(ContainerProtos.Type.WriteChunk)
-.setTraceID(UUID.randomUUID().toString())
 .setDatanodeUuid(datanodeId)
 .setWriteChunk(writeChunkRequest)
 .build();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 731e74c..7962cb2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -305,7 +305,6 @@ public class TestCloseContainerCommandHandler {
 request.setContainerID(containerId.getId());
 request.setCreateContainer(
 ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
-request.setTraceID(UUID.randomUUID().toString());
 request.setDatanodeUuid(datanodeDetails.getUuidString());
 ozoneContainer.getWriteChannel().submitRequest(
 request.build(), pipelineID.getProtobuf());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index a8b7295..5739d48 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -43,7 +43,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.
@@ -181,7 +180,6 @@ public class TestContainerStateMachineFailures {
 request.setContainerID(con

[hadoop] branch trunk updated: HDDS-1285. Implement actions need to be taken after chill mode exit wait time. (#612)

2019-04-12 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a458c5b  HDDS-1285. Implement actions need to be taken after chill mode exit wait time. (#612)
a458c5b is described below

commit a458c5bd965d88cc8c786caf4f44a2cc7ada2306
Author: Bharat Viswanadham 
AuthorDate: Fri Apr 12 09:40:27 2019 -0700

HDDS-1285. Implement actions need to be taken after chill mode exit wait time. (#612)
---
 .../hdds/scm/chillmode/ChillModeHandler.java   |  32 +++-
 .../hdds/scm/server/StorageContainerManager.java   |  13 +-
 .../hdds/scm/chillmode/TestChillModeHandler.java   |   6 +-
 .../scm/server/TestSCMClientProtocolServer.java|   6 +-
 .../TestSCMChillModeWithPipelineRules.java | 202 +
 .../TestContainerStateManagerIntegration.java  |   2 +-
 .../hadoop/hdds/scm/pipeline/TestSCMRestart.java   |   2 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   6 +-
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |   6 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   2 +-
 .../apache/hadoop/ozone/om/TestScmChillMode.java   |   2 +-
 11 files changed, 267 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
index ae41821..fcad6e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
@@ -21,11 +21,17 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
import org.apache.hadoop.hdds.scm.chillmode.SCMChillModeManager.ChillModeStatus;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -36,12 +42,16 @@ import java.util.concurrent.atomic.AtomicBoolean;
  */
public class ChillModeHandler implements EventHandler<ChillModeStatus> {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ChillModeHandler.class);
+
   private final SCMClientProtocolServer scmClientProtocolServer;
   private final BlockManager scmBlockManager;
   private final long waitTime;
   private final AtomicBoolean isInChillMode = new AtomicBoolean(true);
   private final ReplicationManager replicationManager;
 
+  private final PipelineManager scmPipelineManager;
 
   /**
* ChillModeHandler, to handle the logic once we exit chill mode.
@@ -53,13 +63,15 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
   public ChillModeHandler(Configuration configuration,
   SCMClientProtocolServer clientProtocolServer,
   BlockManager blockManager,
-  ReplicationManager replicationManager) {
+  ReplicationManager replicationManager, PipelineManager pipelineManager) {
 Objects.requireNonNull(configuration, "Configuration cannot be null");
 Objects.requireNonNull(clientProtocolServer, "SCMClientProtocolServer " +
 "object cannot be null");
 Objects.requireNonNull(blockManager, "BlockManager object cannot be null");
 Objects.requireNonNull(replicationManager, "ReplicationManager " +
 "object cannot be null");
+Objects.requireNonNull(pipelineManager, "PipelineManager object cannot " +
+"be" + "null");
 this.waitTime = configuration.getTimeDuration(
 HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_CHILL_MODE_EXIT,
 HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_CHILL_MODE_EXIT_DEFAULT,
@@ -67,6 +79,7 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
 this.scmClientProtocolServer = clientProtocolServer;
 this.scmBlockManager = blockManager;
 this.replicationManager = replicationManager;
+this.scmPipelineManager = pipelineManager;
 
 final boolean chillModeEnabled = configuration.getBoolean(
 HddsConfigKeys.HDDS_SCM_CHILLMODE_ENABLED,
@@ -75,6 +88,8 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
 
   }
 
+
+
   /**
* Set ChillMode status based on
* {@link org.apache.hadoop.hdds.scm.events.SCMEvents#CHILL_MODE_STATUS}.
@@ -101,6 +116,7 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
   Thread.currentThread().

[hadoop] branch trunk updated: HDDS-1425. Ozone compose files are not compatible with the latest docker-compose. (#727)

2019-04-12 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0c1fec3  HDDS-1425. Ozone compose files are not compatible with the latest docker-compose. (#727)
0c1fec3 is described below

commit 0c1fec34f4b0980920b6357e81acd9d8a7fb3a94
Author: Elek, Márton 
AuthorDate: Fri Apr 12 18:45:36 2019 +0200

HDDS-1425. Ozone compose files are not compatible with the latest docker-compose. (#727)
---
 hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config | 2 +-
 hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozone/docker-config  | 2 +-
 hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config  | 2 +-
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config | 2 +-
 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozonetrace/docker-config | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
index ffce1a5..4fdb0cb 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
@@ -61,7 +61,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
index f699864..f7ebcde 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
@@ -61,7 +61,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
index f00ace1..2493cae 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
@@ -60,7 +60,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index 6ba54b1..8f28967 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -58,7 +58,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPE

[hadoop] branch trunk updated: HDDS-1428. Remove benign warning in handleCreateContainer. Contributed by Siddharth Wagle.

2019-04-12 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 62f4808  HDDS-1428. Remove benign warning in handleCreateContainer. Contributed by Siddharth Wagle.
62f4808 is described below

commit 62f4808617a354b8f4f803cdb8915c179b7210be
Author: Arpit Agarwal 
AuthorDate: Fri Apr 12 15:08:01 2019 -0700

HDDS-1428. Remove benign warning in handleCreateContainer. Contributed by Siddharth Wagle.
---
 .../org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d2d7bf7..531fb02 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -251,7 +251,7 @@ public class KeyValueHandler extends Handler {
 // The create container request for an already existing container can
 // arrive in case the ContainerStateMachine reapplies the transaction
 // on datanode restart. Just log a warning msg here.
-LOG.warn("Container already exists." +
+LOG.debug("Container already exists." +
 "container Id " + containerID);
   }
 } catch (StorageContainerException ex) {
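
The surrounding comment explains why WARN was too loud: a duplicate create is expected when the ContainerStateMachine reapplies its log after a datanode restart. A sketch of that idempotent-apply shape with hypothetical names (the map stands in for the handler's real container state):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CreateHandlerSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CreateHandlerSketch.class);
      private final Map<Long, Object> containers = new ConcurrentHashMap<>();

      void handleCreate(long containerID) {
        if (containers.putIfAbsent(containerID, new Object()) != null) {
          // Replay of an already-applied transaction (e.g. a restart
          // reapplying the state machine log) is expected traffic, so
          // DEBUG rather than WARN keeps restarts quiet.
          LOG.debug("Container already exists. container Id {}", containerID);
        }
      }
    }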





[hadoop] branch trunk updated: HDDS-1387. ConcurrentModificationException in TestMiniChaosOzoneCluster. Contributed by Marton Elek. (#732)

2019-04-12 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5379d85  HDDS-1387. ConcurrentModificationException in TestMiniChaosOzoneCluster. Contributed by Marton Elek. (#732)
5379d85 is described below

commit 5379d85d8ed09b35e293239d3a7f96f8f98c411c
Author: Elek, Márton 
AuthorDate: Sat Apr 13 00:19:50 2019 +0200

HDDS-1387. ConcurrentModificationException in TestMiniChaosOzoneCluster. Contributed by Marton Elek. (#732)
---
 .../src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 8bef479..059af5a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -136,11 +136,13 @@ public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
   }
 
   public void shutdown() {
-super.shutdown();
 try {
   stopChaos();
   executorService.shutdown();
   executorService.awaitTermination(1, TimeUnit.DAYS);
+  //this should be called after stopChaos to be sure that the
+  //datanode collection is not modified during the shutdown
+  super.shutdown();
 } catch (Exception e) {
   LOG.error("failed to shutdown MiniOzoneChaosCluster", e);
 }
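
The fix is purely an ordering change: the chaos scheduler must be drained before the datanode collection is torn down, otherwise the periodic failure task can mutate the list mid-shutdown. A minimal sketch of that ordering with a ScheduledExecutorService:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class OrderedShutdown {
      private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();

      void start(Runnable chaosTask) {
        executor.scheduleAtFixedRate(chaosTask, 1, 1, TimeUnit.SECONDS);
      }

      void shutdown() throws InterruptedException {
        // Stop the periodic task and wait for any in-flight run to finish
        // BEFORE releasing the resources the task touches; tearing the
        // cluster down first is what raced into the
        // ConcurrentModificationException.
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.DAYS);
        // ... only now tear down shared state (super.shutdown() above).
      }
    }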





[hadoop] branch trunk updated: HDFS-13699. Add DFSClient sending handshake token to DataNode, and allow DataNode overwrite downstream QOP. Contributed by Chen Liang.

2019-04-12 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 626fec6  HDFS-13699. Add DFSClient sending handshake token to DataNode, and allow DataNode overwrite downstream QOP. Contributed by Chen Liang.
626fec6 is described below

commit 626fec652b9f3dae10c9af78fd220b1240f19fc7
Author: Chen Liang 
AuthorDate: Fri Apr 12 17:37:51 2019 -0700

HDFS-13699. Add DFSClient sending handshake token to DataNode, and allow DataNode overwrite downstream QOP. Contributed by Chen Liang.
---
 .../hadoop/security/token/SecretManager.java   |   2 +-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   3 +
 .../datatransfer/sasl/DataTransferSaslUtil.java|  72 +++
 .../datatransfer/sasl/SaslDataTransferClient.java  | 117 +--
 .../src/main/proto/datatransfer.proto  |   6 +
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   4 +
 .../datatransfer/sasl/SaslDataTransferServer.java  |  53 -
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java  |   7 +-
 .../hadoop/hdfs/server/datanode/DataXceiver.java   |  14 +-
 .../src/main/resources/hdfs-default.xml|  22 +++
 .../apache/hadoop/hdfs/TestHAAuxiliaryPort.java|   2 +-
 .../apache/hadoop/hdfs/TestMultipleNNPortQOP.java  | 219 +
 .../datanode/TestDataXceiverBackwardsCompat.java   |   2 +-
 14 files changed, 501 insertions(+), 28 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
index 798c8c9..514806d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
@@ -167,7 +167,7 @@ public abstract class SecretManager<T extends TokenIdentifier> {
* @param key the secret key
* @return the bytes of the generated password
*/
-  protected static byte[] createPassword(byte[] identifier, 
+  public static byte[] createPassword(byte[] identifier,
  SecretKey key) {
 Mac mac = threadLocalMac.get();
 try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index fae0c52..97a8472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -157,6 +157,9 @@ public interface HdfsClientConfigKeys {
   String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
   "dfs.encrypt.data.transfer.cipher.suites";
 
+  String DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY =
+  "dfs.encrypt.data.overwrite.downstream.new.qop";
+
   String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
   String DFS_DATA_TRANSFER_PROTECTION_DEFAULT = "";
   String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY =
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
index f4651eb..666a29f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.security.SaslPropertiesResolver;
@@ -249,6 +250,51 @@ public final class DataTransferSaslUtil {
 }
   }
 
+  static class SaslMessageWithHandshake {
+private final byte[] payload;
+private final byte[] secret;
+private final String bpid;
+
+SaslMessageWithHandshake(byte[] payload, byte[] secret, String bpid) {

[hadoop] branch trunk updated: HADOOP-14747. S3AInputStream to implement CanUnbuffer.

2019-04-12 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2382f63  HADOOP-14747. S3AInputStream to implement CanUnbuffer.
2382f63 is described below

commit 2382f63fc0bb4108f3f3c542b4be7c04fbedd7c4
Author: Sahil Takiar 
AuthorDate: Fri Apr 12 18:09:14 2019 -0700

HADOOP-14747. S3AInputStream to implement CanUnbuffer.

Author:Sahil Takiar 
---
 .../site/markdown/filesystem/fsdatainputstream.md  |  37 ++
 .../fs/contract/AbstractContractUnbufferTest.java  | 125 +
 .../apache/hadoop/fs/contract/ContractOptions.java |   5 +
 .../fs/contract/hdfs/TestHDFSContractUnbuffer.java |  46 
 .../src/test/resources/contract/hdfs.xml   |   5 +
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   |  39 +--
 .../fs/contract/s3a/ITestS3AContractUnbuffer.java  |  41 +++
 .../org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java |  66 +++
 .../org/apache/hadoop/fs/s3a/TestS3AUnbuffer.java  |  76 +
 .../hadoop-aws/src/test/resources/contract/s3a.xml |   5 +
 10 files changed, 438 insertions(+), 7 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
index e067b07..0906964 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstream.md
@@ -275,6 +275,43 @@ class, which can react to a checksum error in a read by attempting to source
 the data elsewhere. If a new source can be found it attempts to reread and
 recheck that portion of the file.
 
+### `CanUnbuffer.unbuffer()`
+
+This operation instructs the source to release any system resources they are
+currently holding on to, such as buffers, sockets, file descriptors, etc. Any
+subsequent IO operation will likely have to reacquire these resources.
+Unbuffering is useful in situations where streams need to remain open, but no IO
+operation is expected from the stream in the immediate future (examples include
+file handle caching).
+
+#### Preconditions
+
+Not all subclasses implement this operation. In addition to implementing
+`CanUnbuffer`, subclasses must implement the `StreamCapabilities` interface and
+`StreamCapabilities.hasCapability(UNBUFFER)` must return true. If a subclass
+implements `CanUnbuffer` but does not report the functionality via
+`StreamCapabilities` then the call to `unbuffer` does nothing. If a subclass
+reports that it does implement `UNBUFFER`, but does not implement the
+`CanUnbuffer` interface, an `UnsupportedOperationException` is thrown.
+
+supported(FSDIS, StreamCapabilities.hasCapability && FSDIS.hasCapability(UNBUFFER) && CanUnbuffer.unbuffer)
+
+This method is not thread-safe. If `unbuffer` is called while a `read` is in
+progress, the outcome is undefined.
+
+`unbuffer` can be called on a closed file, in which case `unbuffer` will do
+nothing.
+
+#### Postconditions
+
+The majority of subclasses that do not implement this operation simply
+do nothing.
+
+If the operation is supported, `unbuffer` releases any and all system resources
+associated with the stream. The exact list of what these resources are is
+generally implementation dependent, however, in general, it may include
+buffers, sockets, file descriptors, etc.
+
 ##  interface `PositionedReadable`
 
 The `PositionedReadable` operations supply "positioned reads" ("pread").
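
The specification above reduces to a probe-then-call pattern for callers: ask the stream whether it advertises the capability, and only then unbuffer. A short sketch against FSDataInputStream, assuming the usual "in:unbuffer" capability string:

    import org.apache.hadoop.fs.FSDataInputStream;

    public class UnbufferSketch {
      static void releaseIfIdle(FSDataInputStream in) {
        // hasCapability lets callers avoid the UnsupportedOperationException
        // path described above when the wrapped stream cannot unbuffer.
        if (in.hasCapability("in:unbuffer")) {
          in.unbuffer(); // drop buffers/sockets until the next read
        }
      }
    }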
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
new file mode 100644
index 000..7ba32ba
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apa

[hadoop] branch trunk updated: HADOOP-16237. Fix new findbugs issues after updating guava to 27.0-jre.

2019-04-12 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1943db5  HADOOP-16237. Fix new findbugs issues after updating guava to 27.0-jre.
1943db5 is described below

commit 1943db557124439f9f41c18a618455ccf4c3e6cc
Author: Gabor Bota 
AuthorDate: Fri Apr 12 18:27:41 2019 -0700

HADOOP-16237. Fix new findbugs issues after updating guava to 27.0-jre.

Author:Gabor Bota 
---
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml |  8 +
 .../hadoop-yarn/dev-support/findbugs-exclude.xml   | 37 ++
 .../document/entity/TimelineEntityDocument.java| 22 +
 .../document/flowrun/FlowRunDocument.java  | 13 +---
 .../cosmosdb/CosmosDBDocumentStoreReader.java  |  2 +-
 .../cosmosdb/CosmosDBDocumentStoreWriter.java  |  2 +-
 6 files changed, 71 insertions(+), 13 deletions(-)

diff --git a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index f864c03..356189b 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -15,6 +15,14 @@
limitations under the License.
 -->
 
+
+  
+  
+
+
+
+  
+
   
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index dd42129..eaab7e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -660,4 +660,41 @@
 
   
 
+  
+  
+
+
+
+  
+
+  
+  
+
+
+
+  
+
+  
+  
+
+
+
+  
+
+  
+  
+
+
+
+
+  
+
+  
+  
+
+
+
+
+  
+
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/entity/TimelineEntityDocument.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/entity/TimelineEntityDocument.java
index ea72ee3..a0b42eb 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/entity/TimelineEntityDocument.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/entity/TimelineEntityDocument.java
@@ -138,14 +138,18 @@ public class TimelineEntityDocument implements
   }
 
  public void setMetrics(Map<String, Set<TimelineMetricSubDoc>> metrics) {
-for (String metricId : metrics.keySet()) {
-  for(TimelineMetricSubDoc metricSubDoc : metrics.get(metricId)) {
+for (Map.Entry<String, Set<TimelineMetricSubDoc>> metricEntry :
+metrics.entrySet()) {
+  final String metricId = metricEntry.getKey();
+  final Set<TimelineMetricSubDoc> metricValue = metricEntry.getValue();
+
+  for(TimelineMetricSubDoc metricSubDoc : metricValue) {
 timelineEntity.addMetric(metricSubDoc.fetchTimelineMetric());
   }
   if (this.metrics.containsKey(metricId)) {
-this.metrics.get(metricId).addAll(metrics.get(metricId));
+this.metrics.get(metricId).addAll(metricValue);
   } else {
-this.metrics.put(metricId, new HashSet<>(metrics.get(metricId)));
+this.metrics.put(metricId, new HashSet<>(metricValue));
   }
 }
   }
@@ -155,14 +159,18 @@ public class TimelineEntityDocument implements
   }
 
  public void setEvents(Map<String, Set<TimelineEventSubDoc>> events) {
-for (String eventId : events.keySet()) {
-  for(TimelineEventSubDoc eventSubDoc: events.get(eventId)) {
+for (Map.Entry<String, Set<TimelineEventSubDoc>> eventEntry :
+events.entrySet()) {
+  final String eventId = eventEntry.getKey();
+  final Set<TimelineEventSubDoc> eventValue = eventEntry.getValue();
+
+  for(TimelineEventSubDoc eventSubDoc : eventValue) {
 timelineEntity.addEvent(eventSubDoc.fetchTimelineEvent());
   }
   if (this.events.containsKey(eventId)) {
 this.events.get(eventId).addAll(events.get(eventId));
   } else {
-this.events.put(eventId, new HashSet<>(events.get(eventId)));
+this.events.put(eventId, new HashSet<>(eventValue));
   }
 }
   }
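
Each hunk in this file is the classic fix for the findbugs WMI_WRONG_MAP_ITERATOR pattern: iterate entrySet() so the value arrives together with its key, instead of paying a get() lookup per key. A minimal before/after sketch:

    import java.util.HashMap;
    import java.util.Map;

    public class EntrySetSketch {
      public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("a", 1);
        counts.put("b", 2);

        // Flagged pattern: one extra hash lookup per iteration via get().
        for (String key : counts.keySet()) {
          System.out.println(key + "=" + counts.get(key));
        }

        // Preferred: entrySet() yields key and value together.
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
          System.out.println(e.getKey() + "=" + e.getValue());
        }
      }
    }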
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/flowrun/FlowRunDocument.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-s