[hadoop] branch trunk updated: YARN-9800. TestRMDelegationTokens can fail in testRemoveExpiredMasterKeyInRMStateStore. Contributed by Adam Antal.

2019-08-30 Thread abmodi
This is an automated email from the ASF dual-hosted git repository.

abmodi pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7d998cb  YARN-9800. TestRMDelegationTokens can fail in testRemoveExpiredMasterKeyInRMStateStore. Contributed by Adam Antal.
7d998cb is described below

commit 7d998cb6278ad5963ef4e2a384b81ee19ff0a489
Author: Abhishek Modi 
AuthorDate: Sat Aug 31 10:03:09 2019 +0530

YARN-9800. TestRMDelegationTokens can fail in testRemoveExpiredMasterKeyInRMStateStore. Contributed by Adam Antal.
---
 .../security/TestRMDelegationTokens.java   | 33 +-
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index c660f9b..94bf3ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -69,6 +69,22 @@ public class TestRMDelegationTokens {
     UserGroupInformation.setConfiguration(testConf);
   }
 
+  private static void assertMasterKeysAreSaved(
+      Set<DelegationKey> rmDTMasterKeyState,
+      RMDelegationTokenSecretManager dtSecretManager) {
+    dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
+      int keyId = managerKey.getKeyId();
+      boolean found = false;
+      for (DelegationKey stateKey: rmDTMasterKeyState) {
+        if (stateKey.getKeyId() == keyId) {
+          found = true;
+          break;
+        }
+      }
+      Assert.assertTrue("Master key not found: " + keyId, found);
+    });
+  }
+
   // Test the DT mast key in the state-store when the mast key is being rolled.
   @Test(timeout = 15000)
   public void testRMDTMasterKeyStateOnRollingMasterKey() throws Exception {
@@ -96,18 +112,8 @@ public class TestRMDelegationTokens {
 
     RMDelegationTokenSecretManager dtSecretManager =
         rm1.getRMContext().getRMDelegationTokenSecretManager();
-    // assert all master keys are saved
-    dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
-      int keyId = managerKey.getKeyId();
-      boolean found = false;
-      for (DelegationKey stateKey: rmDTMasterKeyState) {
-        if (stateKey.getKeyId() == keyId) {
-          found = true;
-          break;
-        }
-      }
-      Assert.assertTrue("Master key not found: " + keyId, found);
-    });
+
+    assertMasterKeysAreSaved(rmDTMasterKeyState, dtSecretManager);
 
     // request to generate a RMDelegationToken
     GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);
@@ -154,8 +160,7 @@ public class TestRMDelegationTokens {
     RMDelegationTokenSecretManager dtSecretManager =
         rm1.getRMContext().getRMDelegationTokenSecretManager();
 
-    // assert all master keys are saved
-    Assert.assertEquals(dtSecretManager.getAllMasterKeys(), rmDTMasterKeyState);
+    assertMasterKeysAreSaved(rmDTMasterKeyState, dtSecretManager);
     Set<DelegationKey> expiringKeys = new HashSet<DelegationKey>();
     expiringKeys.addAll(dtSecretManager.getAllMasterKeys());
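The substance of the fix: the removed assertEquals demanded that the secret manager's key set exactly equal the state-store snapshot, which races with master-key rolling, while the extracted helper only asserts that every key the manager currently holds was saved. A standalone sketch of that containment-style assertion (hypothetical Key record, not the YARN classes; Java 16+):

import java.util.Set;

public class ContainmentAssertSketch {
  record Key(int keyId) { }  // stand-in for DelegationKey

  static void assertKeysAreSaved(Set<Key> stateStore, Set<Key> manager) {
    for (Key managerKey : manager) {
      // Exact set equality would fail whenever the manager rolls an extra
      // key between the snapshot and the check; containment does not.
      boolean found = stateStore.stream()
          .anyMatch(k -> k.keyId() == managerKey.keyId());
      if (!found) {
        throw new AssertionError("Master key not found: " + managerKey.keyId());
      }
    }
  }

  public static void main(String[] args) {
    Set<Key> store = Set.of(new Key(1), new Key(2), new Key(3));
    Set<Key> manager = Set.of(new Key(1), new Key(2));
    assertKeysAreSaved(store, manager);  // passes: an extra stored key is fine
    System.out.println("containment assertion passed");
  }
}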
 





[hadoop] branch trunk updated: YARN-9798. ApplicationMasterServiceTestBase#testRepeatedFinishApplicationMaster fails intermittently. Contributed by Tao Yang.

2019-08-30 Thread abmodi
This is an automated email from the ASF dual-hosted git repository.

abmodi pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eeccd2f  YARN-9798. ApplicationMasterServiceTestBase#testRepeatedFinishApplicationMaster fails intermittently. Contributed by Tao Yang.
eeccd2f is described below

commit eeccd2f6f67f3eeeaa775f47a6a714ce575fcc19
Author: Abhishek Modi 
AuthorDate: Sat Aug 31 09:57:45 2019 +0530

YARN-9798. ApplicationMasterServiceTestBase#testRepeatedFinishApplicationMaster fails intermittently. Contributed by Tao Yang.
---
 .../yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java| 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java
index 0b713e7..80dbb84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java
@@ -382,6 +382,7 @@ public abstract class ApplicationMasterServiceTestBase {
       for (int i = 0; i < 10; i++) {
         am1.unregisterAppAttempt(req, false);
       }
+      rm.drainEvents();
       Assert.assertEquals("Expecting only one event", 1,
           dispatcher.getEventCount());
     } finally {
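The one-line fix addresses a race: unregisterAppAttempt() hands events to an asynchronous dispatcher, so counting them before the dispatcher has drained is flaky. A standalone sketch of the drain-before-assert pattern (plain java.util.concurrent, not the YARN MockRM/drainEvents API):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class DrainBeforeAssert {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService dispatcher = Executors.newSingleThreadExecutor();
    AtomicInteger handled = new AtomicInteger();

    for (int i = 0; i < 10; i++) {
      dispatcher.submit(handled::incrementAndGet);  // async "events"
    }

    // Without this drain, handled.get() may still be < 10 here.
    dispatcher.shutdown();
    dispatcher.awaitTermination(5, TimeUnit.SECONDS);

    if (handled.get() != 10) {
      throw new AssertionError("events not yet processed: " + handled.get());
    }
    System.out.println("handled = " + handled.get());
  }
}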





[hadoop] branch trunk updated: HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by adjusting timeouts

2019-08-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2d083f  HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by adjusting timeouts
a2d083f is described below

commit a2d083f2c546ef9e0a543ea287c2435c6440d9aa
Author: Doroszlai, Attila 
AuthorDate: Thu Aug 29 18:01:21 2019 +0200

HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by adjusting timeouts

Signed-off-by: Anu Engineer 
---
 .../TestCloseContainerCommandHandler.java  | 410 +++--
 1 file changed, 143 insertions(+), 267 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 219b504..84a1e5d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -16,306 +16,187 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.TimeDuration;
-import org.junit.AfterClass;
-import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 
-import java.io.File;
 import java.io.IOException;
-import java.util.Collections;
-import java.util.Random;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.singletonMap;
+import static org.apache.hadoop.ozone.OzoneConsts.GB;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Test cases to verify CloseContainerCommandHandler in datanode.
  */
 public class TestCloseContainerCommandHandler {
 
-  private final StateContext context = Mockito.mock(StateContext.class);
-  private final Random random = new Random();
-  private static File testDir;
+  private static final long CONTAINER_ID = 123L;
+
+  private OzoneContainer ozoneContainer;
+  private StateContext context;
+  private XceiverServerSpi writeChannel;
+  private Container container;
+  private Handler containerHandler;
+  private PipelineID pipelineID;
+  private PipelineID nonExistentPipelineID = PipelineID.randomId();
+
+  private CloseContainerCommandHandler subject =
+      new CloseContainerCommandHandler();
+
+  @Before
+  public void before() throws Exception {
+    context = mock(StateContext.class);
+    DatanodeStateMachine dnStateMachine = mock(DatanodeStateMachine.class);
+    when(dnStateMachine.getDatanodeDetails())
+        .thenReturn(randomDatanodeDetails());
+    when(context.getParent()).thenReturn(d
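The direction of the rewrite is visible above: drop the real cluster/Ratis setup and its timeouts in favor of Mockito mocks with fixed IDs, and verify interactions instead of waiting on state. A self-contained sketch of that mock-and-verify style (hypothetical Handler and Subject, not the HDDS classes; JUnit 4 and Mockito assumed on the classpath):

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class MockStyleSketch {
  interface Handler { void closeContainer(long id); }

  static class Subject {
    private final Handler handler;
    Subject(Handler handler) { this.handler = handler; }
    void handle(long id, boolean known) {
      if (known) {
        handler.closeContainer(id);
      }
    }
  }

  @Test
  public void invokesHandlerForKnownContainer() {
    Handler handler = mock(Handler.class);
    new Subject(handler).handle(123L, true);
    verify(handler).closeContainer(123L);  // interaction happened, no sleeps
  }

  @Test
  public void skipsHandlerForUnknownContainer() {
    Handler handler = mock(Handler.class);
    new Subject(handler).handle(123L, false);
    verify(handler, never()).closeContainer(anyLong());  // no side effect
  }
}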

[hadoop] branch branch-2 updated: YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

2019-08-30 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 758085a  YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.
758085a is described below

commit 758085a87ea304349b22d6a28a015b831c3e1a56
Author: Akira Ajisaka 
AuthorDate: Mon Jan 7 17:32:28 2019 +0900

YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

(cherry picked from commit 5db7c4906235dd1070b9706ca4971f1eb5d74139)
(cherry picked from commit a453f38015fec454f63e9f5459b0330ac3bc7eb3)
(cherry picked from commit 3c9d2f53175e11ae12fe5857289980ee612124a8)
---
 .../src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 24c1da0..fdd5513 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -182,7 +182,8 @@ public class RMAdminCLI extends HAAdmin {
   private static void appendHAUsage(final StringBuilder usageBuilder) {
     for (Map.Entry<String, UsageInfo> cmdEntry : USAGE.entrySet()) {
       if (cmdEntry.getKey().equals("-help")
-          || cmdEntry.getKey().equals("-failover")) {
+          || cmdEntry.getKey().equals("-failover")
+          || cmdEntry.getKey().equals("-transitionToObserver")) {
         continue;
       }
       UsageInfo usageInfo = cmdEntry.getValue();
@@ -296,7 +297,8 @@ public class RMAdminCLI extends HAAdmin {
     }
     if (isHAEnabled) {
       for (String cmdKey : USAGE.keySet()) {
-        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")) {
+        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")
+            && !cmdKey.equals("-transitionToObserver")) {
           buildHelpMsg(cmdKey, helpBuilder);
           helpBuilder.append("\n");
         }
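The same patch is cherry-picked to branch-3.1 and branch-3.2 below. As the chain of equals() checks grows with every excluded command, an exclusion set is the usual alternative; a small hypothetical sketch (not from the patch; Set.of requires Java 9+):

import java.util.Set;

public class UsageFilterSketch {
  // Commands excluded from the generic usage/help listing.
  private static final Set<String> EXCLUDED =
      Set.of("-help", "-failover", "-transitionToObserver");

  static boolean includeInUsage(String cmdKey) {
    return !EXCLUDED.contains(cmdKey);
  }

  public static void main(String[] args) {
    System.out.println(includeInUsage("-refreshQueues"));  // true
    System.out.println(includeInUsage("-failover"));       // false
  }
}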





[hadoop] branch branch-3.1 updated: YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

2019-08-30 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 3c9d2f5  YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.
3c9d2f5 is described below

commit 3c9d2f53175e11ae12fe5857289980ee612124a8
Author: Akira Ajisaka 
AuthorDate: Mon Jan 7 17:32:28 2019 +0900

YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

(cherry picked from commit 5db7c4906235dd1070b9706ca4971f1eb5d74139)
(cherry picked from commit a453f38015fec454f63e9f5459b0330ac3bc7eb3)
---
 .../src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 027a786..c7d1423 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -190,7 +190,8 @@ public class RMAdminCLI extends HAAdmin {
   private static void appendHAUsage(final StringBuilder usageBuilder) {
     for (Map.Entry<String, UsageInfo> cmdEntry : USAGE.entrySet()) {
       if (cmdEntry.getKey().equals("-help")
-          || cmdEntry.getKey().equals("-failover")) {
+          || cmdEntry.getKey().equals("-failover")
+          || cmdEntry.getKey().equals("-transitionToObserver")) {
         continue;
       }
       UsageInfo usageInfo = cmdEntry.getValue();
@@ -305,7 +306,8 @@ public class RMAdminCLI extends HAAdmin {
     }
     if (isHAEnabled) {
       for (String cmdKey : USAGE.keySet()) {
-        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")) {
+        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")
+            && !cmdKey.equals("-transitionToObserver")) {
           buildHelpMsg(cmdKey, helpBuilder);
           helpBuilder.append("\n");
         }





[hadoop] branch branch-3.2 updated: YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

2019-08-30 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new a453f38  YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.
a453f38 is described below

commit a453f38015fec454f63e9f5459b0330ac3bc7eb3
Author: Akira Ajisaka 
AuthorDate: Mon Jan 7 17:32:28 2019 +0900

YARN-9162. Fix TestRMAdminCLI#testHelp. Contributed by Ayush Saxena.

(cherry picked from commit 5db7c4906235dd1070b9706ca4971f1eb5d74139)
---
 .../src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 068eaa8..356a8ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -187,7 +187,8 @@ public class RMAdminCLI extends HAAdmin {
   private static void appendHAUsage(final StringBuilder usageBuilder) {
     for (Map.Entry<String, UsageInfo> cmdEntry : USAGE.entrySet()) {
       if (cmdEntry.getKey().equals("-help")
-          || cmdEntry.getKey().equals("-failover")) {
+          || cmdEntry.getKey().equals("-failover")
+          || cmdEntry.getKey().equals("-transitionToObserver")) {
         continue;
       }
       UsageInfo usageInfo = cmdEntry.getValue();
@@ -302,7 +303,8 @@ public class RMAdminCLI extends HAAdmin {
     }
     if (isHAEnabled) {
       for (String cmdKey : USAGE.keySet()) {
-        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")) {
+        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")
+            && !cmdKey.equals("-transitionToObserver")) {
           buildHelpMsg(cmdKey, helpBuilder);
           helpBuilder.append("\n");
         }





[hadoop] branch trunk updated: HDDS-2042. Avoid log on console with Ozone shell

2019-08-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c4411f7  HDDS-2042. Avoid log on console with Ozone shell
c4411f7 is described below

commit c4411f7fdf745eefac32749dad4388635a0a9aae
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 27 15:55:47 2019 +0200

HDDS-2042. Avoid log on console with Ozone shell

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/common/src/main/bin/ozone |  3 ++
 .../dist/dev-support/bin/dist-layout-stitching |  1 +
 .../src/main/conf/ozone-shell-log4j.properties | 33 ++
 .../src/main/smoketest/basic/ozone-shell.robot |  1 -
 .../dist/src/main/smoketest/createbucketenv.robot  |  1 -
 .../dist/src/main/smoketest/createmrenv.robot  |  1 -
 6 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index e8cda82..47258d2 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -118,6 +118,8 @@ function ozonecmd_case
 ;;
 freon)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
+  OZONE_FREON_OPTS="${OZONE_FREON_OPTS} -Dhadoop.log.file=ozone-freon.log -Dlog4j.configuration=file:${HADOOP_CONF_DIR}/ozone-shell-log4j.properties"
+  HADOOP_OPTS="${HADOOP_OPTS} ${OZONE_FREON_OPTS}"
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
 genesis)
@@ -137,6 +139,7 @@ function ozonecmd_case
 ;;
 sh | shell)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.OzoneShell
+  HDFS_OM_SH_OPTS="${HDFS_OM_SH_OPTS} -Dhadoop.log.file=ozone-shell.log -Dlog4j.configuration=file:${HADOOP_CONF_DIR}/ozone-shell-log4j.properties"
   HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_SH_OPTS}"
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
 ;;
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 97acc54..00b1b9a 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -90,6 +90,7 @@ run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties" "etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
 run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" "etc/hadoop"
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
new file mode 100644
index 000..e8f5f2d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.log.dir=.
+hadoop.log.file=ozone-shell.log
+
+log4j.rootLogger=INFO,FILE
+
+log4j.threshold=ALL
+
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.file=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{1}:%L - %m%n
+
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
index 044ae71..689e4af 100644
--- a/hadoop-ozone/dist

[hadoop] branch branch-2 updated: HDFS-14726. Fix JN incompatibility issue in branch-2 due to backport of HDFS-10519. Contributed by Chen Liang.

2019-08-30 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new f05d87b  HDFS-14726. Fix JN incompatibility issue in branch-2 due to backport of HDFS-10519. Contributed by Chen Liang.
f05d87b is described below

commit f05d87b32e5cda2d4d157342c0ec570e12438441
Author: Chen Liang 
AuthorDate: Fri Aug 30 15:55:35 2019 -0700

HDFS-14726. Fix JN incompatibility issue in branch-2 due to backport of HDFS-10519. Contributed by Chen Liang.
---
 .../org/apache/hadoop/hdfs/protocolPB/PBHelper.java  | 10 --
 .../hdfs/qjournal/client/QuorumJournalManager.java   | 20 +++-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto  |  2 +-
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index fa54330..478a4cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -292,8 +293,13 @@ public class PBHelper {
     for (RemoteEditLogProto l : manifest.getLogsList()) {
       logs.add(convert(l));
     }
-    return new RemoteEditLogManifest(logs,
-        manifest.getCommittedTxnId());
+    long committedId = HdfsServerConstants.INVALID_TXID;
+    if (manifest.hasCommittedTxnId()) {
+      // An older version JN may not have this field, in which case committedId
+      // is set to INVALID_TXID.
+      committedId = manifest.getCommittedTxnId();
+    }
+    return new RemoteEditLogManifest(logs, committedId);
   }
 
   public static CheckpointCommandProto convert(CheckpointCommand cmd) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index b545fb2..14e63bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJourna
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
@@ -632,13 +633,22 @@ public class QuorumJournalManager implements JournalManager {
 
     // If it's bounded by durable Txns, endTxId could not be larger
     // than committedTxnId. This ensures the consistency.
+    // An older version JN may not return the field committedTxnId,
+    // in which case it is set to INVALID_TXID.
     if (onlyDurableTxns && inProgressOk) {
-      endTxId = Math.min(endTxId, committedTxnId);
-      if (endTxId < remoteLog.getStartTxId()) {
-        LOG.warn("Found endTxId (" + endTxId + ") that is less than " +
-            "the startTxId (" + remoteLog.getStartTxId() +
-            ") - setting it to startTxId.");
+      if (committedTxnId == HdfsServerConstants.INVALID_TXID) {
+        LOG.warn("Received undefined committed txn id, "
+            + " NN and JN are on different version? "
+            + "- seting to startTxId");
         endTxId = remoteLog.getStartTxId();
+      } else {
+        endTxId = Math.min(endTxId, committedTxnId);
+        if (endTxId < remoteLog.getStartTxId()) {
+          LOG.warn("Found endTxId (" + endTxId + ") that is less than " +
+              "the startTxId (" + remoteLog.getStartTxId() +
[hadoop] branch branch-2 updated: YARN-7585. NodeManager should go unhealthy when state store throws DBException. Contributed by Wilfred Spiegelenburg.

2019-08-30 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 655154c  YARN-7585. NodeManager should go unhealthy when state store throws DBException. Contributed by Wilfred Spiegelenburg.
655154c is described below

commit 655154cb458a8db1d5b35f6400d54d3c8fb72c0c
Author: Miklos Szegedi 
AuthorDate: Tue Jan 2 18:03:04 2018 -0800

YARN-7585. NodeManager should go unhealthy when state store throws DBException. Contributed by Wilfred Spiegelenburg.

(cherry picked from commit 7f515f57ede74dae787994f37bfafd5d20c9aa4c)
---
 .../yarn/server/nodemanager/NodeManager.java   |  1 +
 .../recovery/NMLeveldbStateStoreService.java   | 72 ++
 .../nodemanager/recovery/NMStateStoreService.java  | 11 
 .../recovery/TestNMLeveldbStateStoreService.java   | 35 +++
 4 files changed, 119 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 30346e0..a9bc022 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -456,6 +456,7 @@ public class NodeManager extends CompositeService
     // so that we make sure everything is up before registering with RM. 
     addService(nodeStatusUpdater);
     ((NMContext) context).setNodeStatusUpdater(nodeStatusUpdater);
+    nmStore.setNodeStatusUpdater(nodeStatusUpdater);
 
     // Do secure login before calling init for added services.
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 0cbf078..49c2764 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
 import org.apache.hadoop.yarn.server.records.Version;
@@ -158,6 +159,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private DB db;
   private boolean isNewlyCreated;
+  private boolean isHealthy;
   private Timer compactionTimer;
 
   /**
@@ -172,6 +174,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   protected void startStorage() throws IOException {
+    // Assume that we're healthy when we start
+    isHealthy = true;
   }
 
   @Override
@@ -190,6 +194,36 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     return isNewlyCreated;
   }
 
+  /**
+   * If the state store throws an error after recovery has been performed
+   * then we can not trust it any more to reflect the NM state. We need to
+   * mark the store and node unhealthy.
+   * Errors during the recovery will cause a service failure and thus a NM
+   * start failure. Do not need to mark the store unhealthy for those.
+   * @param dbErr Exception
+   */
+  private void markStoreUnHealthy(DBException dbErr) {
+    // Always log the error here, we might not see the error in the caller
+    LOG.error("Statestore exception: ", dbErr);
+    // We have already been marked unhealthy so no need to do it again.
+    if (!isHealthy) {
+      return;
+    }
+    // Mark unhealthy, an out of band heartbeat will be sent and the state
+    // will remain unhealthy (not recoverable).
+    // No need to close the store: does not make any difference at this point.
+    isHealthy = false;
+
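The pattern the patch introduces: wrap store operations, and when the underlying LevelDB throws, flip a health flag that the NodeStatusUpdater reports out of band, instead of letting the NM keep running against a store it can no longer trust. A condensed standalone sketch (hypothetical Store interface; RuntimeException stands in for DBException):

public class HealthGuardSketch {
  interface Store { void put(String key, byte[] value); }

  private final Store store;
  private volatile boolean isHealthy = true;

  HealthGuardSketch(Store store) { this.store = store; }

  void storeContainerState(String key, byte[] state) {
    try {
      store.put(key, state);
    } catch (RuntimeException dbErr) {
      isHealthy = false;  // reported on the next (out-of-band) heartbeat
      throw dbErr;        // still surface the failure to the caller
    }
  }

  boolean isHealthy() { return isHealthy; }

  public static void main(String[] args) {
    HealthGuardSketch guard = new HealthGuardSketch((k, v) -> {
      throw new RuntimeException("simulated leveldb failure");
    });
    try {
      guard.storeContainerState("container_1", new byte[0]);
    } catch (RuntimeException expected) {
      // expected: the point is the flag below
    }
    System.out.println("healthy = " + guard.isHealthy());  // false
  }
}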

[hadoop] branch branch-3.1 updated: HDFS-8178. QJM doesn't move aside stale inprogress edits files. Contributed by Istvan Fajth.

2019-08-30 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new e3c01e1  HDFS-8178. QJM doesn't move aside stale inprogress edits files. Contributed by Istvan Fajth.
e3c01e1 is described below

commit e3c01e174c8885c6b50d8d505c60b28ac941ffb5
Author: Istvan Fajth 
AuthorDate: Fri Aug 30 13:13:01 2019 -0700

HDFS-8178. QJM doesn't move aside stale inprogress edits files. Contributed by Istvan Fajth.

Signed-off-by: Wei-Chiu Chuang 
---
 .../hadoop/hdfs/qjournal/server/JNStorage.java |  11 +-
 .../hdfs/server/namenode/FileJournalManager.java   |  48 +-
 .../server/namenode/NNStorageRetentionManager.java |  13 +-
 .../namenode/TestNNStorageRetentionManager.java| 174 ++---
 4 files changed, 178 insertions(+), 68 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 3789156..e886432 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -50,12 +50,7 @@ class JNStorage extends Storage {
   private final StorageDirectory sd;
   private StorageState state;
 
-  private static final List<Pattern> CURRENT_DIR_PURGE_REGEXES =
-      ImmutableList.of(
-        Pattern.compile("edits_\\d+-(\\d+)"),
-        Pattern.compile("edits_inprogress_(\\d+)(?:\\..*)?"));
-  
-  private static final List<Pattern> PAXOS_DIR_PURGE_REGEXES = 
+  private static final List<Pattern> PAXOS_DIR_PURGE_REGEXES =
       ImmutableList.of(Pattern.compile("(\\d+)"));
 
   private static final String STORAGE_EDITS_SYNC = "edits.sync";
@@ -177,8 +172,8 @@ class JNStorage extends Storage {
    * the given txid.
    */
   void purgeDataOlderThan(long minTxIdToKeep) throws IOException {
-    purgeMatching(sd.getCurrentDir(),
-        CURRENT_DIR_PURGE_REGEXES, minTxIdToKeep);
+    fjm.purgeLogsOlderThan(minTxIdToKeep);
+
     purgeMatching(getOrCreatePaxosDir(),
         PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index c71c09a..83a2d68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -74,7 +74,8 @@ public class FileJournalManager implements JournalManager {
   private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
       NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
 
-  private File currentInProgress = null;
+  @VisibleForTesting
+  File currentInProgress = null;
 
   /**
    * A FileJournalManager should maintain the largest Tx ID that has been
@@ -177,20 +178,50 @@ public class FileJournalManager implements JournalManager {
     this.lastReadableTxId = id;
   }
 
+  /**
+   * Purges the unnecessary edits and edits_inprogress files.
+   *
+   * Edits files that are ending before the minTxIdToKeep are purged.
+   * Edits in progress files that are starting before minTxIdToKeep are purged.
+   * Edits in progress files that are marked as empty, trash, corrupted or
+   * stale by file extension and starting before minTxIdToKeep are purged.
+   * Edits in progress files that are after minTxIdToKeep, but before the
+   * current edits in progress files are marked as stale for clarity.
+   *
+   * In case file removal or rename is failing a warning is logged, but that
+   * does not fail the operation.
+   *
+   * @param minTxIdToKeep the lowest transaction ID that should be retained
+   * @throws IOException if listing the storage directory fails.
+   */
   @Override
   public void purgeLogsOlderThan(long minTxIdToKeep)
       throws IOException {
     LOG.info("Purging logs older than " + minTxIdToKeep);
     File[] files = FileUtil.listFiles(sd.getCurrentDir());
     List<EditLogFile> editLogs = matchEditLogs(files, true);
-    for (EditLogFile log : editLogs) {
-      if (log.getFirstTxId() < minTxIdToKeep &&
-          log.getLastTxId() < minTxIdToKeep) {
-        purger.purgeLog(log);
+    synchronized (this) {
+      for (EditLogFile log : editLogs) {
+        if (log.getFirstTxId() < minTxIdToKeep &&
+            log.getLastTxId() < minTxIdToKeep) {
+          purger.purgeLog(log);
+        } else if (isStaleInProgressLog(minTxIdToKeep, log)) {
+          purger.markStale(log);
+        }
       }
     }
   }
 
+  private boolean isStaleInProg
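The classification the new purgeLogsOlderThan performs: parse the transaction range out of each file name, purge files wholly below the retention threshold, and mark lingering in-progress files stale instead of silently skipping them. A rough standalone sketch of the txid-threshold logic (regexes adapted from JNStorage above; the real code additionally exempts the currently open segment from being marked stale):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class EditsPurgeSketch {
  // Finalized segments: edits_<first>-<last>; in-progress: edits_inprogress_<first>
  private static final Pattern FINALIZED =
      Pattern.compile("edits_(\\d+)-(\\d+)");
  private static final Pattern IN_PROGRESS =
      Pattern.compile("edits_inprogress_(\\d+)(?:\\..*)?");

  enum Action { PURGE, MARK_STALE, KEEP }

  static Action classify(String name, long minTxIdToKeep) {
    Matcher m = FINALIZED.matcher(name);
    if (m.matches()) {
      long lastTxId = Long.parseLong(m.group(2));
      return lastTxId < minTxIdToKeep ? Action.PURGE : Action.KEEP;
    }
    m = IN_PROGRESS.matcher(name);
    if (m.matches()) {
      long firstTxId = Long.parseLong(m.group(1));
      // An in-progress file starting below the threshold was abandoned.
      return firstTxId < minTxIdToKeep ? Action.PURGE : Action.MARK_STALE;
    }
    return Action.KEEP;
  }

  public static void main(String[] args) {
    System.out.println(classify("edits_1-100", 200));           // PURGE
    System.out.println(classify("edits_inprogress_150", 200));  // PURGE
    System.out.println(classify("edits_inprogress_250", 200));  // MARK_STALE
  }
}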

[hadoop] branch trunk updated: HDDS-2061. Add hdds.container.chunk.persistdata as exception to TestOzoneConfigurationFields (#1382)

2019-08-30 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7085512  HDDS-2061. Add hdds.container.chunk.persistdata as exception to TestOzoneConfigurationFields (#1382)
7085512 is described below

commit 70855126d16c42d2c18bb6c190901e4912b96cec
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Aug 30 21:31:52 2019 +0200

HDDS-2061. Add hdds.container.chunk.persistdata as exception to TestOzoneConfigurationFields (#1382)
---
 .../hadoop/ozone/TestOzoneConfigurationFields.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 8efa81c..0afd6b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
 import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE;
+import java.util.Arrays;
 
 /**
  * Tests if configuration constants documented in ozone-defaults.xml.
@@ -47,12 +47,14 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
   }
 
   private void addPropertiesNotInXml() {
-    configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_KEY_ALGORITHM);
-    configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_SECURITY_PROVIDER);
-    configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT);
-    configurationPropsToSkipCompare.add(OMConfigKeys.OZONE_OM_NODES_KEY);
-    configurationPropsToSkipCompare.add(OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
-    configurationPropsToSkipCompare.add(OzoneConfigKeys.
-        OZONE_S3_TOKEN_MAX_LIFETIME_KEY);
+    configurationPropsToSkipCompare.addAll(Arrays.asList(
+        HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA,
+        HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT,
+        HddsConfigKeys.HDDS_KEY_ALGORITHM,
+        HddsConfigKeys.HDDS_SECURITY_PROVIDER,
+        OMConfigKeys.OZONE_OM_NODES_KEY,
+        OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE,
+        OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY
+    ));
   }
 }





[hadoop] branch trunk updated: HDDS-2063. Integration tests create untracked file audit.log (#1384)

2019-08-30 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 472a26d  HDDS-2063. Integration tests create untracked file audit.log (#1384)
472a26d is described below

commit 472a26d2b8a5f4c91ba851f48345d33481f5bb24
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Aug 30 21:23:25 2019 +0200

HDDS-2063. Integration tests create untracked file audit.log (#1384)
---
 .../hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java   | 2 +-
 .../src/test/resources/{log4j2.properties => auditlog.properties}   | 0
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index 7c8765b..ec34efe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -102,7 +102,7 @@ public class TestOzoneRpcClientForAclAuditLog {
    */
   @BeforeClass
   public static void init() throws Exception {
-    System.setProperty("log4j.configurationFile", "log4j2.properties");
+    System.setProperty("log4j.configurationFile", "auditlog.properties");
     ugi = UserGroupInformation.getCurrentUser();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setBoolean(OZONE_ACL_ENABLED, true);
diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j2.properties b/hadoop-ozone/integration-test/src/test/resources/auditlog.properties
similarity index 100%
rename from hadoop-ozone/integration-test/src/test/resources/log4j2.properties
rename to hadoop-ozone/integration-test/src/test/resources/auditlog.properties





[hadoop] branch trunk updated: HDDS-2014. Create Symmetric Key for GDPR (#1362)

2019-08-30 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 46696bd  HDDS-2014. Create Symmetric Key for GDPR (#1362)
46696bd is described below

commit 46696bd9b0118dc49d4f225d668a7e8cbdd3a6a0
Author: dineshchitlangia 
AuthorDate: Fri Aug 30 12:55:36 2019 -0400

HDDS-2014. Create Symmetric Key for GDPR (#1362)
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  9 +++
 .../hadoop/ozone/security/GDPRSymmetricKey.java| 81 ++
 .../ozone/security/TestGDPRSymmetricKey.java   | 66 ++
 3 files changed, 156 insertions(+)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 80e9260..398cce2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -312,4 +312,13 @@ public final class OzoneConsts {
   public static final int S3_BUCKET_MIN_LENGTH = 3;
   public static final int S3_BUCKET_MAX_LENGTH = 64;
 
+  //GDPR
+  public static final String GDPR_ALGORITHM_NAME = "AES";
+  public static final int GDPR_RANDOM_SECRET_LENGTH = 32;
+  public static final String GDPR_CHARSET = "UTF-8";
+  public static final String GDPR_LENGTH = "length";
+  public static final String GDPR_SECRET = "secret";
+  public static final String GDPR_ALGORITHM = "algorithm";
+
+
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
new file mode 100644
index 000..77acf54
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.security;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.SecretKeySpec;
+
+/**
+ * Symmetric Key structure for GDPR.
+ */
+public class GDPRSymmetricKey {
+
+  private SecretKeySpec secretKey;
+  private Cipher cipher;
+  private String algorithm;
+  private String secret;
+
+  public SecretKeySpec getSecretKey() {
+    return secretKey;
+  }
+
+  public Cipher getCipher() {
+    return cipher;
+  }
+
+  /**
+   * Default constructor creates key with default values.
+   * @throws Exception
+   */
+  public GDPRSymmetricKey() throws Exception {
+    algorithm = OzoneConsts.GDPR_ALGORITHM_NAME;
+    secret = RandomStringUtils
+        .randomAlphabetic(OzoneConsts.GDPR_RANDOM_SECRET_LENGTH);
+    this.secretKey = new SecretKeySpec(
+        secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm);
+    this.cipher = Cipher.getInstance(algorithm);
+  }
+
+  /**
+   * Overloaded constructor creates key with specified values.
+   * @throws Exception
+   */
+  public GDPRSymmetricKey(String secret, String algorithm) throws Exception {
+    Preconditions.checkArgument(secret.length() == 32,
+        "Secret must be exactly 32 characters");
+    this.secret = secret;
+    this.algorithm = algorithm;
+    this.secretKey = new SecretKeySpec(
+        secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm);
+    this.cipher = Cipher.getInstance(algorithm);
+  }
+
+  public Map<String, String> getKeyDetails() {
+    Map<String, String> keyDetail = new HashMap<>();
+    keyDetail.put(OzoneConsts.GDPR_SECRET, this.secret);
+    keyDetail.put(OzoneConsts.GDPR_ALGORITHM, this.algorithm);
+    return keyDetail;
+  }
+
+}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
new file mode 100644
index 000..4f06eab
--- /dev/null
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/Test
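A quick sketch of how such a per-object symmetric key supports GDPR-style erasure: data is encrypted under the key, so destroying the key renders the ciphertext unreadable. Plain javax.crypto rather than the Ozone wrapper; note Cipher.getInstance("AES") defaults to AES/ECB/PKCS5Padding:

import java.nio.charset.StandardCharsets;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

public class GdprKeySketch {
  public static void main(String[] args) throws Exception {
    // 32 alphabetic chars = 32 bytes = an AES-256 key, matching
    // GDPR_RANDOM_SECRET_LENGTH in OzoneConsts above.
    String secret = "abcdefghijklmnopqrstuvwxyzABCDEF";
    SecretKeySpec key = new SecretKeySpec(
        secret.getBytes(StandardCharsets.UTF_8), "AES");

    Cipher cipher = Cipher.getInstance("AES");
    cipher.init(Cipher.ENCRYPT_MODE, key);
    byte[] ct = cipher.doFinal("user data".getBytes(StandardCharsets.UTF_8));

    cipher.init(Cipher.DECRYPT_MODE, key);
    System.out.println(new String(cipher.doFinal(ct), StandardCharsets.UTF_8));
    // Discard the secret and the stored ciphertext is effectively erased.
  }
}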

[hadoop] branch trunk updated: HDDS-2047. Datanodes fail to come up after 10 retries in a secure env… (#1379)

2019-08-30 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ec34cee  HDDS-2047. Datanodes fail to come up after 10 retries in a secure env… (#1379)
ec34cee is described below

commit ec34cee5e37ca48bf61403655eba8b89dba0ed57
Author: Xiaoyu Yao 
AuthorDate: Fri Aug 30 09:27:37 2019 -0700

HDDS-2047. Datanodes fail to come up after 10 retries in a secure env… (#1379)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java | 15 
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  3 +--
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 28 +-
 3 files changed, 13 insertions(+), 33 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 3670cfc..6ff166a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -30,6 +30,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Optional;
 import java.util.TimeZone;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -43,6 +44,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -177,23 +180,27 @@ public final class HddsUtils {
   /**
    * Create a scm security client.
    * @param conf - Ozone configuration.
-   * @param address - inet socket address of scm.
    *
    * @return {@link SCMSecurityProtocol}
    * @throws IOException
    */
   public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
-      OzoneConfiguration conf, InetSocketAddress address) throws IOException {
+      OzoneConfiguration conf) throws IOException {
     RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
         ProtobufRpcEngine.class);
     long scmVersion =
         RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
+    InetSocketAddress address =
+        getScmAddressForSecurityProtocol(conf);
+    RetryPolicy retryPolicy =
+        RetryPolicies.retryForeverWithFixedSleep(
+            1000, TimeUnit.MILLISECONDS);
     SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
         new SCMSecurityProtocolClientSideTranslatorPB(
-            RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
+            RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion,
                 address, UserGroupInformation.getCurrentUser(),
                 conf, NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
+                Client.getRpcTimeout(conf), retryPolicy).getProxy());
     return scmSecurityClient;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index f43281c..b13c37d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -272,8 +272,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
       PKCS10CertificationRequest csr = getCSR(config);
       // TODO: For SCM CA we should fetch certificate from multiple SCMs.
       SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
-          HddsUtils.getScmSecurityClient(config,
-              HddsUtils.getScmAddressForSecurityProtocol(config));
+          HddsUtils.getScmSecurityClient(config);
       SCMGetCertResponseProto response = secureScmClient.
           getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(),
               getEncodedString(csr));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index a2e958f..48b095c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -48,10 +48,8 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.Da
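The essence of the fix: build the SCM security client with RPC.getProtocolProxy and a retry-forever policy, so a datanode that starts before SCM keeps polling instead of giving up after the default bounded retries. A minimal standalone sketch of retry-forever-with-fixed-sleep (hypothetical helper, not the Hadoop RetryPolicy API):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class RetryForeverSketch {
  // Retry until success, sleeping a fixed interval between attempts --
  // the behaviour retryForeverWithFixedSleep(1000, MILLISECONDS) configures.
  static <T> T retryForever(Callable<T> action, long sleepMillis)
      throws InterruptedException {
    while (true) {
      try {
        return action.call();
      } catch (Exception e) {
        System.err.println("attempt failed (" + e.getMessage() + "), retrying");
        TimeUnit.MILLISECONDS.sleep(sleepMillis);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    String result = retryForever(() -> {
      if (System.currentTimeMillis() - start < 3000) {
        throw new java.io.IOException("SCM not reachable yet");  // simulated
      }
      return "connected";
    }, 1000);
    System.out.println(result);
  }
}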

[hadoop] branch trunk updated: YARN-9540. TestRMAppTransitions fails intermittently. Contributed by Tao Yang.

2019-08-30 Thread abmodi
This is an automated email from the ASF dual-hosted git repository.

abmodi pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c929b38  YARN-9540. TestRMAppTransitions fails intermittently. Contributed by Tao Yang.
c929b38 is described below

commit c929b383f876b77056864cc26a4a3765a2bfd3d0
Author: Abhishek Modi 
AuthorDate: Fri Aug 30 17:17:43 2019 +0530

YARN-9540. TestRMAppTransitions fails intermittently. Contributed by Tao Yang.
---
 .../hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 122aa30..57bdac4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -653,6 +653,7 @@ public class TestRMAppTransitions {
     RMAppEvent finishedEvent = new RMAppEvent(application.getApplicationId(),
         RMAppEventType.ATTEMPT_FINISHED, diagnostics);
     application.handle(finishedEvent);
+    rmDispatcher.await();
 
     //only run this verification if we created a finishing app
     if (submissionContext == null) {





[hadoop] branch trunk updated: HDFS-14796. Define LOG instead of BlockManager.LOG in ErasureCodingWork/ReplicationWork. Contributed by Fei Hui.

2019-08-30 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 96f7dc1  HDFS-14796. Define LOG instead of BlockManager.LOG in ErasureCodingWork/ReplicationWork. Contributed by Fei Hui.
96f7dc1 is described below

commit 96f7dc1992246a16031f613e55dc39ea0d64acd1
Author: Surendra Singh Lilhore 
AuthorDate: Fri Aug 30 14:28:13 2019 +0530

HDFS-14796. Define LOG instead of BlockManager.LOG in ErasureCodingWork/ReplicationWork. Contributed by Fei Hui.
---
 .../hdfs/server/blockmanagement/BlockReconstructionWork.java  | 6 ++
 .../hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java | 8 +++-
 .../hadoop/hdfs/server/blockmanagement/ReplicationWork.java   | 3 +--
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index d383191..df76a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.net.Node;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
 import java.util.List;
@@ -30,6 +32,10 @@ import java.util.Set;
  * Reconstruction is done by transferring data from srcNodes to targets
  */
 abstract class BlockReconstructionWork {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockReconstructionWork.class);
+
   private final BlockInfo block;
 
   private final String srcPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index 147f8cf..f0e6d49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -44,7 +44,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
         liveReplicaStorages, additionalReplRequired, priority);
     this.blockPoolId = blockPoolId;
     this.liveBlockIndicies = liveBlockIndicies;
-    BlockManager.LOG.debug("Creating an ErasureCodingWork to {} reconstruct ",
+    LOG.debug("Creating an ErasureCodingWork to {} reconstruct ",
         block);
   }
 
@@ -157,10 +157,8 @@ class ErasureCodingWork extends BlockReconstructionWork {
         internBlkLen, stripedBlk.getGenerationStamp());
     source.addBlockToBeReplicated(targetBlk,
         new DatanodeStorageInfo[] {target});
-    if (BlockManager.LOG.isDebugEnabled()) {
-      BlockManager.LOG.debug("Add replication task from source {} to "
-          + "target {} for EC block {}", source, target, targetBlk);
-    }
+    LOG.debug("Add replication task from source {} to "
+        + "target {} for EC block {}", source, target, targetBlk);
   }
 
   private List findLeavingServiceSources() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index f250bcb..5e10ebe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -32,8 +32,7 @@ class ReplicationWork extends BlockReconstructionWork {
     assert getSrcNodes().length == 1 :
         "There should be exactly 1 source node that have been selected";
     getSrcNodes()[0].incrementPendingReplicationWithoutTargets();
-    BlockManager.LOG
-        .debug("Creating a ReplicationWork to reconstruct " + block);
+    LOG.debug("Creating a ReplicationWork to reconstruct " + block);
   }
 
   @Override
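Two details make this more than a rename: each work class now logs under its own logger name (so output is attributable and levels are tunable per class), and the explicit isDebugEnabled() guard becomes redundant because SLF4J's {} placeholders defer message construction until the level check passes. A small sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PerClassLoggerSketch {
  // Logger named after this class rather than a shared BlockManager-style one.
  private static final Logger LOG =
      LoggerFactory.getLogger(PerClassLoggerSketch.class);

  void schedule(String source, String target) {
    // Parameterized form: the message is only assembled if DEBUG is enabled,
    // so no isDebugEnabled() wrapper is needed.
    LOG.debug("Add replication task from source {} to target {}",
        source, target);
  }

  public static void main(String[] args) {
    new PerClassLoggerSketch().schedule("dn-1", "dn-2");
  }
}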

