[hadoop] branch trunk updated: HDDS-1402. Remove unused ScmBlockLocationProtocol from ObjectStoreHandler (#707)

2019-04-16 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 04c0437  HDDS-1402. Remove unused ScmBlockLocationProtocol from ObjectStoreHandler (#707)
04c0437 is described below

commit 04c0437d13cbe8474224735cc6c41d0f6ea917f9
Author: Elek, Márton 
AuthorDate: Wed Apr 17 04:16:05 2019 +0200

HDDS-1402. Remove unused ScmBlockLocationProtocol from ObjectStoreHandler (#707)
---
 .../hdfs/server/datanode/ObjectStoreHandler.java   | 41 +++---
 1 file changed, 12 insertions(+), 29 deletions(-)

diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index a2ee6d9..824eb40 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -16,14 +16,15 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.sun.jersey.api.container.ContainerFactory;
-import com.sun.jersey.api.core.ApplicationAdapter;
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
@@ -41,23 +42,18 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
 import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.protocol.ClientId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
 
+import com.sun.jersey.api.container.ContainerFactory;
+import com.sun.jersey.api.core.ApplicationAdapter;
 import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
 import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
+import org.apache.ratis.protocol.ClientId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implements object store handling within the DataNode process.  This class is
@@ -73,8 +69,7 @@ public final class ObjectStoreHandler implements Closeable {
   private final OzoneManagerProtocol ozoneManagerClient;
   private final StorageContainerLocationProtocol
   storageContainerLocationClient;
-  private final ScmBlockLocationProtocol
-  scmBlockLocationClient;
+
   private final StorageHandler storageHandler;
   private ClientId clientId = ClientId.randomId();
 
@@ -106,17 +101,6 @@ public final class ObjectStoreHandler implements Closeable {
 Client.getRpcTimeout(conf))),
 StorageContainerLocationProtocol.class, conf);
 
-InetSocketAddress scmBlockAddress =
-getScmAddressForBlockClients(conf);
-this.scmBlockLocationClient =
-TracingUtil.createProxy(
-new ScmBlockLocationProtocolClientSideTranslatorPB(
-RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-scmBlockAddress, UserGroupInformation.getCurrentUser(),
-conf, NetUtils.getDefaultSocketFactory(conf),
-Client.getRpcTimeout(conf))),
-ScmBlockLocationProtocol.class, conf);
-
 RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
 ProtobufRpcEngine.class);
 long omVersion =
@@ -171,7 +155,6 @@ public final class ObjectStoreHandler implements Closeable {
 LOG.info("Closing ObjectStoreHandler.");
 storageHandler.close();
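
The hunks above delete the ScmBlockLocationProtocol proxy that ObjectStoreHandler built but never used; only the storage-container-location client remains. For reference, that surviving client follows the proxy-construction pattern sketched below. This is a compile-in-context sketch assembled from the hunks in this diff (it needs an enclosing class, and the helper name createScmContainerClient is illustrative, not part of the commit):

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
    import org.apache.hadoop.hdds.tracing.TracingUtil;
    import org.apache.hadoop.ipc.Client;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;

    /** Sketch: build the single SCM client this class still needs. */
    static StorageContainerLocationProtocol createScmContainerClient(
        Configuration conf) throws IOException {
      long scmVersion =
          RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
      InetSocketAddress scmAddress = getScmAddressForClients(conf);
      // Wrap the protobuf translator in a tracing proxy, as the kept code does.
      return TracingUtil.createProxy(
          new StorageContainerLocationProtocolClientSideTranslatorPB(
              RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
                  scmAddress, UserGroupInformation.getCurrentUser(), conf,
                  NetUtils.getDefaultSocketFactory(conf),
                  Client.getRpcTimeout(conf))),
          StorageContainerLocationProtocol.class, conf);
    }

Beyond removing dead code, this spares each ObjectStoreHandler instance one RPC client proxy that previously had to be created and cleaned up for nothing.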
 

[hadoop] branch trunk updated: HDDS-1434. TestDatanodeStateMachine is flaky (#740)

2019-04-16 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0a87fd1  HDDS-1434. TestDatanodeStateMachine is flaky (#740)
0a87fd1 is described below

commit 0a87fd1464e39d754db3d639e1d954ecdec4555f
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Apr 17 04:13:01 2019 +0200

HDDS-1434. TestDatanodeStateMachine is flaky (#740)
---
 .../apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java | 1 +
 .../statemachine/commandhandler/TestCloseContainerCommandHandler.java  | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 47438dc..e9c5e3e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -86,6 +86,7 @@ public class TestDatanodeStateMachine {
 conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
 TimeUnit.MILLISECONDS);
 conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
 serverAddresses = new ArrayList<>();
 scmServers = new ArrayList<>();
 mockServers = new ArrayList<>();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 6356d0b..1f6ed86 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.statemachine
 .DatanodeStateMachine;
@@ -269,6 +270,8 @@ public class TestCloseContainerCommandHandler {
 TestCloseContainerCommandHandler.class.getName() + UUID.randomUUID());
 conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
 conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testDir.getPath());
+conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
 
 final DatanodeStateMachine datanodeStateMachine = Mockito.mock(
 DatanodeStateMachine.class);
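
Both test classes end up with the same pair of settings: every datanode IPC server in the test picks a free ephemeral port instead of a fixed default, so concurrent or back-to-back runs cannot collide on the same port. A minimal sketch of the setup, using the configuration keys from this diff:

    // In the test setup: avoid "address already in use" flakiness by letting
    // the container and Ratis servers bind random ports.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);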





[hadoop] branch trunk updated: YARN-9349. Improved log level practices for InvalidStateTransitionException. Contributed by Anuhan Torgonshar

2019-04-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9cf7401  YARN-9349.  Improved log level practices for InvalidStateTransitionException. Contributed by Anuhan Torgonshar
9cf7401 is described below

commit 9cf7401794def0d420876db5db20fcd76c52193f
Author: Eric Yang 
AuthorDate: Tue Apr 16 19:51:08 2019 -0400

YARN-9349.  Improved log level practices for InvalidStateTransitionException.
Contributed by Anuhan Torgonshar

(cherry picked from commit fe2370e039e1ee980d74769ae85d67434e0993cf)
---
 .../nodemanager/containermanager/application/ApplicationImpl.java   | 2 +-
 .../server/nodemanager/containermanager/container/ContainerImpl.java| 2 +-
 .../nodemanager/containermanager/localizer/LocalizedResource.java   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index f3d4e51..1806af6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -646,7 +646,7 @@ public class ApplicationImpl implements Application {
 // queue event requesting init of the same app
 newState = stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state", e);
+LOG.error("Can't handle this event at current state", e);
   }
   if (newState != null && oldState != newState) {
 LOG.info("Application " + applicationID + " transitioned from "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cfade27..b79c305 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2125,7 +2125,7 @@ public class ContainerImpl implements Container {
 newState =
 stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state: Current: ["
+LOG.error("Can't handle this event at current state: Current: ["
 + oldState + "], eventType: [" + event.getType() + "]," +
 " container: [" + containerID + "]", e);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index 279efd0..a75a13e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -196,7 +196,7 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
   try {
 newState = this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state", e);
+LOG.error("Can't handle this event at current state", e);
   }
   if (newState != null && oldState != newState) {
 LOG.debug("Resource {}{} size : {} transitioned from {} to {}",
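
The same one-line change is applied in all three NodeManager state machines: an InvalidStateTransitionException means an event arrived in a state that defines no transition for it, which points at an event-wiring bug rather than a routine condition, so the log level is promoted from WARN to ERROR. The shared pattern, sketched with the surrounding members (stateMachine, event, newState, LOG) assumed from the enclosing classes:

    try {
      newState = stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitionException e) {
      // An illegal event/state pair indicates a programming error, not noise:
      // log at ERROR so it stays visible under default log configurations.
      LOG.error("Can't handle this event at current state", e);
    }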



[hadoop] branch trunk updated: HDDS-1376. Datanode exits while executing client command when scmId is null (#724)

2019-04-16 Thread hanishakoneru
This is an automated email from the ASF dual-hosted git repository.

hanishakoneru pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e543c3b  HDDS-1376. Datanode exits while executing client command when scmId is null (#724)
e543c3b is described below

commit e543c3b31aa607e62b7bd718d9cb7f17e6b03072
Author: Hanisha Koneru 
AuthorDate: Tue Apr 16 13:51:39 2019 -0700

HDDS-1376. Datanode exits while executing client command when scmId is null (#724)
---
 .../common/statemachine/DatanodeStateMachine.java |  1 -
 .../common/states/endpoint/VersionEndpointTask.java   |  3 ++-
 .../hadoop/ozone/container/ozoneimpl/OzoneContainer.java  |  3 ++-
 .../ozone/container/common/TestDatanodeStateMachine.java  | 15 +++
 .../commandhandler/TestCloseContainerCommandHandler.java  | 11 +--
 .../ozone/container/ozoneimpl/TestOzoneContainer.java |  5 ++---
 .../container/ozoneimpl/TestOzoneContainerWithTLS.java|  5 ++---
 .../container/ozoneimpl/TestSecureOzoneContainer.java |  5 ++---
 8 files changed, 30 insertions(+), 18 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index ff39103..69782ef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -166,7 +166,6 @@ public class DatanodeStateMachine implements Closeable {
   private void start() throws IOException {
 long now = 0;
 
-container.start();
 reportManager.init();
 initCommandHandlerThread(conf);
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index e4c0eb1..04eaa05 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -106,7 +106,8 @@ public class VersionEndpointTask implements
   volumeSet.writeUnlock();
 }
 
-ozoneContainer.getDispatcher().setScmId(scmId);
+// Start the container services after getting the version information
+ozoneContainer.start(scmId);
 
 EndpointStateMachine.EndPointStates nextState =
 rpcEndPoint.getState().getNextState();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 87266a9..ed7c88c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -180,12 +180,13 @@ public class OzoneContainer {
*
* @throws IOException
*/
-  public void start() throws IOException {
+  public void start(String scmId) throws IOException {
 LOG.info("Attempting to start container services.");
 startContainerScrub();
 writeChannel.start();
 readChannel.start();
 hddsDispatcher.init();
+hddsDispatcher.setScmId(scmId);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 29160ee..47438dc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -258,6 +258,21 @@ public class TestDatanodeStateMachine {
 
   task.execute(executorService);
   newState = task.await(10, TimeUnit.SECONDS);
+
+  // Wait for GetVersion call (called by task.execute) to finish. After
+  // Earlier task.execute called into GetVersion. Wait for the execution
+  // to finish and the endPointState to move to REGISTER state.
+  GenericTestUtils.waitFor(() -> {
+for (EndpointStateMachine endpoint :
+stateMachine.getConnectionManager().getValues()) {
+  if (endpoint.getState() !=
+  
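
The production half of this fix moves container startup out of DatanodeStateMachine.start() and into VersionEndpointTask, which now calls ozoneContainer.start(scmId) only after the VERSION response from SCM has supplied a non-null scmId. The test half, truncated above, therefore has to wait for that handshake before asserting; a sketch of the wait it introduces, with the poll interval and timeout values (100 ms, 30 s) illustrative rather than taken from the commit:

    // Poll until every SCM endpoint has finished GetVersion and moved on to
    // REGISTER; fail the test if that does not happen in time.
    GenericTestUtils.waitFor(() -> {
      for (EndpointStateMachine endpoint :
          stateMachine.getConnectionManager().getValues()) {
        if (endpoint.getState() !=
            EndpointStateMachine.EndPointStates.REGISTER) {
          return false;
        }
      }
      return true;
    }, 100, 30000);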

[hadoop] branch trunk updated: HDDS-1374. ContainerStateMap cannot find container while allocating blocks. (#735)

2019-04-16 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3ac3e50  HDDS-1374. ContainerStateMap cannot find container while allocating blocks. (#735)
3ac3e50 is described below

commit 3ac3e50b207e27c69e26669c47b1642827c76db0
Author: Bharat Viswanadham 
AuthorDate: Tue Apr 16 13:49:29 2019 -0700

HDDS-1374. ContainerStateMap cannot find container while allocating blocks. (#735)
---
 .../hadoop/hdds/scm/container/SCMContainerManager.java | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 1fa8395..80d7ec1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -246,7 +246,17 @@ public class SCMContainerManager implements ContainerManager {
   containerStateManager.allocateContainer(pipelineManager, type,
   replicationFactor, owner);
   // Add container to DB.
-  addContainerToDB(containerInfo);
+  try {
+addContainerToDB(containerInfo);
+  } catch (IOException ex) {
+// When adding to DB failed, we are removing from containerStateMap.
+// We should also remove from pipeline2Container Map in
+// PipelineStateManager.
+pipelineManager.removeContainerFromPipeline(
+containerInfo.getPipelineID(),
+new ContainerID(containerInfo.getContainerID()));
+throw ex;
+  }
   return containerInfo;
 } finally {
   lock.unlock();
@@ -440,6 +450,8 @@ public class SCMContainerManager implements ContainerManager {
 } catch (IOException ex) {
   // If adding to containerStore fails, we should remove the container
   // from in-memory map.
+  LOG.error("Add Container to DB failed for ContainerID #{}",
+  containerInfo.getContainerID());
   try {
 containerStateManager.removeContainer(containerInfo.containerID());
   } catch (ContainerNotFoundException cnfe) {
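
Both hunks enforce the same compensation rule: the container is registered in memory (in containerStateManager and in the pipeline-to-container map) before the durable write, so a failed DB write must undo those registrations before the exception propagates, or SCM would keep allocating blocks on a container its store does not know about. Condensed from the two hunks above, with surrounding fields and types assumed from SCMContainerManager:

    ContainerInfo containerInfo = containerStateManager.allocateContainer(
        pipelineManager, type, replicationFactor, owner);
    try {
      addContainerToDB(containerInfo);          // durable write, may fail
    } catch (IOException ex) {
      // Compensate: drop the in-memory pipeline mapping added above.
      // (addContainerToDB's own failure path removes the containerStateMap
      // entry, as the second hunk shows.)
      pipelineManager.removeContainerFromPipeline(
          containerInfo.getPipelineID(),
          new ContainerID(containerInfo.getContainerID()));
      throw ex;
    }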





[hadoop] branch trunk updated: HDDS-1432. Ozone client list command truncates response without any indication. Contributed by Siddharth Wagle.

2019-04-16 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f2ab279  HDDS-1432. Ozone client list command truncates response without any indication. Contributed by Siddharth Wagle.
f2ab279 is described below

commit f2ab2795db0da1c912f86855031604de389411da
Author: Arpit Agarwal 
AuthorDate: Tue Apr 16 12:35:49 2019 -0700

HDDS-1432. Ozone client list command truncates response without any indication. Contributed by Siddharth Wagle.
---
 .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java  | 5 +
 .../org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java   | 7 +++
 2 files changed, 12 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 1b10135..0b53f69 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -1114,11 +1114,16 @@ public class TestOzoneShell {
 }
 
 out.reset();
+String msgText = "Listing first 3 entries of the result. " +
+"Use --length (-l) to override max returned keys.";
 args =
 new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
 "--length", "3"};
 execute(shell, args);
 commandOutput = out.toString();
+assertTrue("Expecting output to start with " + msgText,
+commandOutput.contains(msgText));
+commandOutput = commandOutput.replace(msgText, "");
 keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput,
 KeyInfo.class);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
index 5642bc7..111ce16 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
@@ -90,12 +90,19 @@ public class ListKeyHandler extends Handler {
 startKey);
 List keyInfos = new ArrayList<>();
 
+int maxKeyLimit = maxKeys;
 while (maxKeys > 0 && keyIterator.hasNext()) {
   KeyInfo key = OzoneClientUtils.asKeyInfo(keyIterator.next());
   keyInfos.add(key);
   maxKeys -= 1;
 }
 
+// More keys were returned notify about max length
+if (keyIterator.hasNext()) {
+  System.out.println("Listing first " + maxKeyLimit + " entries of the " +
+  "result. Use --length (-l) to override max returned keys.");
+}
+
 if (isVerbose()) {
   System.out.printf("Found : %d keys for bucket %s in volume : %s ",
   keyInfos.size(), bucketName, volumeName);
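
The handler change is a small pagination-feedback pattern: consume at most the requested number of entries, then use the iterator's remaining state to tell the user the listing was cut off. Condensed from the hunk above (keyIterator, keyInfos and maxKeys come from the surrounding method):

    int maxKeyLimit = maxKeys;
    while (maxKeys > 0 && keyIterator.hasNext()) {
      keyInfos.add(OzoneClientUtils.asKeyInfo(keyIterator.next()));
      maxKeys -= 1;
    }
    // If the iterator is not exhausted, the response was truncated at the
    // --length limit; say so instead of silently returning a partial list.
    if (keyIterator.hasNext()) {
      System.out.println("Listing first " + maxKeyLimit + " entries of the "
          + "result. Use --length (-l) to override max returned keys.");
    }

The test half asserts that the notice is printed and strips it from the captured output before parsing the remaining JSON.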





[hadoop] branch branch-2.8 updated: HDFS-10477. Stop decommission a rack of DataNodes caused NameNode fail over to standby. Contributed by yunjiong zhao, Wei-Chiu Chuang and star.

2019-04-16 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new 93394c8  HDFS-10477. Stop decommission a rack of DataNodes caused NameNode fail over to standby. Contributed by yunjiong zhao, Wei-Chiu Chuang and star.
93394c8 is described below

commit 93394c8f5541c49aa54ee26d45f7dc77f86ec02c
Author: Wei-Chiu Chuang 
AuthorDate: Tue Apr 16 11:09:16 2019 -0700

HDFS-10477. Stop decommission a rack of DataNodes caused NameNode fail over to standby. Contributed by yunjiong zhao, Wei-Chiu Chuang and star.

Signed-off-by: Wei-Chiu Chuang 
---
 .../hdfs/server/blockmanagement/BlockManager.java  | 53 --
 1 file changed, 39 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e2bdfcb..4012f57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3430,21 +3430,46 @@ public class BlockManager implements BlockStatsMXBean {
 if (!isPopulatingReplQueues()) {
   return;
 }
-final Iterator<BlockInfo> it = srcNode.getBlockIterator();
+
 int numOverReplicated = 0;
-while(it.hasNext()) {
-  final BlockInfo block = it.next();
-  short expectedReplication = block.getReplication();
-  NumberReplicas num = countNodes(block);
-  int numCurrentReplica = num.liveReplicas();
-  if (numCurrentReplica > expectedReplication) {
-// over-replicated block 
-processOverReplicatedBlock(block, expectedReplication, null, null);
-numOverReplicated++;
-  }
-}
-LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on 
" +
-srcNode + " during recommissioning");
+for (DatanodeStorageInfo datanodeStorageInfo : srcNode.getStorageInfos()) {
+  // the namesystem lock is released between iterations. Make sure the
+  // storage is not removed before continuing.
+  if (srcNode.getStorageInfo(datanodeStorageInfo.getStorageID()) == null) {
+continue;
+  }
+  final Iterator<BlockInfo> it = datanodeStorageInfo.getBlockIterator();
+  while (it.hasNext()) {
+final BlockInfo block = it.next();
+if (block.isDeleted()) {
+  //Orphan block, will be handled eventually, skip
+  continue;
+}
+short expectedReplication = this.getExpectedReplicaNum(block);
+NumberReplicas num = countNodes(block);
+int numCurrentReplica = num.liveReplicas();
+if (numCurrentReplica > expectedReplication) {
+  // over-replicated block
+  processOverReplicatedBlock(block, expectedReplication, null,
+  null);
+  numOverReplicated++;
+}
+  }
+  // When called by tests like TestDefaultBlockPlacementPolicy.
+  // testPlacementWithLocalRackNodesDecommissioned, it is not protected by
+  // lock, only when called by DatanodeManager.refreshNodes have writeLock
+  if (namesystem.hasWriteLock()) {
+namesystem.writeUnlock();
+try {
+  Thread.sleep(1);
+} catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
+}
+namesystem.writeLock();
+  }
+}
+LOG.info("Invalidated " + numOverReplicated +
+" over-replicated blocks on " + srcNode + " during recommissioning");
   }
 
   /**
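
The core of the fix is the lock-yield idiom in the new loop: instead of scanning every block of the recommissioned node under one long namesystem write-lock hold (which is how a rack-sized recommission could stall the NameNode into a failover), the scan is chunked per storage and the lock is briefly released between chunks. The idiom, extracted from the hunk above:

    // Yield between per-storage scans so other handlers can make progress.
    // Guarded because some test callers invoke this without the write lock.
    if (namesystem.hasWriteLock()) {
      namesystem.writeUnlock();
      try {
        Thread.sleep(1);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();   // preserve interrupt status
      }
      namesystem.writeLock();
    }

Because the lock is dropped mid-scan, each iteration first re-checks that the storage still exists and skips deleted (orphan) blocks before counting replicas, as the added null and isDeleted() checks show.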





[hadoop] branch trunk updated: YARN-9123. Clean up and split testcases in TestNMWebServices for GPU support. Contributed by Szilard Nemeth.

2019-04-16 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b8086ae  YARN-9123. Clean up and split testcases in TestNMWebServices for GPU support. Contributed by Szilard Nemeth.
b8086ae is described below

commit b8086aed86ddf5bad19951b5ca2125369c882b8f
Author: Szilard Nemeth 
AuthorDate: Tue Apr 16 11:06:25 2019 -0700

YARN-9123. Clean up and split testcases in TestNMWebServices for GPU support. Contributed by Szilard Nemeth.

Signed-off-by: Wei-Chiu Chuang 
---
 .../nodemanager/webapp/TestNMWebServices.java  | 216 +
 1 file changed, 131 insertions(+), 85 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index dbd980b..62774f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
+import com.google.common.collect.ImmutableMap;
 import com.google.inject.Guice;
 import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.ClientResponse;
@@ -74,7 +75,6 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.w3c.dom.Document;
@@ -93,13 +93,13 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
@@ -110,6 +110,7 @@ import static org.mockito.Mockito.when;
  */
 public class TestNMWebServices extends JerseyTestBase {
 
+  private static final long NM_RESOURCE_VALUE = 1000L;
   private static NodeManager.NMContext nmContext;
   private static ResourceView resourceView;
   private static ApplicationACLsManager aclsManager;
@@ -189,6 +190,89 @@ public class TestNMWebServices extends JerseyTestBase {
 Guice.createInjector(new WebServletModule()));
   }
 
+  private void setupMockPluginsWithNmResourceInfo() throws YarnException {
+ResourcePlugin mockPlugin1 = mock(ResourcePlugin.class);
+NMResourceInfo nmResourceInfo1 = new NMResourceInfo() {
+  private long a = NM_RESOURCE_VALUE;
+
+  public long getA() {
+return a;
+  }
+};
+when(mockPlugin1.getNMResourceInfo()).thenReturn(nmResourceInfo1);
+
+ResourcePluginManager pluginManager = createResourceManagerWithPlugins(
+ImmutableMap.builder()
+.put("resource-1", mockPlugin1)
+.put("yarn.io/resource-1", mockPlugin1)
+.put("resource-2", mock(ResourcePlugin.class))
+.build()
+);
+
+nmContext.setResourcePluginManager(pluginManager);
+  }
+
+  private void setupMockPluginsWithGpuResourceInfo() throws YarnException {
+GpuDeviceInformation gpuDeviceInformation = new GpuDeviceInformation();
+gpuDeviceInformation.setDriverVersion("1.2.3");
+gpuDeviceInformation.setGpus(Arrays.asList(new PerGpuDeviceInformation()));
+
+ResourcePlugin mockPlugin1 = mock(ResourcePlugin.class);
+List<GpuDevice> totalGpuDevices = Arrays.asList(
+new GpuDevice(1, 1), new GpuDevice(2, 2), new GpuDevice(3, 3));
+List<AssignedGpuDevice> assignedGpuDevices = Arrays.asList(
+new AssignedGpuDevice(2, 2, createContainerId(1)),
+new AssignedGpuDevice(3, 3, createContainerId(2)));
+NMResourceInfo nmResourceInfo1 = new NMGpuResourceInfo(gpuDeviceInformation,
+totalGpuDevices,
+assignedGpuDevices);
+when(mockPlugin1.getNMResourceInfo()).thenReturn(nmResourceInfo1);
+
+ResourcePluginManager pluginManager = createResourceManagerWithPlugins(
+ImmutableMap.builder()
+.put("resource-1", mockPlugin1)
+.put("yarn.io/resource-1", mockPlugin1)
+.put("resource-2", mock(ResourcePlugin.class))
+   

[hadoop] branch trunk updated: HDFS-14418. Remove redundant super user priveledge checks from namenode. Contributed by Ayush Saxena.

2019-04-16 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new be6c801  HDFS-14418. Remove redundant super user priveledge checks from namenode. Contributed by Ayush Saxena.
be6c801 is described below

commit be6c8014e66be919388269b70cb2966c35b8c578
Author: Inigo Goiri 
AuthorDate: Tue Apr 16 10:34:31 2019 -0700

HDFS-14418. Remove redundant super user priveledge checks from namenode. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  3 --
 .../hdfs/server/namenode/NameNodeRpcServer.java|  1 -
 .../hadoop/hdfs/TestDistributedFileSystem.java | 55 ++
 3 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 82015b2..9389719 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7397,7 +7397,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
   keyName, src);
   final FSPermissionChecker pc = getPermissionChecker();
-  checkSuperuserPrivilege(pc);
   checkOperation(OperationCategory.WRITE);
   final FileStatus resultingStat;
   writeLock();
@@ -7459,7 +7458,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 boolean success = false;
 checkOperation(OperationCategory.READ);
 final FSPermissionChecker pc = getPermissionChecker();
-checkSuperuserPrivilege(pc);
 readLock();
 try {
   checkOperation(OperationCategory.READ);
@@ -7497,7 +7495,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 boolean success = false;
 checkOperation(OperationCategory.READ);
 final FSPermissionChecker pc = getPermissionChecker();
-checkSuperuserPrivilege(pc);
 readLock();
 try {
   checkOperation(OperationCategory.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 525d9c8..7a2a81c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1331,7 +1331,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // NamenodeProtocol
   public CheckpointSignature rollEditLog() throws IOException {
 checkNNStartup();
-namesystem.checkSuperuserPrivilege();
 return namesystem.rollEditLog();
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 60ff614..8ad7085 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -97,6 +97,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -104,6 +105,7 @@ import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
@@ -1805,6 +1807,59 @@ public class TestDistributedFileSystem {
   }
 
   @Test
+  public void testSuperUserPrivilege() throws Exception {
+HdfsConfiguration conf = new HdfsConfiguration();
+File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
+final Path jksPath = new Path(tmpDir.toString(), "test.jks");
+conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+JavaKeyStoreProvider.SCHEME_NAME + "://file" + 
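
The new test (truncated above) exercises these code paths as a non-superuser and asserts the outcome through the RPC layer. A hedged sketch of the usual shape of such a check, assuming a running MiniDFSCluster (cluster, conf) and using the helpers this diff imports (LambdaTestUtils, RemoteException); the operation and the expected message text are illustrative, not copied from the commit:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.test.LambdaTestUtils;

    // Create a restricted user and a FileSystem bound to that user.
    UserGroupInformation user = UserGroupInformation
        .createUserForTesting("regularUser", new String[] {"group1"});
    FileSystem userFs = user.doAs((PrivilegedExceptionAction<FileSystem>)
        () -> FileSystem.get(cluster.getFileSystem().getUri(), conf));

    // A superuser-only operation must still fail for this user over RPC,
    // even though the redundant in-namenode check was removed.
    LambdaTestUtils.intercept(RemoteException.class,
        "Superuser privilege is required",
        () -> ((DistributedFileSystem) userFs).rollEdits());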

[hadoop] branch trunk updated: YARN-9466. Fixed application catalog navigation bar height in Safari. Contributed by Eric Yang

2019-04-16 Thread billie
This is an automated email from the ASF dual-hosted git repository.

billie pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2364c7d  YARN-9466. Fixed application catalog navigation bar height in Safari. Contributed by Eric Yang
2364c7d is described below

commit 2364c7d0bf22f042b6a564b863fcfdbac48c4bfb
Author: Billie Rinaldi 
AuthorDate: Tue Apr 16 10:04:27 2019 -0700

YARN-9466. Fixed application catalog navigation bar height in Safari. Contributed by Eric Yang
---
 .../src/main/webapp/css/bootstrap-hadoop.css   | 55 +-
 .../src/main/webapp/css/specific.css   | 42 -
 .../src/main/webapp/index.html | 23 +
 .../src/main/webapp/partials/home.html |  6 +--
 4 files changed, 102 insertions(+), 24 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/css/bootstrap-hadoop.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/css/bootstrap-hadoop.css
index 231f9a9..d6f9fa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/css/bootstrap-hadoop.css
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/css/bootstrap-hadoop.css
@@ -189,6 +189,11 @@
   background-color: #FFF;
   box-shadow: 0 0 2px 0 #1391c1;
 }
+.btn-secondary:visited {
+  color: #429929;
+  background-color: #FFF;
+  box-shadow: 0 0 2px 0 #1391c1;
+}
 .btn-secondary[disabled],
 .btn-secondary:focus[disabled],
 .btn-secondary.disabled,
@@ -772,20 +777,17 @@ input.radio:checked + label:after {
   border-radius: 6px;
 }
 .navigation-bar-container {
-  height: auto;
-  width: 230px;
-  background-color: #323544;
+  min-width: 230px;
   padding: 0;
   -ms-overflow-style: none;
   transition: width 0.5s ease-out;
   -webkit-font-smoothing: antialiased;
+  z-index: 999;
 }
 .navigation-bar-container ul.nav.side-nav-header {
-  width: 230px;
   transition: width 0.5s ease-out;
 }
 .navigation-bar-container ul.nav.side-nav-header li.navigation-header {
-  background: #313d54;
   padding: 15px 5px 15px 25px;
   height: 55px;
 }
@@ -849,8 +851,7 @@ input.radio:checked + label:after {
 }
 .navigation-bar-container ul.nav.side-nav-menu,
 .navigation-bar-container ul.nav.side-nav-footer {
-  background-color: #323544;
-  width: 230px;
+  min-width: 230px;
   transition: width 0.5s ease-out;
 }
 .navigation-bar-container ul.nav.side-nav-menu li,
@@ -858,15 +859,47 @@ input.radio:checked + label:after {
   padding: 0;
   margin: 0;
 }
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer span.navbar-close,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer span.navbar-close,
+.navigation-bar-container ul.nav.side-nav-menu li.submenu-li span.navbar-close,
+.navigation-bar-container ul.nav.side-nav-footer li.submenu-li span.navbar-close,
+.navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li span.navbar-close,
+.navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li span.navbar-close {
+  text-align:right;
+}
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > span.line,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > span.line,
+.navigation-bar-container ul.nav.side-nav-menu li.submenu-li > span.line,
+.navigation-bar-container ul.nav.side-nav-footer li.submenu-li > span.line,
+.navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > span.line,
+.navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > span.line {
+  width: 49%;
+  display:inline-block;
+  height: 35px;
+  padding: 10px 5px 10px 25px;
+  white-space: nowrap;
+}
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer span.line a,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer span.line a,
+.navigation-bar-container ul.nav.side-nav-menu li.submenu-li span.line a,
+.navigation-bar-container ul.nav.side-nav-footer li.submenu-li span.line a,
+.navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li span.line a,
+.navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li span.line a {
+  font-family: 'Roboto', sans-serif;
+  font-weight: normal;
+  font-style: normal;
+  line-height: 1;
+  color: #333;
+  font-size: 14px;
+  color: #b8bec4;
+}
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
 .navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a,
 .navigation-bar-container ul.nav.side-nav-menu li.submenu-li > a,
 .navigation-bar-container 

[hadoop] branch trunk updated: YARN-8530. Add SPNEGO filter to application catalog. Contributed by Eric Yang

2019-04-16 Thread billie
This is an automated email from the ASF dual-hosted git repository.

billie pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ad86588  YARN-8530. Add SPNEGO filter to application catalog. Contributed by Eric Yang
ad86588 is described below

commit ad865888a6980d1f1bd6c56dbac1252aeed8091b
Author: Billie Rinaldi 
AuthorDate: Tue Apr 16 09:52:14 2019 -0700

YARN-8530. Add SPNEGO filter to application catalog. Contributed by Eric Yang
---
 .../src/main/scripts/entrypoint.sh | 11 +
 .../src/main/scripts/setup-image.sh|  5 ++
 .../application/AppCatalogInitializer.java | 54 ++
 .../src/main/webapp/WEB-INF/web.xml| 42 +
 .../src/site/markdown/yarn-service/Examples.md | 10 
 5 files changed, 122 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/entrypoint.sh b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/entrypoint.sh
index 1666063..dfbd7e3 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/entrypoint.sh
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/entrypoint.sh
@@ -42,4 +42,15 @@ if [ -e "$KEYTAB" ]; then
  export JAVA_OPTS="$JAVA_OPTS -Djava.security.auth.login.config=/etc/tomcat/jaas.config -Djava.security.krb5.conf=/etc/krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false"
   template_generator /etc/tomcat/jaas.config.template /etc/tomcat/jaas.config
 fi
+if [ -e "$SPNEGO_KEYTAB" ]; then
+  sed -i.bak 's/authentication.type=.*$/authentication.type=kerberos/g' /etc/tomcat/catalina.properties
+  sed -i.bak 's/simple.anonymous.allowed=.*$/simple.anonymous.allowed=false/g' /etc/tomcat/catalina.properties
+  if [ -z "$SPNEGO_PRINCIPAL" ]; then
+echo "kerberos.principal=HTTP/$HOSTNAME" >> /etc/tomcat/catalina.properties
+  else
+echo "kerberos.principal=$SPNEGO_PRINCIPAL" >> 
/etc/tomcat/catalina.properties
+  fi
+  echo "kerberos.keytab=$SPNEGO_KEYTAB" >> /etc/tomcat/catalina.properties
+  echo "hostname=$HOSTNAME" >> /etc/tomcat/catalina.properties
+fi
 /usr/libexec/tomcat/server start
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/setup-image.sh b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/setup-image.sh
index c9376f9..8cc1ec6 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/setup-image.sh
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/src/main/scripts/setup-image.sh
@@ -19,3 +19,8 @@ mkdir -p /etc/hadoop
 mkdir -p /opt/apache/solr/server/logs
 chmod -R 777 /opt/apache/solr/server/logs /var/log/tomcat /var/cache/tomcat /var/lib/tomcat/webapps /opt/apache/solr/server/solr
 chmod 777 /etc/tomcat
+{
+  echo "auth.filter=org.apache.hadoop.security.authentication.server.AuthenticationFilter"
+  echo "authentication.type=simple"
+  echo "simple.anonymous.allowed=true"
+} >> /etc/tomcat/catalina.properties
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogInitializer.java
new file mode 100644
index 000..92e95d8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogInitializer.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[hadoop] branch HDFS-13891 updated: HDFS-14422. RBF: Router shouldn't allow READ operations in safe mode. Contributed by Inigo Goiri.

2019-04-16 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new bd3161e  HDFS-14422. RBF: Router shouldn't allow READ operations in safe mode. Contributed by Inigo Goiri.
bd3161e is described below

commit bd3161e83df3a6e939156059cee42f1020315b51
Author: Ayush Saxena 
AuthorDate: Tue Apr 16 19:45:51 2019 +0530

HDFS-14422. RBF: Router shouldn't allow READ operations in safe mode. Contributed by Inigo Goiri.
---
 .../federation/resolver/MountTableResolver.java| 18 -
 .../server/federation/router/RouterRpcServer.java  | 15 +++-
 .../federation/router/TestRouterSafemode.java  | 44 ++
 3 files changed, 74 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 03b051d..8baa5e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -87,6 +87,8 @@ public class MountTableResolver
 
   /** If the tree has been initialized. */
   private boolean init = false;
+  /** If the mount table is manually disabled*/
+  private boolean disabled = false;
   /** Path -> Remote HDFS location. */
   private final TreeMap tree = new TreeMap<>();
   /** Path -> Remote location. */
@@ -391,7 +393,14 @@ public class MountTableResolver
   };
   return this.locationCache.get(path, meh);
 } catch (ExecutionException e) {
-  throw new IOException(e);
+  Throwable cause = e.getCause();
+  final IOException ioe;
+  if (cause instanceof IOException) {
+ioe = (IOException) cause;
+  } else {
+ioe = new IOException(cause);
+  }
+  throw ioe;
 } finally {
   readLock.unlock();
 }
@@ -504,7 +513,7 @@ public class MountTableResolver
* @throws StateStoreUnavailableException If it cannot connect to the store.
*/
   private void verifyMountTable() throws StateStoreUnavailableException {
-if (!this.init) {
+if (!this.init || disabled) {
   throw new StateStoreUnavailableException("Mount Table not initialized");
 }
   }
@@ -654,4 +663,9 @@ public class MountTableResolver
   public void setDefaultNSEnable(boolean defaultNSRWEnable) {
 this.defaultNSEnable = defaultNSRWEnable;
   }
+
+  @VisibleForTesting
+  public void setDisabled(boolean disable) {
+this.disabled = disable;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index b934355..3a2f910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -114,6 +114,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -480,17 +481,26 @@ public class RouterRpcServer extends AbstractService
 // Store the category of the operation category for this thread
 opCategory.set(op);
 
-// We allow unchecked and read operations
+// We allow unchecked and read operations to try, fail later
 if (op == OperationCategory.UNCHECKED || op == OperationCategory.READ) {
   return;
 }
+checkSafeMode();
+  }
 
+  /**
+   * Check if the Router is in safe mode.
+   * @throws StandbyException If the Router is in safe mode and cannot serve
+   *  client requests.
+   */
+  private void checkSafeMode() throws StandbyException {
 RouterSafemodeService safemodeService = router.getSafemodeService();
 if (safemodeService != null && safemodeService.isInSafeMode()) {
   // Throw standby exception, router is not available
   if (rpcMonitor != null) {
 

[hadoop] branch trunk updated: HDDS-1380. Add functonality to write from multiple clients in MiniOzoneChaosCluster. Contributed by Shashikant Banerjee.

2019-04-16 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bfcb653  HDDS-1380. Add functonality to write from multiple clients in MiniOzoneChaosCluster. Contributed by Shashikant Banerjee.
bfcb653 is described below

commit bfcb6534cd59b8e3d20f335d5833202c45b95a9c
Author: Shashikant Banerjee 
AuthorDate: Tue Apr 16 18:52:07 2019 +0530

HDDS-1380. Add functonality to write from multiple clients in MiniOzoneChaosCluster. Contributed by Shashikant Banerjee.
---
 .../org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java| 13 -
 .../org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java | 14 +++---
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
index 005a528..3623747 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
@@ -56,10 +56,11 @@ public class MiniOzoneLoadGenerator {
 
   private AtomicBoolean isWriteThreadRunning;
 
-  private final OzoneBucket ozoneBucket;
+  private final List<OzoneBucket> ozoneBuckets;
 
-  MiniOzoneLoadGenerator(OzoneBucket bucket, int numThreads, int numBuffers) {
-this.ozoneBucket = bucket;
+  MiniOzoneLoadGenerator(List<OzoneBucket> bucket, int numThreads,
+  int numBuffers) {
+this.ozoneBuckets = bucket;
 this.numWriteThreads = numThreads;
 this.numBuffers = numBuffers;
 this.writeExecutor = new ThreadPoolExecutor(numThreads, numThreads, 100,
@@ -94,7 +95,9 @@ public class MiniOzoneLoadGenerator {
   int bufferCapacity = buffer.capacity();
 
   String keyName = threadName + "-" + index;
-  try (OzoneOutputStream stream = ozoneBucket.createKey(keyName,
+  OzoneBucket bucket =
+  ozoneBuckets.get((int) (Math.random() * ozoneBuckets.size()));
+  try (OzoneOutputStream stream = bucket.createKey(keyName,
   bufferCapacity, ReplicationType.RATIS, ReplicationFactor.THREE,
   new HashMap<>())) {
 stream.write(buffer.array());
@@ -106,7 +109,7 @@ public class MiniOzoneLoadGenerator {
 //  to closed container. add a break here once that is fixed.
   }
 
-  try (OzoneInputStream stream = ozoneBucket.readKey(keyName)) {
+  try (OzoneInputStream stream = bucket.readKey(keyName)) {
 byte[] readBuffer = new byte[bufferCapacity];
 int readLen = stream.read(readBuffer);
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
index a979c40..8bc3a52 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
@@ -29,7 +29,8 @@ import picocli.CommandLine.Command;
 import picocli.CommandLine.Option;
 import picocli.CommandLine;
 
-
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -55,6 +56,10 @@ public class TestMiniChaosOzoneCluster implements Runnable {
   description = "total run time")
   private static int numMinutes = 1440; // 1 day by default
 
+  @Option(names = {"-n", "--numClients"},
+  description = "no of clients writing to OM")
+  private static int numClients = 3;
+
   @Option(names = {"-i", "--failureInterval"},
   description = "time between failure events in seconds")
   private static int failureInterval = 5; // 5 second period between failures.
@@ -74,9 +79,12 @@ public class TestMiniChaosOzoneCluster implements Runnable {
 store.createVolume(volumeName);
 OzoneVolume volume = store.getVolume(volumeName);
 volume.createBucket(bucketName);
-OzoneBucket ozoneBucket = volume.getBucket(bucketName);
+List<OzoneBucket> ozoneBuckets = new ArrayList<>(numClients);
+for (int i = 0; i < numClients; i++) {
+  ozoneBuckets.add(volume.getBucket(bucketName));
+}
 loadGenerator =
-new MiniOzoneLoadGenerator(ozoneBucket, numThreads, numBuffers);
+new MiniOzoneLoadGenerator(ozoneBuckets, numThreads, numBuffers);
   }
 
   /**
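
Previously every writer thread in the load generator funneled through a single OzoneBucket handle; now each simulated client gets its own handle and every write picks one at random, which is what lets the chaos cluster drive OM from multiple clients. Condensed from the two hunks above (volume, bucketName and numClients come from the surrounding test setup):

    // One bucket handle per simulated client.
    List<OzoneBucket> ozoneBuckets = new ArrayList<>(numClients);
    for (int i = 0; i < numClients; i++) {
      ozoneBuckets.add(volume.getBucket(bucketName));
    }

    // In the write path: route each key through a randomly chosen handle.
    OzoneBucket bucket =
        ozoneBuckets.get((int) (Math.random() * ozoneBuckets.size()));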





[hadoop] branch trunk updated: HADOOP-15014. KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

2019-04-16 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 008766c  HADOOP-15014. KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.
008766c is described below

commit 008766c119d9ed9d568f9458ed0c02136962da5b
Author: Zsombor Gegesy 
AuthorDate: Tue Apr 16 05:27:29 2019 -0700

HADOOP-15014. KMS should log the IP address of the clients. Contributed by Zsombor Gegesy.

Signed-off-by: Wei-Chiu Chuang 
---
 .../key/kms/server/KMSExceptionsProvider.java  |  5 +-
 .../hadoop/crypto/key/kms/server/KMSMDCFilter.java | 58 +-
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
index 3d97753..ceaa8bc 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
@@ -111,9 +111,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
 UserGroupInformation ugi = KMSMDCFilter.getUgi();
 String method = KMSMDCFilter.getMethod();
 String url = KMSMDCFilter.getURL();
+String remoteClientAddress = KMSMDCFilter.getRemoteClientAddress();
 String msg = getOneLineMessage(ex);
-LOG.warn("User:'{}' Method:{} URL:{} Response:{}-{}", ugi, method, url,
-status, msg, ex);
+LOG.warn("User:'{}' Method:{} URL:{} From:{} Response:{}-{}", ugi, method,
+url, remoteClientAddress, status, msg, ex);
   }
 
 }
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
index 81591e5..f3c0bbd 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -38,29 +40,40 @@ import java.io.IOException;
 public class KMSMDCFilter implements Filter {
 
   private static class Data {
-private UserGroupInformation ugi;
-private String method;
-private StringBuffer url;
+private final UserGroupInformation ugi;
+private final String method;
+private final String url;
+private final String remoteClientAddress;
 
-private Data(UserGroupInformation ugi, String method, StringBuffer url) {
+private Data(UserGroupInformation ugi, String method, String url,
+String remoteClientAddress) {
   this.ugi = ugi;
   this.method = method;
   this.url = url;
+  this.remoteClientAddress = remoteClientAddress;
 }
   }
 
  private static final ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
 
   public static UserGroupInformation getUgi() {
-return DATA_TL.get().ugi;
+Data data = DATA_TL.get();
+return data != null ? data.ugi : null;
   }
 
   public static String getMethod() {
-return DATA_TL.get().method;
+Data data = DATA_TL.get();
+return data != null ? data.method : null;
   }
 
   public static String getURL() {
-return DATA_TL.get().url.toString();
+Data data = DATA_TL.get();
+return data != null ? data.url : null;
+  }
+
+  public static String getRemoteClientAddress() {
+Data data = DATA_TL.get();
+return data != null ? data.remoteClientAddress : null;
   }
 
   @Override
@@ -72,22 +85,41 @@ public class KMSMDCFilter implements Filter {
   FilterChain chain)
   throws IOException, ServletException {
 try {
-  DATA_TL.remove();
+  clearContext();
   UserGroupInformation ugi = HttpUserGroupInformation.get();
-  String method = ((HttpServletRequest) request).getMethod();
-  StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
-  String queryString = ((HttpServletRequest) request).getQueryString();
+  HttpServletRequest httpServletRequest = (HttpServletRequest) request;
+  String method = httpServletRequest.getMethod();
+  StringBuffer requestURL = httpServletRequest.getRequestURL();
+  String queryString = httpServletRequest.getQueryString();
   if