[hadoop] branch trunk updated: HDDS-1264. Remove Parametrized in TestOzoneShell. (#614)

2019-03-26 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b226958  HDDS-1264. Remove Parametrized in TestOzoneShell. (#614)
b226958 is described below

commit b2269581f74df4045cb169a4ce328957c26062ae
Author: Vivek Ratnavel Subramanian 
AuthorDate: Tue Mar 26 22:07:22 2019 -0700

HDDS-1264. Remove Parametrized in TestOzoneShell. (#614)
---
 .../hadoop/ozone/ozShell/TestOzoneShell.java   | 28 ++
 1 file changed, 2 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 7f77f87..baea4ec 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -29,7 +29,6 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
@@ -54,7 +53,6 @@ import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
@@ -90,8 +88,6 @@ import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine;
@@ -104,7 +100,6 @@ import picocli.CommandLine.RunLast;
 /**
  * This test class specified for testing Ozone shell command.
  */
-@RunWith(value = Parameterized.class)
 public class TestOzoneShell {
 
   private static final Logger LOG =
@@ -128,16 +123,6 @@ public class TestOzoneShell {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-Object[][] params = new Object[][] {
-{RpcClient.class}};
-return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public Class clientProtocol;
   /**
* Create a MiniDFSCluster for testing with using distributed Ozone
* handler type.
@@ -182,16 +167,7 @@ public class TestOzoneShell {
   public void setup() {
 System.setOut(new PrintStream(out));
 System.setErr(new PrintStream(err));
-
-if(clientProtocol.equals(RestClient.class)) {
-  String hostName = cluster.getOzoneManager().getHttpServer()
-  .getHttpAddress().getHostName();
-  int port = cluster
-  .getOzoneManager().getHttpServer().getHttpAddress().getPort();
-  url = String.format("http://%s:%d", hostName, port);
-} else {
-  url = "o3://" + getOmAddress();
-}
+url = "o3://" + getOmAddress();
   }
 
   @After
@@ -552,7 +528,7 @@ public class TestOzoneShell {
   @Test
   public void testListVolume() throws Exception {
 LOG.info("Running testListVolume");
-String protocol = clientProtocol.getName().toLowerCase();
+String protocol = "rpcclient";
 String commandOutput, commandError;
 List volumes;
 final int volCount = 20;
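
Aside: the pattern removed here, reduced to a minimal self-contained sketch. A Parameterized runner that only ever supplies one value adds no coverage over a plain test, which is why the commit inlines the value and drops the runner. The class name and the "rpc" value below are illustrative, not from the commit.

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    // A Parameterized runner with a single parameter set, the shape this
    // commit deletes from TestOzoneShell. Removing @RunWith and the two
    // @Parameterized members leaves an ordinary JUnit 4 test.
    @RunWith(Parameterized.class)
    public class SingleParamTest {

      @Parameterized.Parameters
      public static Collection<Object[]> params() {
        return Arrays.asList(new Object[][] {{"rpc"}});
      }

      @Parameterized.Parameter
      @SuppressWarnings("visibilitymodifier")
      public String scheme;

      @Test
      public void usesScheme() {
        // Only one value is ever supplied, so the runner is pure overhead.
        Assert.assertEquals("rpc", scheme);
      }
    }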





[hadoop] branch trunk updated: HDDS-1262. In OM HA OpenKey call Should happen only leader OM. (#626)

2019-03-26 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eef8cae  HDDS-1262. In OM HA OpenKey call Should happen only leader OM. (#626)
eef8cae is described below

commit eef8cae7cf42c2d1622970e177d699546351587f
Author: Bharat Viswanadham 
AuthorDate: Tue Mar 26 21:48:01 2019 -0700

HDDS-1262. In OM HA OpenKey call Should happen only leader OM. (#626)
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   2 +
 .../org/apache/hadoop/ozone/audit/OMAction.java|   1 +
 .../hadoop/ozone/om/exceptions/OMException.java|   2 +
 .../ozone/om/protocol/OzoneManagerHAProtocol.java  |  30 
 .../src/main/proto/OzoneManagerProtocol.proto  |  16 ++
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 187 +
 .../org/apache/hadoop/ozone/om/KeyManager.java |  33 +++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  90 +++---
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  74 +++-
 .../ozone/om/ratis/OzoneManagerRatisClient.java|  25 +++
 .../ozone/om/ratis/OzoneManagerStateMachine.java   | 137 ++-
 .../protocolPB/OzoneManagerRequestHandler.java |  54 ++
 .../om/ratis/TestOzoneManagerStateMachine.java |   2 +-
 13 files changed, 624 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 280461a..be879d8 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -210,6 +210,8 @@ public final class OmUtils {
 case GetDelegationToken:
 case RenewDelegationToken:
 case CancelDelegationToken:
+case ApplyCreateKey:
+case ApplyInitiateMultiPartUpload:
   return false;
 default:
   LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index 3863a52..0cbab08 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -26,6 +26,7 @@ public enum OMAction implements AuditAction {
   ALLOCATE_BLOCK,
   ADD_ALLOCATE_BLOCK,
   ALLOCATE_KEY,
+  APPLY_ALLOCATE_KEY,
   COMMIT_KEY,
   CREATE_VOLUME,
   CREATE_BUCKET,
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 34980f6..b2f805a 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -23,6 +23,8 @@ import java.io.IOException;
  * Exception thrown by Ozone Manager.
  */
 public class OMException extends IOException {
+
+  public static final String STATUS_CODE = "STATUS_CODE=";
   private final OMException.ResultCodes result;
 
   /**
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
index 7390fe2..8357df2 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
@@ -20,6 +20,11 @@ package org.apache.hadoop.ozone.om.protocol;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 .KeyLocation;
 
@@ -52,4 +57,29 @@ public interface OzoneManagerHAProtocol {
   KeyLocation keyLocation) throws IOException;
 
 
+  /**
+   * Add the openKey entry with given keyInfo and clientID in to openKeyTable.
+   * This will be called only from applyTransaction, once after calling
+   * applyKey in startTransaction.
+   *
+   * @param omKeyArgs
+   * @param keyInfo
+   * @param clientID
+   * @throws IOException
+   */
+  void applyOpenKey(KeyArgs omKeyArgs, KeyInfo keyInfo, long clientID)
+  throws IOException;
+
+  /**
+   * Initiate multipart upload for the specified key.
+   *
+   * This will be called only from applyTransaction.
+   * @param omKeyAr
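
The interface methods above describe the apply-side contract: the leader validates and replicates the request, and every replica then applies the same state change. A minimal sketch of what an implementation along those lines could look like, with a plain in-memory map standing in for the OM's openKeyTable; the class name and the key layout below are assumptions for illustration, not the commit's actual KeyManagerImpl code.

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch only: a map stands in for the openKeyTable, and the key layout
    // (/volume/bucket/key/clientID) is assumed for illustration. On the
    // apply path every Ratis replica executes this with identical arguments,
    // so all replicas converge on the same table state.
    public class OpenKeyStoreSketch {

      private final Map<String, String> openKeyTable = new ConcurrentHashMap<>();

      public void applyOpenKey(String volume, String bucket, String key,
          String keyInfo, long clientID) throws IOException {
        String openKeyName =
            "/" + volume + "/" + bucket + "/" + key + "/" + clientID;
        if (openKeyTable.putIfAbsent(openKeyName, keyInfo) != null) {
          throw new IOException("Open key already exists: " + openKeyName);
        }
      }
    }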

[hadoop] branch trunk updated: HDFS-14304: High lock contention on hdfsHashMutex in libhdfs

2019-03-26 Thread todd
This is an automated email from the ASF dual-hosted git repository.

todd pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 18c57cf  HDFS-14304: High lock contention on hdfsHashMutex in libhdfs
18c57cf is described below

commit 18c57cf0464f4d1fa95899d75b2f59cae33c7c33
Author: Sahil Takiar 
AuthorDate: Tue Mar 19 12:20:56 2019 -0500

HDFS-14304: High lock contention on hdfsHashMutex in libhdfs

This closes #595

Signed-off-by: Todd Lipcon 
---
 .../src/main/native/libhdfs-tests/CMakeLists.txt   |   5 +-
 .../main/native/libhdfs-tests/native_mini_dfs.c|  42 +-
 .../src/main/native/libhdfs-tests/test_htable.c| 100 -
 .../src/main/native/libhdfs/CMakeLists.txt |   2 +-
 .../src/main/native/libhdfs/common/htable.c| 287 -
 .../src/main/native/libhdfs/common/htable.h| 161 ---
 .../src/main/native/libhdfs/exception.c|   6 +-
 .../src/main/native/libhdfs/hdfs.c | 464 ++---
 .../src/main/native/libhdfs/jclasses.c | 136 ++
 .../src/main/native/libhdfs/jclasses.h | 112 +
 .../src/main/native/libhdfs/jni_helper.c   | 223 +-
 .../src/main/native/libhdfs/jni_helper.h   |  37 +-
 .../src/main/native/libhdfs/os/mutexes.h   |   6 +-
 .../src/main/native/libhdfs/os/posix/mutexes.c |   2 +-
 .../native/libhdfs/os/posix/thread_local_storage.c |  10 +-
 .../src/main/native/libhdfs/os/windows/mutexes.c   |   4 +-
 16 files changed, 656 insertions(+), 941 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
index 08fc030..f16cc9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -29,8 +29,8 @@ include_directories(
 
 add_library(native_mini_dfs
 native_mini_dfs.c
-../libhdfs/common/htable.c
 ../libhdfs/exception.c
+../libhdfs/jclasses.c
 ../libhdfs/jni_helper.c
 ${OS_DIR}/mutexes.c
 ${OS_DIR}/thread_local_storage.c
@@ -39,6 +39,3 @@ add_library(native_mini_dfs
 add_executable(test_native_mini_dfs test_native_mini_dfs.c)
 target_link_libraries(test_native_mini_dfs native_mini_dfs ${JAVA_JVM_LIBRARY})
 add_test(test_test_native_mini_dfs test_native_mini_dfs)
-
-add_executable(test_htable ../libhdfs/common/htable.c test_htable.c)
-target_link_libraries(test_htable ${OS_LINK_LIBRARIES})
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
index 6938109..3af56f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -17,6 +17,7 @@
  */
 
 #include "exception.h"
+#include "jclasses.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"
 #include "platform.h"
@@ -36,9 +37,7 @@
 
 #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
 #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
 #define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
 
 struct NativeMiniDfsCluster {
 /**
@@ -60,8 +59,7 @@ static int hdfsDisableDomainSocketSecurity(void)
   errno = EINTERNAL;
   return -1;
 }
-jthr = invokeMethod(env, NULL, STATIC, NULL,
-"org/apache/hadoop/net/unix/DomainSocket",
+jthr = invokeMethod(env, NULL, STATIC, NULL, JC_DOMAIN_SOCKET,
 "disableBindPathValidation", "()V");
 if (jthr) {
 errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -126,11 +124,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
 "nmdCreate: new Configuration");
 goto error;
 }
-if (jthr) {
-printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-  "nmdCreate: Configuration::setBoolean");
-goto error;
-}
 // Disable 'minimum block size' -- it's annoying in tests.
 (*env)->DeleteLocalRef(env, jconfStr);
 jconfStr = NULL;
@@ -140,8 +133,9 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
   "nmdCreate: new String");
 goto error;
 }
-jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-"setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
+jthr = invokeMethod(env, NULL, INS
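
The diffstat shows the shape of the fix: the mutex-guarded hash table (common/htable.c) is removed and replaced by class references resolved once and cached (jclasses.c/jclasses.h), so hot paths no longer contend on hdfsHashMutex. A rough Java analogue of that caching idea, purely for illustration; the real fix is in the C code above.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Resolve each class once and reuse the reference on later calls,
    // instead of taking a global lock around a hash-table lookup every
    // time. jclasses.c does the native-side equivalent with cached global
    // jclass references created during initialization.
    public final class ClassCacheSketch {

      private static final ConcurrentMap<String, Class<?>> CACHE =
          new ConcurrentHashMap<>();

      private ClassCacheSketch() {
      }

      public static Class<?> get(String name) throws ClassNotFoundException {
        Class<?> clazz = CACHE.get(name);
        if (clazz == null) {
          // Class.forName throws a checked exception, so resolve first and
          // publish with putIfAbsent; racing duplicate lookups are harmless.
          clazz = Class.forName(name);
          Class<?> prev = CACHE.putIfAbsent(name, clazz);
          if (prev != null) {
            clazz = prev;
          }
        }
        return clazz;
      }
    }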

[hadoop] branch ozone-0.4 updated: HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

2019-03-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 299177e  HDDS-139. Output of createVolume can be improved. Contributed by Shweta.
299177e is described below

commit 299177eaa74b839781f02f53e62c99269456f22f
Author: Shweta Yakkali 
AuthorDate: Tue Mar 26 19:01:49 2019 -0700

HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

(cherry picked from commit f426b7ce8fb33d57e4187484448b9e0bfc04ccfa)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index a3214f3..d326cbc 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -269,8 +269,12 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
   builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
 }
 
-LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.",
-volumeName, owner, quota);
+if (volArgs.getQuota() == null) {
+  LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
+} else {
+  LOG.info("Creating Volume: {}, with {} as owner "
+  + "and quota set to {} bytes.", volumeName, owner, quota);
+}
 ozoneManagerClient.createVolume(builder.build());
   }
 





[hadoop] branch trunk updated: HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

2019-03-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f426b7c  HDDS-139. Output of createVolume can be improved. Contributed by Shweta.
f426b7c is described below

commit f426b7ce8fb33d57e4187484448b9e0bfc04ccfa
Author: Shweta Yakkali 
AuthorDate: Tue Mar 26 19:01:49 2019 -0700

HDDS-139. Output of createVolume can be improved. Contributed by Shweta.
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 9ba07d3..6ecda09 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -270,8 +270,12 @@ public class RpcClient implements ClientProtocol, KeyProviderTokenIssuer {
   builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
 }
 
-LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.",
-volumeName, owner, quota);
+if (volArgs.getQuota() == null) {
+  LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
+} else {
+  LOG.info("Creating Volume: {}, with {} as owner "
+  + "and quota set to {} bytes.", volumeName, owner, quota);
+}
 ozoneManagerClient.createVolume(builder.build());
   }
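
For reference, the effect of the change in isolation, as a hedged stand-alone snippet with made-up values; previously the message printed a quota clause even when no quota had been set.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Stand-alone illustration: the quota clause is only logged when a
    // quota was actually supplied. Volume name, owner and quota values
    // here are invented for the demo.
    public class CreateVolumeLogDemo {

      private static final Logger LOG =
          LoggerFactory.getLogger(CreateVolumeLogDemo.class);

      public static void main(String[] args) {
        String volumeName = "vol1";
        String owner = "hadoop";
        Long quota = null; // no quota requested for this volume

        if (quota == null) {
          LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
        } else {
          LOG.info("Creating Volume: {}, with {} as owner "
              + "and quota set to {} bytes.", volumeName, owner, quota);
        }
      }
    }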
 





[hadoop] branch trunk updated: HDFS-14348: Fix JNI exception handling issues in libhdfs

2019-03-26 Thread todd
This is an automated email from the ASF dual-hosted git repository.

todd pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fe29b39  HDFS-14348: Fix JNI exception handling issues in libhdfs
fe29b39 is described below

commit fe29b3901be1b06db92379c7b7fac4954253e6e2
Author: Sahil Takiar 
AuthorDate: Thu Mar 21 20:53:01 2019 -0700

HDFS-14348: Fix JNI exception handling issues in libhdfs

This closes #600

Signed-off-by: Todd Lipcon 
---
 .../src/main/native/libhdfs/hdfs.c | 55 +
 .../src/main/native/libhdfs/jni_helper.c   |  8 +-
 .../native/libhdfs/os/posix/thread_local_storage.c | 94 ++
 3 files changed, 108 insertions(+), 49 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 41caffd..ec0ad4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -2491,6 +2491,8 @@ int hadoopRzOptionsSetByteBufferPool(
 JNIEnv *env;
 jthrowable jthr;
 jobject byteBufferPool = NULL;
+jobject globalByteBufferPool = NULL;
+int ret;
 
 env = getJNIEnv();
 if (!env) {
@@ -2507,15 +2509,37 @@ int hadoopRzOptionsSetByteBufferPool(
   if (jthr) {
   printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
   "hadoopRzOptionsSetByteBufferPool(className=%s): ", className);
-  errno = EINVAL;
-  return -1;
+  ret = EINVAL;
+  goto done;
   }
-}
-if (opts->byteBufferPool) {
-// Delete any previous ByteBufferPool we had.
+  // Only set opts->byteBufferPool if creating a global reference is
+  // successful
+  globalByteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
+  if (!globalByteBufferPool) {
+  printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+  "hadoopRzOptionsSetByteBufferPool(className=%s): ",
+  className);
+  ret = EINVAL;
+  goto done;
+  }
+  // Delete any previous ByteBufferPool we had before setting a new one.
+  if (opts->byteBufferPool) {
+  (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+  }
+  opts->byteBufferPool = globalByteBufferPool;
+} else if (opts->byteBufferPool) {
+// If the specified className is NULL, delete any previous
+// ByteBufferPool we had.
 (*env)->DeleteGlobalRef(env, opts->byteBufferPool);
+opts->byteBufferPool = NULL;
+}
+ret = 0;
+done:
+destroyLocalReference(env, byteBufferPool);
+if (ret) {
+errno = ret;
+return -1;
 }
-opts->byteBufferPool = (*env)->NewGlobalRef(env, byteBufferPool);
 return 0;
 }
 
@@ -2570,8 +2594,7 @@ static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env,
 } else {
 jclass clazz = (*env)->FindClass(env, READ_OPTION);
 if (!clazz) {
-jthr = newRuntimeError(env, "failed "
-"to find class for %s", READ_OPTION);
+jthr = getPendingExceptionAndClear(env);
 goto done;
 }
 jthr = invokeMethod(env, &jVal, STATIC, NULL,
@@ -2697,6 +2720,7 @@ static int translateZCRException(JNIEnv *env, jthrowable exc)
 }
 if (!strcmp(className, "java.lang.UnsupportedOperationException")) {
 ret = EPROTONOSUPPORT;
+destroyLocalReference(env, exc);
 goto done;
 }
 ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL,
@@ -2896,8 +2920,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
 for (i = 0; i < jNumFileBlocks; ++i) {
 jFileBlock =
 (*env)->GetObjectArrayElement(env, jBlockLocations, i);
-if (!jFileBlock) {
-ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+jthr = (*env)->ExceptionOccurred(env);
+if (jthr || !jFileBlock) {
+ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
 "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
 "GetObjectArrayElement(%d)", path, start, length, i);
 goto done;
@@ -2930,8 +2955,9 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
 //Now parse each hostname
 for (j = 0; j < jNumBlockHosts; ++j) {
 jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
-if (!jHost) {
-ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+jthr = (*env)->ExceptionOccurred(env);
+if (jthr || !jHost) {
+ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
 "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId6

[hadoop] branch trunk updated: HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.

2019-03-26 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce4bafd  HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.
ce4bafd is described below

commit ce4bafdf442c004b6deb25eaa2fa7e947b8ad269
Author: Siyao Meng 
AuthorDate: Tue Mar 26 18:42:54 2019 +

HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.

Contributed by Siyao Meng
---
 .../hadoop-distcp/src/site/markdown/DistCp.md.vm   | 120 +
 1 file changed, 120 insertions(+)

diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 25ea7e2..3b7737b 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -13,6 +13,7 @@
 -->
 
 #set ( $H3 = '###' )
+#set ( $H4 = '####' )
 
 DistCp Guide
 =
@@ -23,6 +24,7 @@ DistCp Guide
  - [Usage](#Usage)
  - [Basic Usage](#Basic_Usage)
  - [Update and Overwrite](#Update_and_Overwrite)
+ - [Sync](#Sync)
  - [Command Line Options](#Command_Line_Options)
  - [Architecture of DistCp](#Architecture_of_DistCp)
  - [DistCp Driver](#DistCp_Driver)
@@ -192,6 +194,124 @@ $H3 Update and Overwrite
 
   If `-overwrite` is used, `1` is overwritten as well.
 
+$H3 Sync
+
+  `-diff` option syncs files from a source cluster to a target cluster with a
+  snapshot diff. It copies, renames and removes files in the snapshot diff list.
+
+  `-update` option must be included when `-diff` option is in use.
+
+  Most cloud providers don't work well with sync at the moment.
+
+  Usage:
+
+hadoop distcp -update -diff <from_snapshot> <to_snapshot> <src> <dst>
+
+  Example:
+
+hadoop distcp -update -diff snap1 snap2 /src/ /dst/
+
+  The command above applies changes from snapshot `snap1` to `snap2`
+  (i.e. snapshot diff from `snap1` to `snap2`) in `/src/` to `/dst/`.
+  Obviously, it requires `/src/` to have both snapshots `snap1` and `snap2`.
+  But the destination `/dst/` must also have a snapshot with the same
+  name as `<from_snapshot>`, in this case `snap1`. The destination `/dst/`
+  should not have new file operations (create, rename, delete) since `snap1`.
+  Note that when this command finishes, a new snapshot `snap2` will NOT be
+  created at `/dst/`.
+
+  `-update` is required to use `-diff` option.
+
+  For instance, in `/src/`, if `1.txt` is added and `2.txt` is deleted after
+  the creation of `snap1` and before creation of `snap2`, the command above
+  will copy `1.txt` from `/src/` to `/dst/` and delete `2.txt` from `/dst/`.
+
+  Sync behavior will be elaborated using experiments below.
+
+$H4 Experiment 1: Syncing diff of two adjacent snapshots
+
+  Some preparations before we start.
+
+# Create source and destination directories
+hdfs dfs -mkdir /src/ /dst/
+# Allow snapshot on source
+hdfs dfsadmin -allowSnapshot /src/
+# Create a snapshot (empty one)
+hdfs dfs -createSnapshot /src/ snap1
+# Allow snapshot on destination
+hdfs dfsadmin -allowSnapshot /dst/
+# Create a from_snapshot with the same name
+hdfs dfs -createSnapshot /dst/ snap1
+
+# Put one text file under /src/
+echo "This is the 1st text file." > 1.txt
+hdfs dfs -put 1.txt /src/
+# Create the second snapshot
+hdfs dfs -createSnapshot /src/ snap2
+
+# Put another text file under /src/
+echo "This is the 2nd text file." > 2.txt
+hdfs dfs -put 2.txt /src/
+# Create the third snapshot
+hdfs dfs -createSnapshot /src/ snap3
+
+  Then we run distcp sync:
+
+hadoop distcp -update -diff snap1 snap2 /src/ /dst/
+
+  The command above should succeed. `1.txt` will be copied from `/src/` to
+  `/dst/`. Again, `-update` option is required.
+
+  If we run the same command again, we will get `DistCp sync failed` exception
+  because the destination has added a new file `1.txt` since `snap1`. That
+  being said, if we remove `1.txt` manually from `/dst/` and run the sync, the
+  command will succeed.
+
+$H4 Experiment 2: syncing diff of two non-adjacent snapshots
+
+  First do a clean up from Experiment 1.
+
+hdfs dfs -rm -skipTrash /dst/1.txt
+
+  Run sync command, note the `<to_snapshot>` has been changed from `snap2` in
+  Experiment 1 to `snap3`.
+
+hadoop distcp -update -diff snap1 snap3 /src/ /dst/
+
+  Both `1.txt` and `2.txt` will be copied to `/dst/`.
+
+$H4 Experiment 3: syncing file delete operation
+
+  Continuing from the end of Experiment 2:
+
+hdfs dfs -rm -skipTrash /dst/2.txt
+# Create snap2 at destination, it contains 1.txt
+hdfs dfs -createSnapshot /dst/ snap2
+
+# Delete 1.txt from source
+hdfs dfs -rm -skipTrash /src/1.txt
+# Create snap4 at source, it only contains 2.txt
+hdfs dfs -createSnapshot /src/ snap4
+
+  Run sync command now:
+
+  
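
Under the hood, `-diff` replays an HDFS snapshot diff report. A hedged sketch of inspecting the same diff programmatically with the standard HDFS client API; it assumes the default filesystem is HDFS and that `/src` carries both snapshots, as in the experiments above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    // Print the snapshot diff that `distcp -update -diff snap1 snap2`
    // would apply. Each entry in the report is a create, delete, rename
    // or modify that sync replays against the destination.
    public class ShowSnapshotDiff {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // assumes fs.defaultFS is HDFS
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        SnapshotDiffReport report =
            dfs.getSnapshotDiffReport(new Path("/src"), "snap1", "snap2");
        System.out.println(report);
      }
    }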

[hadoop] branch branch-3.2 updated: HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.

2019-03-26 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 52cfbc3  HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.
52cfbc3 is described below

commit 52cfbc39ccd178fae07a3b1137dd3138c63d5c11
Author: Siyao Meng 
AuthorDate: Tue Mar 26 18:43:43 2019 +

HADOOP-16037. DistCp: Document usage of Sync (-diff option) in detail.

Contributed by Siyao Meng

(cherry picked from commit ce4bafdf442c004b6deb25eaa2fa7e947b8ad269)
---
 .../hadoop-distcp/src/site/markdown/DistCp.md.vm   | 120 +
 1 file changed, 120 insertions(+)

diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 25ea7e2..3b7737b 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -13,6 +13,7 @@
 -->
 
 #set ( $H3 = '###' )
+#set ( $H4 = '####' )
 
 DistCp Guide
 =
@@ -23,6 +24,7 @@ DistCp Guide
  - [Usage](#Usage)
  - [Basic Usage](#Basic_Usage)
  - [Update and Overwrite](#Update_and_Overwrite)
+ - [Sync](#Sync)
  - [Command Line Options](#Command_Line_Options)
  - [Architecture of DistCp](#Architecture_of_DistCp)
  - [DistCp Driver](#DistCp_Driver)
@@ -192,6 +194,124 @@ $H3 Update and Overwrite
 
   If `-overwrite` is used, `1` is overwritten as well.
 
+$H3 Sync
+
+  `-diff` option syncs files from a source cluster to a target cluster with a
+  snapshot diff. It copies, renames and removes files in the snapshot diff list.
+
+  `-update` option must be included when `-diff` option is in use.
+
+  Most cloud providers don't work well with sync at the moment.
+
+  Usage:
+
+hadoop distcp -update -diff <from_snapshot> <to_snapshot> <src> <dst>
+
+  Example:
+
+hadoop distcp -update -diff snap1 snap2 /src/ /dst/
+
+  The command above applies changes from snapshot `snap1` to `snap2`
+  (i.e. snapshot diff from `snap1` to `snap2`) in `/src/` to `/dst/`.
+  Obviously, it requires `/src/` to have both snapshots `snap1` and `snap2`.
+  But the destination `/dst/` must also have a snapshot with the same
+  name as `<from_snapshot>`, in this case `snap1`. The destination `/dst/`
+  should not have new file operations (create, rename, delete) since `snap1`.
+  Note that when this command finishes, a new snapshot `snap2` will NOT be
+  created at `/dst/`.
+
+  `-update` is required to use `-diff` option.
+
+  For instance, in `/src/`, if `1.txt` is added and `2.txt` is deleted after
+  the creation of `snap1` and before creation of `snap2`, the command above
+  will copy `1.txt` from `/src/` to `/dst/` and delete `2.txt` from `/dst/`.
+
+  Sync behavior will be elaborated using experiments below.
+
+$H4 Experiment 1: Syncing diff of two adjacent snapshots
+
+  Some preparations before we start.
+
+# Create source and destination directories
+hdfs dfs -mkdir /src/ /dst/
+# Allow snapshot on source
+hdfs dfsadmin -allowSnapshot /src/
+# Create a snapshot (empty one)
+hdfs dfs -createSnapshot /src/ snap1
+# Allow snapshot on destination
+hdfs dfsadmin -allowSnapshot /dst/
+# Create a from_snapshot with the same name
+hdfs dfs -createSnapshot /dst/ snap1
+
+# Put one text file under /src/
+echo "This is the 1st text file." > 1.txt
+hdfs dfs -put 1.txt /src/
+# Create the second snapshot
+hdfs dfs -createSnapshot /src/ snap2
+
+# Put another text file under /src/
+echo "This is the 2nd text file." > 2.txt
+hdfs dfs -put 2.txt /src/
+# Create the third snapshot
+hdfs dfs -createSnapshot /src/ snap3
+
+  Then we run distcp sync:
+
+hadoop distcp -update -diff snap1 snap2 /src/ /dst/
+
+  The command above should succeed. `1.txt` will be copied from `/src/` to
+  `/dst/`. Again, `-update` option is required.
+
+  If we run the same command again, we will get `DistCp sync failed` exception
+  because the destination has added a new file `1.txt` since `snap1`. That
+  being said, if we remove `1.txt` manually from `/dst/` and run the sync, the
+  command will succeed.
+
+$H4 Experiment 2: syncing diff of two non-adjacent snapshots
+
+  First do a clean up from Experiment 1.
+
+hdfs dfs -rm -skipTrash /dst/1.txt
+
+  Run sync command, note the `<to_snapshot>` has been changed from `snap2` in
+  Experiment 1 to `snap3`.
+
+hadoop distcp -update -diff snap1 snap3 /src/ /dst/
+
+  Both `1.txt` and `2.txt` will be copied to `/dst/`.
+
+$H4 Experiment 3: syncing file delete operation
+
+  Continuing from the end of Experiment 2:
+
+hdfs dfs -rm -skipTrash /dst/2.txt
+# Create snap2 at destination, it contains 1.txt
+hdfs dfs -createSnapshot /dst/ snap2
+
+# Delete 1.txt from source
+hdfs dfs -rm -skipTrash /src/1.txt
+# Create snap4 at source, it only c

[hadoop] branch branch-3.0 updated: HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

2019-03-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new abb06c9  HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.
abb06c9 is described below

commit abb06c9d7c5e201ae62eec1885dd95bd6973ebcb
Author: Takanobu Asanuma 
AuthorDate: Wed Mar 27 03:27:02 2019 +0900

HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

(cherry picked from commit 55fb3c32fb48ca26a629d4d5f3f07e2858d09594)
---
 .../hadoop/hdfs/web/SSLConnectionConfigurator.java | 72 ++
 .../hadoop/hdfs/web/URLConnectionFactory.java  | 43 +++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  3 +
 .../hadoop/hdfs/web/TestURLConnectionFactory.java  | 53 
 .../federation/router/RouterWebHdfsMethods.java|  2 +
 .../hdfs/qjournal/client/QuorumJournalManager.java |  1 +
 6 files changed, 139 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
new file mode 100644
index 000..7bf7ae1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.security.GeneralSecurityException;
+
+/**
+ * Configure a connection to use SSL authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+
+public class SSLConnectionConfigurator implements ConnectionConfigurator {
+  private final SSLFactory factory;
+  private final SSLSocketFactory sf;
+  private final HostnameVerifier hv;
+  private final int connectTimeout;
+  private final int readTimeout;
+
+  SSLConnectionConfigurator(int connectTimeout, int readTimeout,
+  Configuration conf) throws IOException, GeneralSecurityException {
+factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+factory.init();
+sf = factory.createSSLSocketFactory();
+hv = factory.getHostnameVerifier();
+this.connectTimeout = connectTimeout;
+this.readTimeout = readTimeout;
+  }
+
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn) {
+if (conn instanceof HttpsURLConnection) {
+  HttpsURLConnection c = (HttpsURLConnection) conn;
+  c.setSSLSocketFactory(sf);
+  c.setHostnameVerifier(hv);
+}
+conn.setConnectTimeout(connectTimeout);
+conn.setReadTimeout(readTimeout);
+return conn;
+  }
+
+  void destroy() {
+factory.destroy();
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 9713932..8b6c7f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -22,11 +22,6 @@ import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.security.GeneralSecurityException;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLSocketFactory;
 
 import org.apache.hadoop.classification.I
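
A hedged usage sketch of the class quoted above; since the constructor and destroy() shown in the diff are package-private, the sketch lives in the same package. The URL and timeouts are placeholders.

    package org.apache.hadoop.hdfs.web;

    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.conf.Configuration;

    // Configure one connection, then destroy() the underlying SSLFactory
    // so the truststore reloader thread this commit fixes does not leak.
    public class SSLConnectionConfiguratorDemo {

      public static void main(String[] args) throws Exception {
        SSLConnectionConfigurator configurator =
            new SSLConnectionConfigurator(60_000, 60_000, new Configuration());
        try {
          HttpURLConnection conn = (HttpURLConnection)
              new URL("https://namenode.example.com:9871").openConnection();
          // Sets the socket factory, hostname verifier and timeouts.
          conn = configurator.configure(conn);
          System.out.println("configured: " + conn.getURL());
        } finally {
          configurator.destroy(); // stops the SSLFactory reloader thread
        }
      }
    }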

[hadoop] branch branch-3.1 updated: HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

2019-03-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 1fc7417  HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.
1fc7417 is described below

commit 1fc74178ebb2e391aa916ed71025544bf6dd19ac
Author: Takanobu Asanuma 
AuthorDate: Wed Mar 27 03:27:02 2019 +0900

HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

(cherry picked from commit 55fb3c32fb48ca26a629d4d5f3f07e2858d09594)
---
 .../hadoop/hdfs/web/SSLConnectionConfigurator.java | 72 ++
 .../hadoop/hdfs/web/URLConnectionFactory.java  | 43 +++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  3 +
 .../hadoop/hdfs/web/TestURLConnectionFactory.java  | 53 
 .../federation/router/RouterWebHdfsMethods.java|  2 +
 .../hdfs/qjournal/client/QuorumJournalManager.java |  1 +
 6 files changed, 139 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
new file mode 100644
index 000..7bf7ae1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.security.GeneralSecurityException;
+
+/**
+ * Configure a connection to use SSL authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+
+public class SSLConnectionConfigurator implements ConnectionConfigurator {
+  private final SSLFactory factory;
+  private final SSLSocketFactory sf;
+  private final HostnameVerifier hv;
+  private final int connectTimeout;
+  private final int readTimeout;
+
+  SSLConnectionConfigurator(int connectTimeout, int readTimeout,
+  Configuration conf) throws IOException, GeneralSecurityException {
+factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+factory.init();
+sf = factory.createSSLSocketFactory();
+hv = factory.getHostnameVerifier();
+this.connectTimeout = connectTimeout;
+this.readTimeout = readTimeout;
+  }
+
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn) {
+if (conn instanceof HttpsURLConnection) {
+  HttpsURLConnection c = (HttpsURLConnection) conn;
+  c.setSSLSocketFactory(sf);
+  c.setHostnameVerifier(hv);
+}
+conn.setConnectTimeout(connectTimeout);
+conn.setReadTimeout(readTimeout);
+return conn;
+  }
+
+  void destroy() {
+factory.destroy();
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 9713932..8b6c7f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -22,11 +22,6 @@ import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.security.GeneralSecurityException;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLSocketFactory;
 
 import org.apache.hadoop.classification.I

[hadoop] branch branch-3.2 updated: HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

2019-03-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 162e999  HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.
162e999 is described below

commit 162ec7703f690397ad7e17791ac5da0176f6
Author: Takanobu Asanuma 
AuthorDate: Wed Mar 27 03:27:02 2019 +0900

HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

(cherry picked from commit 55fb3c32fb48ca26a629d4d5f3f07e2858d09594)
---
 .../hadoop/hdfs/web/SSLConnectionConfigurator.java | 72 ++
 .../hadoop/hdfs/web/URLConnectionFactory.java  | 43 +++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  3 +
 .../hadoop/hdfs/web/TestURLConnectionFactory.java  | 53 
 .../federation/router/RouterWebHdfsMethods.java|  2 +
 .../hdfs/qjournal/client/QuorumJournalManager.java |  1 +
 6 files changed, 139 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
new file mode 100644
index 000..7bf7ae1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.security.GeneralSecurityException;
+
+/**
+ * Configure a connection to use SSL authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+
+public class SSLConnectionConfigurator implements ConnectionConfigurator {
+  private final SSLFactory factory;
+  private final SSLSocketFactory sf;
+  private final HostnameVerifier hv;
+  private final int connectTimeout;
+  private final int readTimeout;
+
+  SSLConnectionConfigurator(int connectTimeout, int readTimeout,
+  Configuration conf) throws IOException, GeneralSecurityException {
+factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+factory.init();
+sf = factory.createSSLSocketFactory();
+hv = factory.getHostnameVerifier();
+this.connectTimeout = connectTimeout;
+this.readTimeout = readTimeout;
+  }
+
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn) {
+if (conn instanceof HttpsURLConnection) {
+  HttpsURLConnection c = (HttpsURLConnection) conn;
+  c.setSSLSocketFactory(sf);
+  c.setHostnameVerifier(hv);
+}
+conn.setConnectTimeout(connectTimeout);
+conn.setReadTimeout(readTimeout);
+return conn;
+  }
+
+  void destroy() {
+factory.destroy();
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 9713932..8b6c7f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -22,11 +22,6 @@ import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.security.GeneralSecurityException;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLSocketFactory;
 
 import org.apache.hadoop.classification.I

[hadoop] 14/20: YARN-9397. Fix empty NMResourceInfo object test failures in branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f279f92bb1b9d89c4968bf168096615eff27b081
Author: Jonathan Hung 
AuthorDate: Mon Mar 18 13:44:27 2019 -0700

YARN-9397. Fix empty NMResourceInfo object test failures in branch-2
---
 .../yarn/server/nodemanager/webapp/TestNMWebServices.java  | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index 2f1577f..980eae9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -457,23 +457,23 @@ public class TestNMWebServices extends JerseyTestBase {
 assertEquals(MediaType.APPLICATION_JSON, response.getType().toString());
 
 // Access resource-2 should fail (empty NMResourceInfo returned).
-JSONObject json = response.getEntity(JSONObject.class);
-assertEquals(0, json.length());
+String resp = response.getEntity(String.class);
+assertEquals("null", resp);
 
 // Access resource-3 should fail (unknown plugin)
 response = r.path("ws").path("v1").path("node").path(
 "resources").path("resource-3").accept(MediaType.APPLICATION_JSON).get(
 ClientResponse.class);
 assertEquals(MediaType.APPLICATION_JSON, response.getType().toString());
-json = response.getEntity(JSONObject.class);
-assertEquals(0, json.length());
+resp = response.getEntity(String.class);
+assertEquals("null", resp);
 
 // Access resource-1 should success
 response = r.path("ws").path("v1").path("node").path(
 "resources").path("resource-1").accept(MediaType.APPLICATION_JSON).get(
 ClientResponse.class);
 assertEquals(MediaType.APPLICATION_JSON, response.getType().toString());
-json = response.getEntity(JSONObject.class);
+JSONObject json = response.getEntity(JSONObject.class);
 assertEquals(1000, Long.parseLong(json.get("a").toString()));
 
 // Access resource-1 should success (encoded yarn.io%2Fresource-1).





[hadoop] 03/20: YARN-7137. [YARN-3926] Move newly added APIs to unstable in YARN-3926 branch. Contributed by Wangda Tan.

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cbcbfc247913987cd02c80f857ef3d0944d0f187
Author: Sunil G 
AuthorDate: Tue Sep 12 20:31:47 2017 +0530

YARN-7137. [YARN-3926] Move newly added APIs to unstable in YARN-3926 branch. Contributed by Wangda Tan.

(cherry picked from commit da0b6a354bf6f6bf37ca5a05a4a8eece09aa4893)
(cherry picked from commit 74030d808cd95e26a0c48500c08d269fcb4150ee)
---
 .../apache/hadoop/yarn/api/records/Resource.java   | 24 +++---
 .../hadoop/yarn/api/records/ResourceRequest.java   |  1 +
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 19 -
 .../hadoop/yarn/util/resource/package-info.java|  6 +-
 .../server/resourcemanager/webapp/dao/AppInfo.java |  2 +-
 5 files changed, 15 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index be0ab58..7e8c01d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -206,8 +206,8 @@ public abstract class Resource implements Comparable<Resource> {
*
* @return Map of resource name to ResourceInformation
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public ResourceInformation[] getResources() {
 return resources;
   }
@@ -220,7 +220,7 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException if the resource can't be found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public ResourceInformation getResourceInformation(String resource)
   throws ResourceNotFoundException {
 Integer index = ResourceUtils.getResourceTypeIndex().get(resource);
@@ -240,8 +240,8 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException
*   if the resource can't be found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public ResourceInformation getResourceInformation(int index)
   throws ResourceNotFoundException {
 ResourceInformation ri = null;
@@ -262,7 +262,7 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException if the resource can't be found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public long getResourceValue(String resource)
   throws ResourceNotFoundException {
 return getResourceInformation(resource).getValue();
@@ -276,7 +276,7 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException if the resource is not found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public void setResourceInformation(String resource,
   ResourceInformation resourceInformation)
   throws ResourceNotFoundException {
@@ -302,8 +302,8 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException
*   if the resource is not found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public void setResourceInformation(int index,
   ResourceInformation resourceInformation)
   throws ResourceNotFoundException {
@@ -323,7 +323,7 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException if the resource is not found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 if (resource.equals(ResourceInformation.MEMORY_URI)) {
@@ -350,8 +350,8 @@ public abstract class Resource implements Comparable<Resource> {
* @throws ResourceNotFoundException
*   if the resource is not found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public void setResourceValue(int index, long value)
   throws ResourceNotFoundException {
 try {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index 94eda7c..e1a98ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.api.records;
 import java.io.Ser

[hadoop] 01/20: YARN-9188. Port YARN-7136 to branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0d54ad7c1fb5b4b3048d7c37d90d6b2e5c0d0b61
Author: Jonathan Hung 
AuthorDate: Wed Jan 9 16:01:06 2019 -0500

YARN-9188. Port YARN-7136 to branch-2
---
 .../hadoop-yarn/dev-support/findbugs-exclude.xml   |   2 +-
 .../apache/hadoop/yarn/api/records/Resource.java   | 178 +++---
 .../yarn/api/records/ResourceInformation.java  |  15 +-
 ...{BaseResource.java => LightWeightResource.java} | 104 +---
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  23 +-
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   |  19 +-
 .../util/resource/DominantResourceCalculator.java  |  75 +++---
 .../hadoop/yarn/util/resource/Resources.java   |  30 ++-
 .../yarn/util/resource/TestResourceUtils.java  |   2 +
 .../rmapp/attempt/RMAppAttemptImpl.java|   2 -
 .../hadoop/yarn/server/resourcemanager/MockRM.java |   6 +-
 .../scheduler/capacity/TestCapacityScheduler.java  | 137 ---
 .../capacity/TestCapacitySchedulerPerf.java| 265 +
 .../apache/hadoop/yarn/server/MiniYARNCluster.java |   7 +-
 14 files changed, 524 insertions(+), 341 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index e086fbe..45aa868 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -629,7 +629,7 @@
   
 
   
-
+
 
 
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index f3a5bc2..37b50f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.records.impl.BaseResource;
+import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -59,8 +59,15 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 @Stable
 public abstract class Resource implements Comparable {
 
-  protected static final String MEMORY = 
ResourceInformation.MEMORY_MB.getName();
-  protected static final String VCORES = ResourceInformation.VCORES.getName();
+  protected ResourceInformation[] resources = null;
+
+  // Number of mandatory resources. This is added to avoid invoking
+  // MandatoryResources.values().length, since values() internally
+  // copies the array on every call.
+  protected static final int NUM_MANDATORY_RESOURCES = 2;
+
+  protected static final int MEMORY_INDEX = 0;
+  protected static final int VCORES_INDEX = 1;
 
   @Public
   @Stable
@@ -71,7 +78,7 @@ public abstract class Resource implements 
Comparable {
   ret.setVirtualCores(vCores);
   return ret;
 }
-return new BaseResource(memory, vCores);
+return new LightWeightResource(memory, vCores);
   }
 
   @Public
@@ -83,7 +90,7 @@ public abstract class Resource implements 
Comparable {
   ret.setVirtualCores(vCores);
   return ret;
 }
-return new BaseResource(memory, vCores);
+return new LightWeightResource(memory, vCores);
   }
 
   @InterfaceAudience.Private
@@ -201,7 +208,9 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public abstract ResourceInformation[] getResources();
+  public ResourceInformation[] getResources() {
+return resources;
+  }
 
   /**
* Get ResourceInformation for a specified resource.
@@ -215,7 +224,6 @@ public abstract class Resource implements 
Comparable {
   public ResourceInformation getResourceInformation(String resource)
   throws ResourceNotFoundException {
 Integer index = ResourceUtils.getResourceTypeIndex().get(resource);
-ResourceInformation[] resources = getResources();
 if (index != null) {
   return resources[index];
 }
@@ -236,12 +244,13 @@ public abstract class Resource implements 
Comparable {
   @Evolving
   public ResourceInformation getResourceInformation(int index)
   throws ResourceNotFoundException {
-ResourceInformation[] resources = getResources();
-if (index < 0 || index >= resources.length) {
-  throw new Re

[hadoop] 09/20: YARN-7396. NPE when accessing container logs due to null dirsHandler. Contributed by Jonathan Hung

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3d5a65211b8830b3c7d821612db35bf6f4409020
Author: Jian He 
AuthorDate: Wed Nov 1 17:00:32 2017 -0700

YARN-7396. NPE when accessing container logs due to null dirsHandler. 
Contributed by Jonathan Hung
---
 .../java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index c74b54e..536ac3a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -371,6 +371,8 @@ public class NodeManager extends CompositeService
 
 this.aclsManager = new ApplicationACLsManager(conf);
 
+this.dirsHandler = new LocalDirsHandlerService(metrics);
+
 boolean isDistSchedulingEnabled =
 conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED,
 YarnConfiguration.DEFAULT_DIST_SCHEDULING_ENABLED);
@@ -394,7 +396,6 @@ public class NodeManager extends CompositeService
 // NodeManager level dispatcher
 this.dispatcher = createNMDispatcher();
 
-dirsHandler = new LocalDirsHandlerService(metrics);
 nodeHealthChecker =
 new NodeHealthCheckerService(
 getNodeHealthScriptRunner(conf), dirsHandler);
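
The fix works by constructing `dirsHandler` before the components that consume it during service init. A minimal sketch of the failure mode, with hypothetical class names rather than the real NodeManager types:

import java.util.Objects;

public class InitOrderSketch {
  static class DirsHandler { }

  static class WebServer {
    WebServer(DirsHandler handler) {
      // The patch guarantees non-null by constructing dirsHandler earlier in
      // serviceInit; with the broken ordering this reference was still null
      // and the NPE surfaced later, when container logs were served.
      Objects.requireNonNull(handler, "dirsHandler must be initialized first");
    }
  }

  private DirsHandler dirsHandler;

  void initBroken() {
    new WebServer(dirsHandler);       // dirsHandler is still null here -> NPE
    dirsHandler = new DirsHandler();  // assigned too late
  }

  void initFixed() {
    dirsHandler = new DirsHandler();  // assign before any dependent component
    new WebServer(dirsHandler);       // safe
  }

  public static void main(String[] args) {
    new InitOrderSketch().initFixed(); // completes without an NPE
  }
}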





[hadoop] 19/20: YARN-8183. Fix ConcurrentModificationException inside RMAppAttemptMetrics#convertAtomicLongMaptoLongMap. (Suma Shivaprasad via wangda)

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4d9f4e792e97728a551b52631e1d4ebcac232594
Author: Wangda Tan 
AuthorDate: Tue Apr 24 17:42:17 2018 -0700

YARN-8183. Fix ConcurrentModificationException inside 
RMAppAttemptMetrics#convertAtomicLongMaptoLongMap. (Suma Shivaprasad via wangda)

Change-Id: I347871d672001653a3afe2e99adefd74e0d798cd
(cherry picked from commit bb3c504764f807fccba7f28298a12e2296f284cb)
(cherry picked from commit 3043a93d461fd8b9ccc2ff4b8d17e5430ed77615)
---
 .../resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java   | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index 0982ef9..e68c5d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -53,8 +54,8 @@ public class RMAppAttemptMetrics {
   
   private ReadLock readLock;
   private WriteLock writeLock;
-  private Map resourceUsageMap = new HashMap<>();
-  private Map preemptedResourceMap = new HashMap<>();
+  private Map resourceUsageMap = new ConcurrentHashMap<>();
+  private Map preemptedResourceMap = new 
ConcurrentHashMap<>();
   private RMContext rmContext;
 
   private int[][] localityStatistics =
@@ -97,7 +98,7 @@ public class RMAppAttemptMetrics {
   public Resource getResourcePreempted() {
 try {
   readLock.lock();
-  return resourcePreempted;
+  return Resource.newInstance(resourcePreempted);
 } finally {
   readLock.unlock();
 }
@@ -229,7 +230,7 @@ public class RMAppAttemptMetrics {
   }
 
   public Resource getApplicationAttemptHeadroom() {
-return applicationHeadroom;
+return Resource.newInstance(applicationHeadroom);
   }
 
   public void setApplicationAttemptHeadRoom(Resource headRoom) {
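
Two changes land here: the per-attempt maps become `ConcurrentHashMap`s so a writer can no longer invalidate an iterating reader, and the getters hand out copies via `Resource.newInstance` instead of exposing internal mutable state. A minimal sketch of the race, using hypothetical map contents rather than the RMAppAttemptMetrics code:

import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CmeSketch {
  public static void main(String[] args) throws InterruptedException {
    Map<String, Long> plain = new HashMap<>();
    Map<String, Long> concurrent = new ConcurrentHashMap<>();
    for (long i = 0; i < 10_000; i++) {
      plain.put("k" + i, i);
      concurrent.put("k" + i, i);
    }

    // Writer mutates both maps while the main thread iterates them.
    Thread writer = new Thread(() -> {
      for (long i = 10_000; i < 20_000; i++) {
        plain.put("k" + i, i);
        concurrent.put("k" + i, i);
      }
    });
    writer.start();

    try {
      long sum = 0;
      for (long v : plain.values()) {    // HashMap iterators are fail-fast
        sum += v;
      }
      System.out.println("HashMap survived this run: " + sum);
    } catch (ConcurrentModificationException e) {
      System.out.println("HashMap iteration failed: " + e);
    }

    long sum = 0;
    for (long v : concurrent.values()) { // weakly consistent, never throws CME
      sum += v;
    }
    writer.join();
    System.out.println("ConcurrentHashMap sum: " + sum);
  }
}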





[hadoop] 17/20: YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f350c42f6a1a6caecea3d78a603620f345e38bb6
Author: Daniel Templeton 
AuthorDate: Thu Nov 9 10:36:49 2017 -0800

YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

Change-Id: Ib1bb487e14a15edd2b5a42cf5078c5a2b295f069
(cherry picked from commit db82a41d94872cea4d0c1bb1336916cebc2faeec)
---
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 52 +-
 1 file changed, 22 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index f3edc74..abf58a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -338,18 +338,14 @@ public class ResourceUtils {
 if (!initializedResources) {
   synchronized (ResourceUtils.class) {
 if (!initializedResources) {
-  if (conf == null) {
-conf = new YarnConfiguration();
-  }
-  try {
-addResourcesFileToConf(resourceFile, conf);
-LOG.debug("Found " + resourceFile + ", adding to configuration");
-  } catch (FileNotFoundException fe) {
-LOG.info("Unable to find '" + resourceFile
-+ "'. Falling back to memory and vcores as resources.");
+  Configuration resConf = conf;
+
+  if (resConf == null) {
+resConf = new YarnConfiguration();
   }
-  initializeResourcesMap(conf);
 
+  addResourcesFileToConf(resourceFile, resConf);
+  initializeResourcesMap(resConf);
 }
   }
 }
@@ -386,21 +382,17 @@ public class ResourceUtils {
   }
 
   private static void addResourcesFileToConf(String resourceFile,
-  Configuration conf) throws FileNotFoundException {
+  Configuration conf) {
 try {
   InputStream ris = getConfInputStream(resourceFile, conf);
   LOG.debug("Found " + resourceFile + ", adding to configuration");
   conf.addResource(ris);
 } catch (FileNotFoundException fe) {
-  throw fe;
-} catch (IOException ie) {
+  LOG.info("Unable to find '" + resourceFile + "'.");
+} catch (IOException | YarnException ex) {
   LOG.fatal("Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ie);
-  throw new YarnRuntimeException(ie);
-} catch (YarnException ye) {
-  LOG.fatal("YARN Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ye);
-  throw new YarnRuntimeException(ye);
+  + resourceFile + "'.", ex);
+  throw new YarnRuntimeException(ex);
 }
   }
 
@@ -462,19 +454,19 @@ public class ResourceUtils {
   private static Map 
initializeNodeResourceInformation(
   Configuration conf) {
 Map nodeResources = new HashMap<>();
-try {
-  addResourcesFileToConf(
-  YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE, conf);
-  for (Map.Entry entry : conf) {
-String key = entry.getKey();
-String value = entry.getValue();
-if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
-  addResourceInformation(key, value, nodeResources);
-}
+
+addResourcesFileToConf(YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE,
+conf);
+
+for (Map.Entry entry : conf) {
+  String key = entry.getKey();
+  String value = entry.getValue();
+
+  if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
+addResourceInformation(key, value, nodeResources);
   }
-} catch (FileNotFoundException fe) {
-  LOG.info("Couldn't find node resources file");
 }
+
 return nodeResources;
   }
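
After this patch, `addResourcesFileToConf` treats a missing file as a soft condition (log and continue) at every call site, while genuine read errors are wrapped in `YarnRuntimeException`. A minimal sketch of that policy, using a hypothetical helper and plain JDK types rather than the actual ResourceUtils code:

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

public class OptionalConfigSketch {

  static void addOptionalResource(String file) {
    try (InputStream in = new FileInputStream(file)) {
      // In the real code this is conf.addResource(ris).
      System.out.println("Found " + file + ", adding to configuration");
    } catch (FileNotFoundException fnf) {
      // Optional file: log and keep going; callers fall back to defaults
      // (memory and vcores), which is what the patch makes uniform.
      System.out.println("Unable to find '" + file + "'.");
    } catch (IOException io) {
      // Anything else is a hard failure, surfaced unchecked like
      // YarnRuntimeException in the patch.
      throw new RuntimeException("Exception reading '" + file + "'", io);
    }
  }

  public static void main(String[] args) {
    addOptionalResource("resource-types.xml"); // absent file: soft fallback
  }
}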
 





[hadoop] branch YARN-8200 updated (dcb7d7a -> 9c6dbd8)

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a change to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 discard dcb7d7a  YARN-9271. Backport YARN-6927 for resource type support in 
MapReduce
 discard a06fc3f  YARN-8183. Fix ConcurrentModificationException inside 
RMAppAttemptMetrics#convertAtomicLongMaptoLongMap. (Suma Shivaprasad via wangda)
 discard 26e1c49  YARN-7383. Node resource is not parsed correctly for resource 
names containing dot. Contributed by Gergely Novák.
 discard 5ea878d  YARN-7143. FileNotFound handling in ResourceUtils is 
inconsistent
 discard a9c58d3  YARN-7345. GPU Isolation: Incorrect minor device numbers 
written to devices.deny file. (Jonathan Hung via wangda)
 discard ff91a67  YARN-9291. Backport YARN-7637 to branch-2
 discard 7d1832d  YARN-9397. Fix empty NMResourceInfo object test failures in 
branch-2
 discard e985a39  YARN-7223. Document GPU isolation feature. Contributed by 
Wangda Tan.
 discard 7cc7a63  YARN-7594. TestNMWebServices#testGetNMResourceInfo fails on 
trunk. Contributed by Gergely Novák.
 discard f493e0d  YARN-7573. Gpu Information page could be empty for nodes 
without GPU. (Sunil G via wangda)
 discard a8defcd  YARN-9289. Backport YARN-7330 for GPU in UI to branch-2
 discard 16127ea  YARN-7396. NPE when accessing container logs due to null 
dirsHandler. Contributed by Jonathan Hung
 discard 010845d  YARN-9174. Backport YARN-7224 for refactoring of GpuDevice 
class
 discard b4ae7ab  YARN-9280. Backport YARN-6620 to YARN-8200/branch-2 for 
NodeManager-side GPU isolation
 discard 83ab6b3  YARN-9180. Port YARN-7033 NM recovery of assigned resources 
to branch-2
 discard 574d7a2  YARN-9187. Backport YARN-6852 for GPU-specific native changes 
to branch-2
 discard 9d689f7  YARN-9175. Null resources check in ResourceInfo for branch-3.0
 discard 5a60e94  YARN-7137. [YARN-3926] Move newly added APIs to unstable in 
YARN-3926 branch. Contributed by Wangda Tan.
 discard a77aeb5  YARN-7270 addendum: Reapplied changes after YARN-3926 
backports
 discard 1b09071  YARN-9188. Port YARN-7136 to branch-2
 new 0d54ad7  YARN-9188. Port YARN-7136 to branch-2
 new 93fe781  YARN-7270 addendum: Reapplied changes after YARN-3926 
backports
 new cbcbfc2  YARN-7137. [YARN-3926] Move newly added APIs to unstable in 
YARN-3926 branch. Contributed by Wangda Tan.
 new b32e2a7  YARN-9175. Null resources check in ResourceInfo for branch-3.0
 new f0dcb31  YARN-9187. Backport YARN-6852 for GPU-specific native changes 
to branch-2
 new 4a1c7e6  YARN-9180. Port YARN-7033 NM recovery of assigned resources 
to branch-2
 new 25167b5  YARN-9280. Backport YARN-6620 to YARN-8200/branch-2 for 
NodeManager-side GPU isolation
 new faf0b36  YARN-9174. Backport YARN-7224 for refactoring of GpuDevice 
class
 new 3d5a652  YARN-7396. NPE when accessing container logs due to null 
dirsHandler. Contributed by Jonathan Hung
 new 9a61778  YARN-9289. Backport YARN-7330 for GPU in UI to branch-2
 new 2116edd  YARN-7573. Gpu Information page could be empty for nodes 
without GPU. (Sunil G via wangda)
 new 618d015  YARN-7594. TestNMWebServices#testGetNMResourceInfo fails on 
trunk. Contributed by Gergely Novák.
 new df6a7b0  YARN-7223. Document GPU isolation feature. Contributed by 
Wangda Tan.
 new f279f92  YARN-9397. Fix empty NMResourceInfo object test failures in 
branch-2
 new ea259c4  YARN-9291. Backport YARN-7637 to branch-2
 new 05292fe  YARN-7345. GPU Isolation: Incorrect minor device numbers 
written to devices.deny file. (Jonathan Hung via wangda)
 new f350c42  YARN-7143. FileNotFound handling in ResourceUtils is 
inconsistent
 new 6239faf  YARN-7383. Node resource is not parsed correctly for resource 
names containing dot. Contributed by Gergely Novák.
 new 4d9f4e7  YARN-8183. Fix ConcurrentModificationException inside 
RMAppAttemptMetrics#convertAtomicLongMaptoLongMap. (Suma Shivaprasad via wangda)
 new 9c6dbd8  YARN-9271. Backport YARN-6927 for resource type support in 
MapReduce

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (dcb7d7a)
\
 N -- N -- N   refs/heads/YARN-8200 (9c6dbd8)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 20 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have

[hadoop] 18/20: YARN-7383. Node resource is not parsed correctly for resource names containing dot. Contributed by Gergely Novák.

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6239fafe0140ad5bba1c35f468730e62554f908d
Author: Sunil G 
AuthorDate: Wed Dec 13 22:00:07 2017 +0530

YARN-7383. Node resource is not parsed correctly for resource names 
containing dot. Contributed by Gergely Novák.
---
 .../apache/hadoop/yarn/util/resource/ResourceUtils.java   | 15 ++-
 .../hadoop/yarn/util/resource/TestResourceUtils.java  |  5 -
 .../test/resources/resource-types/node-resources-2.xml|  5 +
 .../test/resources/resource-types/resource-types-4.xml|  7 ++-
 4 files changed, 21 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index abf58a6..65eb5a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -461,21 +461,18 @@ public class ResourceUtils {
 for (Map.Entry entry : conf) {
   String key = entry.getKey();
   String value = entry.getValue();
-
-  if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
-addResourceInformation(key, value, nodeResources);
-  }
+  addResourceTypeInformation(key, value, nodeResources);
 }
 
 return nodeResources;
   }
 
-  private static void addResourceInformation(String prop, String value,
+  private static void addResourceTypeInformation(String prop, String value,
   Map nodeResources) {
-String[] parts = prop.split("\\.");
-LOG.info("Found resource entry " + prop);
-if (parts.length == 4) {
-  String resourceType = parts[3];
+if (prop.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
+  LOG.info("Found resource entry " + prop);
+  String resourceType = prop.substring(
+  YarnConfiguration.NM_RESOURCES_PREFIX.length());
   if (!nodeResources.containsKey(resourceType)) {
 nodeResources
 .put(resourceType, ResourceInformation.newInstance(resourceType));
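
The root cause: resource names such as `yarn.io/gpu` themselves contain dots, so splitting the property key on `.` and picking a fixed segment truncates the name, while stripping the known prefix preserves it. A minimal sketch (the prefix constant's value is assumed here for illustration):

public class ResourceNameParsingSketch {
  // Assumed value of YarnConfiguration.NM_RESOURCES_PREFIX.
  static final String PREFIX = "yarn.nodemanager.resource-type.";

  // Buggy variant: a dot in the resource name shifts the split segments.
  static String brokenParse(String prop) {
    String[] parts = prop.split("\\.");
    return parts.length >= 4 ? parts[3] : null; // yields "yarn", name mangled
  }

  // Fixed variant: strip the known prefix, keep the full resource name.
  static String fixedParse(String prop) {
    return prop.startsWith(PREFIX) ? prop.substring(PREFIX.length()) : null;
  }

  public static void main(String[] args) {
    String prop = PREFIX + "yarn.io/gpu";
    System.out.println(brokenParse(prop)); // prints "yarn"
    System.out.println(fixedParse(prop));  // prints "yarn.io/gpu"
  }
}
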
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index 80555ca..b511705 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -124,9 +124,10 @@ public class TestResourceUtils {
 new ResourceFileInformation("resource-types-3.xml", 3);
 testFile3.resourceNameUnitsMap.put("resource2", "");
 ResourceFileInformation testFile4 =
-new ResourceFileInformation("resource-types-4.xml", 4);
+new ResourceFileInformation("resource-types-4.xml", 5);
 testFile4.resourceNameUnitsMap.put("resource1", "G");
 testFile4.resourceNameUnitsMap.put("resource2", "m");
+testFile4.resourceNameUnitsMap.put("yarn.io/gpu", "");
 
 ResourceFileInformation[] tests = {testFile1, testFile2, testFile3,
 testFile4};
@@ -292,6 +293,8 @@ public class TestResourceUtils {
 ResourceInformation.newInstance("resource1", "Gi", 5L));
 test3Resources.setResourceInformation("resource2",
 ResourceInformation.newInstance("resource2", "m", 2L));
+test3Resources.setResourceInformation("yarn.io/gpu",
+ResourceInformation.newInstance("yarn.io/gpu", "", 1));
 testRun.put("node-resources-2.xml", test3Resources);
 
 for (Map.Entry entry : testRun.entrySet()) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
index 9d9b3dc..382d5dd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
@@ -36,4 +36,9 @@ limitations under the License. See accompanying LICENSE file.
    <value>2m</value>
  </property>
 
+ <property>
+   <name>yarn.nodemanager.resource-type.yarn.io/gpu</name>
+   <value>1</value>
+ </property>
+
 </configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml
index c84316a..ea8d2bd 100644
--- 
a/hadoop-yarn-project/hadoop-ya

[hadoop] 11/20: YARN-7573. Gpu Information page could be empty for nodes without GPU. (Sunil G via wangda)

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2116edd368ab2ae19b54ea5ce674785344d42759
Author: Wangda Tan 
AuthorDate: Wed Nov 29 17:43:37 2017 -0800

YARN-7573. Gpu Information page could be empty for nodes without GPU. 
(Sunil G via wangda)

Change-Id: I7f614e5a589a09ce4e4286c84b706e05c29abd14
---
 .../apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java | 4 +---
 .../hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js   | 6 --
 .../src/main/webapp/app/templates/components/node-menu-panel.hbs| 2 +-
 .../hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-apps.hbs | 2 +-
 .../src/main/webapp/app/templates/yarn-node-containers.hbs  | 2 +-
 .../src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs | 4 
 6 files changed, 12 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index 7476d75..7702004 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -510,9 +510,7 @@ public class NMWebServices {
   }
 }
 
-throw new YarnException(
-"Could not get detailed resource information for given resource-name="
-+ resourceName);
+return new NMResourceInfo();
   }
 
   private long parseLongParam(String bytes) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
index b1b1518..aa5efbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
@@ -97,7 +97,9 @@ export default DS.Model.extend({
 var used = 0;
 var ri;
 
-var resourceInformations = this.get("usedResource").resourcesInformations;
+const usedResource = this.get("usedResource");
+const availableResource = this.get("availableResource");
+var resourceInformations = usedResource ? 
usedResource.resourcesInformations : [];
 for (var i = 0; i < resourceInformations.length; i++) {
   ri = resourceInformations[i];
   if (ri.name === "yarn.io/gpu") {
@@ -106,7 +108,7 @@ export default DS.Model.extend({
 }
 
 var available = 0;
-resourceInformations = this.get("availableResource").resourcesInformations;
+resourceInformations = availableResource ? 
availableResource.resourcesInformations : [];
 for (i = 0; i < resourceInformations.length; i++) {
   ri = resourceInformations[i];
   if (ri.name === "yarn.io/gpu") {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
index fffae30..966e408 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
@@ -36,7 +36,7 @@
   {{#link-to 'yarn-node-containers' nodeId nodeAddr}}List of 
Containers
   {{/link-to}}
 {{/link-to}}
-{{#if nmGpuInfo}}
+{{#if (and nmGpuInfo nmGpuInfo.info.totalGpuDevices)}}
   {{#link-to 'yarn-node.yarn-nm-gpu' tagName="li"}}
 {{#link-to 'yarn-node.yarn-nm-gpu' nodeId nodeAddr }}GPU 
Information
 {{/link-to}}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-apps.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-apps.hbs
index 52f0c86..919e54d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-apps.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-apps.hbs
@@ -20,7 +20,7 @@
 
 
   
-{{node-menu-panel path="yarn-node-apps" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id}}
+{{node-menu-panel path="yarn-node-apps" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id nmGpuInfo=model.nmGpuInfo}}
 {{#if model.apps}}
 
   
diff --git 
a/hadoop-yarn-

[hadoop] 15/20: YARN-9291. Backport YARN-7637 to branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ea259c493dedf2b7244afd6967848d09d51564c3
Author: Jonathan Hung 
AuthorDate: Wed Mar 20 17:45:01 2019 -0700

YARN-9291. Backport YARN-7637 to branch-2
---
 .../recovery/NMNullStateStoreService.java  |  1 +
 .../resources/gpu/TestGpuResourceHandler.java  | 30 ++
 2 files changed, 31 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index 7d1010f..95ec61a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -272,6 +272,7 @@ public class NMNullStateStoreService extends 
NMStateStoreService {
   public void storeAssignedResources(Container container,
   String resourceType, List assignedResources)
   throws IOException {
+updateContainerResourceMapping(container, resourceType, assignedResources);
   }
 
   @Override
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
index b5796df..7a3bd02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resource
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuDevice;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuDiscoverer;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants;
+import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import org.apache.hadoop.yarn.util.resource.TestResourceUtils;
 import org.junit.Assert;
@@ -349,6 +350,35 @@ public class TestGpuResourceHandler {
   }
 
   @Test
+  public void testAllocationStoredWithNULLStateStore() throws Exception {
+NMNullStateStoreService mockNMNULLStateStore = 
mock(NMNullStateStoreService.class);
+
+Context nmnctx = mock(Context.class);
+when(nmnctx.getNMStateStore()).thenReturn(mockNMNULLStateStore);
+
+GpuResourceHandlerImpl gpuNULLStateResourceHandler =
+new GpuResourceHandlerImpl(nmnctx, mockCGroupsHandler,
+mockPrivilegedExecutor);
+
+Configuration conf = new YarnConfiguration();
+conf.set(YarnConfiguration.NM_GPU_ALLOWED_DEVICES, "0:0,1:1,2:3,3:4");
+GpuDiscoverer.getInstance().initialize(conf);
+
+gpuNULLStateResourceHandler.bootstrap(conf);
+Assert.assertEquals(4,
+gpuNULLStateResourceHandler.getGpuAllocator().getAvailableGpus());
+
+/* Start container 1, asks 3 containers */
+Container container = mockContainerWithGpuRequest(1, 3);
+gpuNULLStateResourceHandler.preStart(container);
+
+verify(nmnctx.getNMStateStore()).storeAssignedResources(container,
+ResourceInformation.GPU_URI, Arrays
+.asList(new GpuDevice(0, 0), new GpuDevice(1, 1),
+new GpuDevice(2, 3)));
+  }
+
+  @Test
   public void testRecoverResourceAllocation() throws Exception {
 Configuration conf = new YarnConfiguration();
 conf.set(YarnConfiguration.NM_GPU_ALLOWED_DEVICES, "0:0,1:1,2:3,3:4");





[hadoop] branch trunk updated: HDFS-14037. Fix SSLFactory truststore reloader thread leak in URLConnectionFactory.

2019-03-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 55fb3c3  HDFS-14037. Fix SSLFactory truststore reloader thread leak in 
URLConnectionFactory.
55fb3c3 is described below

commit 55fb3c32fb48ca26a629d4d5f3f07e2858d09594
Author: Takanobu Asanuma 
AuthorDate: Wed Mar 27 03:27:02 2019 +0900

HDFS-14037. Fix SSLFactory truststore reloader thread leak in 
URLConnectionFactory.
---
 .../hadoop/hdfs/web/SSLConnectionConfigurator.java | 72 ++
 .../hadoop/hdfs/web/URLConnectionFactory.java  | 43 +++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  3 +
 .../hadoop/hdfs/web/TestURLConnectionFactory.java  | 53 
 .../federation/router/RouterWebHdfsMethods.java|  2 +
 .../hdfs/qjournal/client/QuorumJournalManager.java |  1 +
 6 files changed, 139 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
new file mode 100644
index 000..7bf7ae1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SSLConnectionConfigurator.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.security.GeneralSecurityException;
+
+/**
+ * Configure a connection to use SSL authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+
+public class SSLConnectionConfigurator implements ConnectionConfigurator {
+  private final SSLFactory factory;
+  private final SSLSocketFactory sf;
+  private final HostnameVerifier hv;
+  private final int connectTimeout;
+  private final int readTimeout;
+
+  SSLConnectionConfigurator(int connectTimeout, int readTimeout,
+  Configuration conf) throws IOException, GeneralSecurityException {
+factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+factory.init();
+sf = factory.createSSLSocketFactory();
+hv = factory.getHostnameVerifier();
+this.connectTimeout = connectTimeout;
+this.readTimeout = readTimeout;
+  }
+
+  @Override
+  public HttpURLConnection configure(HttpURLConnection conn) {
+if (conn instanceof HttpsURLConnection) {
+  HttpsURLConnection c = (HttpsURLConnection) conn;
+  c.setSSLSocketFactory(sf);
+  c.setHostnameVerifier(hv);
+}
+conn.setConnectTimeout(connectTimeout);
+conn.setReadTimeout(readTimeout);
+return conn;
+  }
+
+  void destroy() {
+factory.destroy();
+  }
+}
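
Extracting the configurator into its own class gives callers a handle on the `SSLFactory`, whose `init()` starts the truststore reloader thread, so the thread can finally be stopped. A usage sketch under the patch's package-private visibility; the caller class, host URL, and timeout values are hypothetical:

package org.apache.hadoop.hdfs.web; // constructor and destroy() are package-private

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.GeneralSecurityException;
import org.apache.hadoop.conf.Configuration;

public class SslConfiguratorLifecycleSketch {
  public static void main(String[] args)
      throws IOException, GeneralSecurityException {
    Configuration conf = new Configuration();
    SSLConnectionConfigurator configurator =
        new SSLConnectionConfigurator(60_000, 60_000, conf);
    try {
      URL url = new URL("https://namenode.example.com:9871/webhdfs/v1/");
      HttpURLConnection conn =
          configurator.configure((HttpURLConnection) url.openConnection());
      conn.connect();
      conn.disconnect();
    } finally {
      // Without this, the SSLFactory's truststore reloader thread keeps
      // running -- the leak this commit fixes in URLConnectionFactory.
      configurator.destroy();
    }
  }
}
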
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 9713932..8b6c7f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -22,11 +22,6 @@ import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.security.GeneralSecurityException;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLSocketFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -36,7 

[hadoop] 10/20: YARN-9289. Backport YARN-7330 for GPU in UI to branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9a61778525202baad70bd5fa0785d2d8d37c4fee
Author: Jonathan Hung 
AuthorDate: Fri Feb 8 11:26:59 2019 -0800

YARN-9289. Backport YARN-7330 for GPU in UI to branch-2
---
 .../hadoop-yarn/dev-support/findbugs-exclude.xml   |   8 +
 .../apache/hadoop/yarn/api/records/Resource.java   |  20 +++
 .../linux/resources/gpu/GpuResourceAllocator.java  |  19 ++-
 .../resources/gpu/GpuResourceHandlerImpl.java  |   1 -
 .../resourceplugin/ResourcePlugin.java |  11 ++
 .../resourceplugin/gpu/AssignedGpuDevice.java  |  88 ++
 .../resourceplugin/gpu/GpuDevice.java  |  14 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java  |  24 ++-
 .../server/nodemanager/webapp/NMWebServices.java   |  27 +++
 .../nodemanager/webapp/dao/NMResourceInfo.java}|  16 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |   2 +-
 .../webapp/dao/gpu/NMGpuResourceInfo.java  |  80 +
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   2 +-
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   2 +-
 .../resources/gpu/TestGpuResourceHandler.java  |   6 +-
 .../nodemanager/webapp/TestNMWebServices.java  | 188 +
 .../dao/gpu/TestGpuDeviceInformationParser.java|   2 +-
 .../app/{constants.js => adapters/yarn-nm-gpu.js}  |  21 ++-
 .../src/main/webapp/app/components/donut-chart.js  |  18 +-
 .../main/webapp/app/components/gpu-donut-chart.js  |  66 
 .../src/main/webapp/app/constants.js   |  13 ++
 .../webapp/app/controllers/yarn-nodes/table.js |   2 +-
 .../src/main/webapp/app/models/cluster-metric.js   |  69 
 .../app/{constants.js => models/yarn-nm-gpu.js}|  15 +-
 .../webapp/app/models/yarn-queue/capacity-queue.js |   3 +-
 .../src/main/webapp/app/models/yarn-rm-node.js |  35 
 .../hadoop-yarn-ui/src/main/webapp/app/router.js   |   5 +-
 .../src/main/webapp/app/routes/cluster-overview.js |   2 +-
 .../src/main/webapp/app/routes/yarn-node.js|   2 +
 .../yarn-node/yarn-nm-gpu.js}  |  10 +-
 .../yarn-node.js => serializers/yarn-nm-gpu.js}|  34 ++--
 .../app/serializers/yarn-queue/capacity-queue.js   |   1 +
 .../main/webapp/app/serializers/yarn-rm-node.js|   4 +-
 .../main/webapp/app/templates/cluster-overview.hbs |  88 ++
 .../app/templates/components/node-menu-panel.hbs   |  10 +-
 .../app/templates/components/yarn-nm-gpu-info.hbs  |  69 
 .../src/main/webapp/app/templates/yarn-node.hbs| 125 --
 .../main/webapp/app/templates/yarn-node/info.hbs   | 154 +
 .../webapp/app/templates/yarn-node/yarn-nm-gpu.hbs |  53 ++
 .../src/main/webapp/app/utils/converter.js |  51 ++
 40 files changed, 1115 insertions(+), 245 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 45aa868..e6dcefb 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -633,4 +633,12 @@
 
 
   
+
+  
+  
+
+
+
+  
+
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 7e8c01d..92137ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.yarn.api.records;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -213,6 +217,22 @@ public abstract class Resource implements 
Comparable {
   }
 
   /**
+   * Get the list of resource information; this will be used by JAXB.
+   * @return a copy of the resource list.
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public List getAllResourcesListCopy() {
+List list = new ArrayList<>();
+for (ResourceInformation i : resources) {
+  ResourceInformation ri = new ResourceInformation();
+  ResourceInformation.copy(i, ri);
+  list.add(ri);
+}
+return list;
+  }
+
+  /**
* Get ResourceInformation for a specified resource.
*
* @param resource name of the resource
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-serv

[hadoop] 12/20: YARN-7594. TestNMWebServices#testGetNMResourceInfo fails on trunk. Contributed by Gergely Novák.

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 618d0154ef289218c8f99f1d13bdbda70145b89f
Author: Sunil G 
AuthorDate: Mon Dec 4 10:45:07 2017 +0530

YARN-7594. TestNMWebServices#testGetNMResourceInfo fails on trunk. 
Contributed by Gergely Novák.
---
 .../yarn/server/nodemanager/webapp/TestNMWebServices.java| 12 
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index 72071da..2f1577f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -456,17 +456,17 @@ public class TestNMWebServices extends JerseyTestBase {
 ClientResponse.class);
 assertEquals(MediaType.APPLICATION_JSON, response.getType().toString());
 
-// Access resource-2 should fail (null NMResourceInfo returned).
+// Access resource-2 should fail (empty NMResourceInfo returned).
 JSONObject json = response.getEntity(JSONObject.class);
-assertIncludesException(json);
+assertEquals(0, json.length());
 
-// Access resource-3 should fail (unkown plugin)
+// Access resource-3 should fail (unknown plugin)
 response = r.path("ws").path("v1").path("node").path(
 "resources").path("resource-3").accept(MediaType.APPLICATION_JSON).get(
 ClientResponse.class);
 assertEquals(MediaType.APPLICATION_JSON, response.getType().toString());
 json = response.getEntity(JSONObject.class);
-assertIncludesException(json);
+assertEquals(0, json.length());
 
 // Access resource-1 should success
 response = r.path("ws").path("v1").path("node").path(
@@ -533,10 +533,6 @@ public class TestNMWebServices extends JerseyTestBase {
 assertEquals(2, json.getJSONArray("assignedGpuDevices").length());
   }
 
-  private void assertIncludesException(JSONObject json) {
-assertTrue(json.has("RemoteException"));
-  }
-
   private void testContainerLogs(WebResource r, ContainerId containerId)
   throws IOException {
 final String containerIdStr = containerId.toString();





[hadoop] 16/20: YARN-7345. GPU Isolation: Incorrect minor device numbers written to devices.deny file. (Jonathan Hung via wangda)

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 05292fef5ef3be612c184472ec578b31af907e56
Author: Wangda Tan 
AuthorDate: Thu Oct 19 14:45:44 2017 -0700

YARN-7345. GPU Isolation: Incorrect minor device numbers written to 
devices.deny file. (Jonathan Hung via wangda)
---
 .../native/container-executor/impl/modules/gpu/gpu-module.c |  2 +-
 .../container-executor/test/modules/gpu/test-gpu-module.cc  | 13 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
index f96645d..1a1b164 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
@@ -108,7 +108,7 @@ static int internal_handle_gpu_request(
 char param_value[128];
 memset(param_value, 0, sizeof(param_value));
 snprintf(param_value, sizeof(param_value), "c %d:%d rwm",
- major_device_number, i);
+ major_device_number, minor_devices[i]);
 
 int rc = update_cgroups_parameters_func_p("devices", "deny",
   container_id, param_value);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
index 7e41fb4..b3d93dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
@@ -165,6 +165,19 @@ TEST_F(TestGpuModule, 
test_verify_gpu_module_calls_cgroup_parameter) {
 
   // Verify cgroups parameters
   verify_param_updated_to_cgroups(0, NULL);
+
+  /* Test case 3: block 2 non-sequential devices */
+  cgroups_parameters_invoked.clear();
+  char* argv_2[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", 
(char*) "1,3",
+   (char*) "--container_id", container_id };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+ "gpu", 5, argv_2);
+  ASSERT_EQ(0, rc) << "Should success.\n";
+
+  // Verify cgroups parameters
+  const char* expected_cgroups_argv_2[] = { "devices", "deny", container_id, 
"c 195:1 rwm",
+"devices", "deny", container_id, "c 195:3 rwm"};
+  verify_param_updated_to_cgroups(8, expected_cgroups_argv_2);
 }
 
 TEST_F(TestGpuModule, test_illegal_cli_parameters) {
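
The one-line fix writes the actual minor device number instead of the loop index into each `devices.deny` entry; the two only coincide when the minors happen to be sequential from zero. A Java analogue of the bug (the real code is C in container-executor; the major number 195 is taken from the test expectations above):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class GpuDenySketch {
  // Major device number for Nvidia GPUs, as in the test expectations.
  static final int NVIDIA_MAJOR = 195;

  // Buggy variant: formats the loop index i, which only matches the real
  // minor numbers when they happen to be 0,1,2,...
  static List<String> denyRulesBroken(int[] minors) {
    return IntStream.range(0, minors.length)
        .mapToObj(i -> String.format("c %d:%d rwm", NVIDIA_MAJOR, i))
        .collect(Collectors.toList());
  }

  // Fixed variant: formats the element itself, mirroring minor_devices[i].
  static List<String> denyRulesFixed(int[] minors) {
    return Arrays.stream(minors)
        .mapToObj(m -> String.format("c %d:%d rwm", NVIDIA_MAJOR, m))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    int[] excluded = {1, 3}; // non-sequential minors, as in test case 3
    System.out.println(denyRulesBroken(excluded)); // [c 195:0 rwm, c 195:1 rwm]
    System.out.println(denyRulesFixed(excluded));  // [c 195:1 rwm, c 195:3 rwm]
  }
}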





[hadoop] 02/20: YARN-7270 addendum: Reapplied changes after YARN-3926 backports

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 93fe781e8c7afd8b6b2db957cb2789fdde5a1fb8
Author: Daniel Templeton 
AuthorDate: Mon Oct 16 11:43:54 2017 -0700

YARN-7270 addendum: Reapplied changes after YARN-3926 backports
---
 .../src/main/java/org/apache/hadoop/yarn/api/records/Resource.java| 4 ++--
 .../org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java  | 4 ++--
 .../org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java| 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 37b50f2..be0ab58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -285,7 +285,7 @@ public abstract class Resource implements 
Comparable {
   return;
 }
 if (resource.equals(ResourceInformation.VCORES_URI)) {
-  this.setVirtualCores((int) resourceInformation.getValue());
+  this.setVirtualCores(castToIntSafely(resourceInformation.getValue()));
   return;
 }
 ResourceInformation storedResourceInfo = getResourceInformation(resource);
@@ -331,7 +331,7 @@ public abstract class Resource implements 
Comparable {
   return;
 }
 if (resource.equals(ResourceInformation.VCORES_URI)) {
-  this.setVirtualCores((int)value);
+  this.setVirtualCores(castToIntSafely(value));
   return;
 }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index b80e133..a64d242 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -92,7 +92,7 @@ public class LightWeightResource extends Resource {
   @Override
   @SuppressWarnings("deprecation")
   public int getMemory() {
-return (int) memoryResInfo.getValue();
+return castToIntSafely(memoryResInfo.getValue());
   }
 
   @Override
@@ -113,7 +113,7 @@ public class LightWeightResource extends Resource {
 
   @Override
   public int getVirtualCores() {
-return (int) vcoresResInfo.getValue();
+return castToIntSafely(vcoresResInfo.getValue());
   }
 
   @Override
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 06c30ff..4ae64c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -117,7 +117,7 @@ public class ResourcePBImpl extends Resource {
   @Override
   public int getVirtualCores() {
 // vcores should always be present
-return (int) resources[VCORES_INDEX].getValue();
+return castToIntSafely(resources[VCORES_INDEX].getValue());
   }
 
   @Override
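
The addendum replaces raw `(int)` casts, which silently wrap for values above `Integer.MAX_VALUE`, with `castToIntSafely`. The helper's body is not part of this diff, so the sketch below assumes a clamping behavior for illustration:

public class CastSketch {
  // Assumed behavior of castToIntSafely (its implementation is not shown in
  // this diff): clamp to Integer.MAX_VALUE instead of wrapping around.
  static int castToIntSafely(long value) {
    if (value > Integer.MAX_VALUE) {
      return Integer.MAX_VALUE;
    }
    return (int) value;
  }

  public static void main(String[] args) {
    long bytes = 8L * 1024 * 1024 * 1024;       // 8 GiB, larger than 2^31-1
    System.out.println((int) bytes);            // raw cast wraps around: 0
    System.out.println(castToIntSafely(bytes)); // clamps: 2147483647
  }
}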





[hadoop] 06/20: YARN-9180. Port YARN-7033 NM recovery of assigned resources to branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4a1c7e6aade2b5d2621c5e09be2597dc6a73cd04
Author: Jonathan Hung 
AuthorDate: Fri Feb 1 15:20:50 2019 -0800

YARN-9180. Port YARN-7033 NM recovery of assigned resources to branch-2
---
 .../containermanager/container/Container.java  |   7 +
 .../containermanager/container/ContainerImpl.java  |  13 ++
 .../container/ResourceMappings.java| 124 
 .../recovery/NMLeveldbStateStoreService.java   |  42 ++
 .../recovery/NMNullStateStoreService.java  |   7 +
 .../nodemanager/recovery/NMStateStoreService.java  |  23 +++
 .../TestContainerManagerRecovery.java  | 163 +++--
 .../recovery/NMMemoryStateStoreService.java|  14 ++
 .../recovery/TestNMLeveldbStateStoreService.java   | 122 ++-
 .../server/nodemanager/webapp/MockContainer.java   |   6 +
 10 files changed, 436 insertions(+), 85 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index b9d1e31..b5e3aa1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -98,4 +98,11 @@ public interface Container extends 
EventHandler {
   void sendPauseEvent(String description);
 
   Priority getPriority();
+
+  /**
+   * Get assigned resource mappings to the container.
+   *
+   * @return Resource Mappings of the container
+   */
+  ResourceMappings getResourceMappings();
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 4675716..e6c7bce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -188,6 +188,7 @@ public class ContainerImpl implements Container {
   private boolean recoveredAsKilled = false;
   private Context context;
   private ResourceSet resourceSet;
+  private ResourceMappings resourceMappings;
 
   public ContainerImpl(Configuration conf, Dispatcher dispatcher,
   ContainerLaunchContext launchContext, Credentials creds,
@@ -245,6 +246,7 @@ public class ContainerImpl implements Container {
 stateMachine = stateMachineFactory.make(this, ContainerState.NEW,
 context.getContainerStateTransitionListener());
 this.resourceSet = new ResourceSet();
+this.resourceMappings = new ResourceMappings();
   }
 
   private static ContainerRetryContext configureRetryContext(
@@ -285,6 +287,7 @@ public class ContainerImpl implements Container {
 this.remainingRetryAttempts = rcs.getRemainingRetryAttempts();
 this.workDir = rcs.getWorkDir();
 this.logDir = rcs.getLogDir();
+this.resourceMappings = rcs.getResourceMappings();
   }
 
   private static final ContainerDiagnosticsUpdateTransition 
UPDATE_DIAGNOSTICS_TRANSITION =
@@ -2172,4 +2175,14 @@ public class ContainerImpl implements Container {
   public Priority getPriority() {
 return containerTokenIdentifier.getPriority();
   }
+
+  /**
+   * Get assigned resource mappings to the container.
+   *
+   * @return Resource Mappings of the container
+   */
+  @Override
+  public ResourceMappings getResourceMappings() {
+return resourceMappings;
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ResourceMappings.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ResourceMappings.java
new file mode 100644
index 000..d673341
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/

[hadoop] 13/20: YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit df6a7b0c1052b8e236d9c462e5cfe02099b56cbf
Author: Sunil G 
AuthorDate: Wed Feb 21 14:16:45 2018 +0530

YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.
---
 .../src/site/markdown/UsingGpus.md | 230 +
 1 file changed, 230 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
new file mode 100644
index 000..f6000e7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
@@ -0,0 +1,230 @@
+
+
+
+# Using GPU On YARN
+# Prerequisites
+
+- As of now, only Nvidia GPUs are supported by YARN.
+- YARN node managers have to be pre-installed with Nvidia drivers.
+- When Docker is used as the container runtime, nvidia-docker 1.0 needs to be
+  installed (the version of nvidia-docker currently supported by YARN).
+
+# Configs
+
+## GPU scheduling
+
+In `resource-types.xml`
+
+Add the following properties:
+
+```
+<configuration>
+  <property>
+     <name>yarn.resource-types</name>
+     <value>yarn.io/gpu</value>
+  </property>
+</configuration>
+```
+
+In `yarn-site.xml`
+
+`DominantResourceCalculator` MUST be configured to enable GPU 
scheduling/isolation.
+
+For `Capacity Scheduler`, use the following property to configure
+`DominantResourceCalculator` (in `capacity-scheduler.xml`):
+
+| Property | Default value |
+| --- | --- |
+|  yarn.scheduler.capacity.resource-calculator | 
org.apache.hadoop.yarn.util.resource.DominantResourceCalculator |
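
For instance, the table row above corresponds to this snippet in `capacity-scheduler.xml` (values taken verbatim from the table):

```
<property>
  <name>yarn.scheduler.capacity.resource-calculator</name>
  <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
</property>
```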
+
+
+## GPU Isolation
+
+### In `yarn-site.xml`
+
+```
+  <property>
+    <name>yarn.nodemanager.resource-plugins</name>
+    <value>yarn.io/gpu</value>
+  </property>
+```
+
+This enables the GPU isolation module on the NodeManager side.
+
+By default, YARN will automatically detect and configure GPUs when the above
+config is set. The following configs need to be set in `yarn-site.xml` only if
+the admin has specialized requirements.
+
+**1) Allowed GPU Devices**
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices | auto |
+
+  Specifies the GPU devices that can be managed by the YARN NodeManager
+  (comma-separated). The number of GPU devices will be reported to the RM
+  to make scheduling decisions. Set to `auto` (the default) to let YARN
+  automatically discover GPU resources from the system.
+
+  Manually specify GPU devices if auto-detection fails or if the admin wants
+  only a subset of the GPU devices to be managed by YARN. A GPU device is
+  identified by its index and minor device number. A common approach to get
+  the minor device numbers of GPUs is to run `nvidia-smi -q` and search the
+  `Minor Number` output.
+
+  When minor numbers are specified manually, the admin needs to include the
+  GPU indices as well, in the format
+  `index:minor_number[,index:minor_number...]`. For example,
+  `0:0,1:1,2:2,3:4` allows the YARN NodeManager to manage the GPU devices
+  with indices `0/1/2/3` and minor numbers `0/1/2/4`.
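
As a concrete sketch, the manual specification above maps to this `yarn-site.xml` snippet (built only from the property and example value already given):

```
<property>
  <name>yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices</name>
  <value>0:0,1:1,2:2,3:4</value>
</property>
```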
+
+**2) Executable to discover GPUs**
+
+| Property | Example value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables | 
/absolute/path/to/nvidia-smi |
+
+When `yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices` is set to
+`auto`, the YARN NodeManager needs to run a GPU discovery binary (currently
+only `nvidia-smi` is supported) to get GPU-related information.
+When the value is empty (the default), the YARN NodeManager will try to locate
+the discovery executable itself.
+An example of the config value is: `/usr/local/bin/nvidia-smi`
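
As a sketch, that example value goes into `yarn-site.xml` like so (the path is illustrative and depends on where the binary is installed):

```
<property>
  <name>yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables</name>
  <value>/usr/local/bin/nvidia-smi</value>
</property>
```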
+
+**3) Docker Plugin Related Configs**
+
+The following configs can be customized when users need to run GPU applications
+inside Docker containers. They are not required if the admin follows the default
+installation/configuration of `nvidia-docker`.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.docker-plugin | nvidia-docker-v1 |
+
+Specifies the Docker command plugin for GPU. By default, Nvidia Docker v1.0 is used.
+
+| Property | Default value |
+| --- | --- |
+| 
yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidia-docker-v1.endpoint | 
http://localhost:3476/v1.0/docker/cli |
+
+Specifies the endpoint of `nvidia-docker-plugin`. See
+https://github.com/NVIDIA/nvidia-docker/wiki for more details.
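
For example, overriding the default endpoint would look like this in `yarn-site.xml` (host and port are illustrative; with a default `nvidia-docker` install no override is needed):

```
<property>
  <name>yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidia-docker-v1.endpoint</name>
  <value>http://gpu-host.example.com:3476/v1.0/docker/cli</value>
</property>
```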
+
+**4) CGroups mount**
+
+GPU isolation uses the CGroups [devices
+controller](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt) to
+do per-GPU device isolation. The following config should be added to
+`yarn-site.xml` to automatically mount the CGroups devices subsystem; otherwise
+the admin has to manually create the devices subfolder in order to use this
+feature.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.linux-container-executor.cgroups.mount | true |
+
+
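For example, in `yarn-site.xml` (the companion `mount-path` property is shown as an assumption, only to illustrate where the CGroups hierarchy would be mounted):

```
<property>
  <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
  <value>true</value>
</property>
<property>
  <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
  <value>/sys/fs/cgroup</value>
</property>
```
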
+### In `container-executor.cfg`
+
+In general, the following config needs to be added to `container-executor.cfg`:
+
+```
+[gpu]
+module.enabled=true
+```

[hadoop] 05/20: YARN-9187. Backport YARN-6852 for GPU-specific native changes to branch-2

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f0dcb31f3cb1b012cff14f0475f3ecffa6930c6c
Author: Jonathan Hung 
AuthorDate: Wed Jan 9 16:21:43 2019 -0500

YARN-9187. Backport YARN-6852 for GPU-specific native changes to branch-2
---
 .../src/CMakeLists.txt |   8 +-
 .../container-executor/impl/container-executor.h   |   2 +
 .../src/main/native/container-executor/impl/main.c |  11 +
 .../impl/modules/cgroups/cgroups-operations.c  | 161 +++
 .../impl/modules/cgroups/cgroups-operations.h  |  55 +
 .../impl/modules/gpu/gpu-module.c  | 229 +
 .../impl/modules/gpu/gpu-module.h  |  45 
 .../test/modules/cgroups/test-cgroups-module.cc| 121 +++
 .../test/modules/gpu/test-gpu-module.cc| 203 ++
 .../test/test-container-executor.c |   1 -
 10 files changed, 833 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 0b1c3e9..e9f8aff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -101,9 +101,11 @@ add_library(container
 main/native/container-executor/impl/container-executor.c
 main/native/container-executor/impl/get_executable.c
 main/native/container-executor/impl/utils/string-utils.c
+main/native/container-executor/impl/utils/docker-util.c
 main/native/container-executor/impl/utils/path-utils.c
+main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
 main/native/container-executor/impl/modules/common/module-configs.c
-main/native/container-executor/impl/utils/docker-util.c
+main/native/container-executor/impl/modules/gpu/gpu-module.c
 )
 
 add_executable(container-executor
@@ -135,6 +137,8 @@ add_executable(cetest
 main/native/container-executor/test/utils/test-string-utils.cc
 main/native/container-executor/test/utils/test-path-utils.cc
 main/native/container-executor/test/test_util.cc
-main/native/container-executor/test/utils/test_docker_util.cc)
+main/native/container-executor/test/utils/test_docker_util.cc
+
main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
+main/native/container-executor/test/modules/gpu/test-gpu-module.cc)
 target_link_libraries(cetest gtest container)
 output_directory(cetest test)
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 956b38c..a78b077 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -285,3 +285,5 @@ int execute_regex_match(const char *regex_str, const char 
*input);
  * Return 0 on success.
  */
 int validate_docker_image_name(const char *image_name);
+
+struct configuration* get_cfg();
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 930dabe..9cf34a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -22,6 +22,8 @@
 #include "util.h"
 #include "get_executable.h"
 #include "utils/string-utils.h"
+#include "modules/gpu/gpu-module.h"
+#include "modules/cgroups/cgroups-operations.h"
 
 #include 
 #include 
@@ -241,6 +243,14 @@ static int validate_arguments(int argc, char **argv , int 
*operation) {
 return INVALID_ARGUMENT_NUMBER;
   }
 
+  /*
+   * Check if it is a known module, if yes, redirect to module
+   */
+  if (strcmp("--module-gpu", argv[1]) == 0) {
+return handle_gpu_request(&update_cgroups_parameters, "gpu", argc - 1,
+   &argv[1]);
+  }
+
   if (strcmp("--checksetup", argv[1]) == 0) {
 *operation = CHECK_SETUP;
 return 0;
@@ -325,6 +335,7 @@ static int validate_arguments(int argc,

[hadoop] 20/20: YARN-9271. Backport YARN-6927 for resource type support in MapReduce

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9c6dbd827f3bb7288322c4e7c1f422d0a0b13724
Author: Jonathan Hung 
AuthorDate: Wed Mar 20 17:46:35 2019 -0700

YARN-9271. Backport YARN-6927 for resource type support in MapReduce
---
 .../mapreduce/v2/app/job/impl/TaskAttemptImpl.java | 141 +++-
 .../mapreduce/TestMapreduceConfigFields.java   |  11 +
 .../mapreduce/v2/app/job/impl/TestTaskAttempt.java | 365 -
 .../org/apache/hadoop/mapreduce/MRJobConfig.java   |  68 +++-
 .../java/org/apache/hadoop/mapred/YARNRunner.java  |  86 -
 .../org/apache/hadoop/mapred/TestYARNRunner.java   | 167 ++
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  44 +++
 7 files changed, 853 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index dfc3adb..3f37d4d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
+import static org.apache.commons.lang.StringUtils.isEmpty;
+
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -126,6 +128,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -139,6 +142,8 @@ import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.RackResolver;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -664,12 +669,8 @@ public abstract class TaskAttemptImpl implements
 this.jobFile = jobFile;
 this.partition = partition;
 
-//TODO:create the resource reqt for this Task attempt
 this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
-this.resourceCapability.setMemorySize(
-getMemoryRequired(conf, taskId.getTaskType()));
-this.resourceCapability.setVirtualCores(
-getCpuRequired(conf, taskId.getTaskType()));
+populateResourceCapability(taskId.getTaskType());
 
 this.dataLocalHosts = resolveHosts(dataLocalHosts);
 RackResolver.init(conf);
@@ -701,21 +702,133 @@ public abstract class TaskAttemptImpl implements
 return memory;
   }
 
+  private void populateResourceCapability(TaskType taskType) {
+String resourceTypePrefix =
+getResourceTypePrefix(taskType);
+boolean memorySet = false;
+boolean cpuVcoresSet = false;
+if (resourceTypePrefix != null) {
+  List<ResourceInformation> resourceRequests =
+  ResourceUtils.getRequestedResourcesFromConfig(conf,
+  resourceTypePrefix);
+  for (ResourceInformation resourceRequest : resourceRequests) {
+String resourceName = resourceRequest.getName();
+if (MRJobConfig.RESOURCE_TYPE_NAME_MEMORY.equals(resourceName) ||
+MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY.equals(
+resourceName)) {
+  if (memorySet) {
+throw new IllegalArgumentException(
+"Only one of the following keys " +
+"can be specified for a single job: " +
+MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY + ", " +
+MRJobConfig.RESOURCE_TYPE_NAME_MEMORY);
+  }
+  String units = isEmpty(resourceRequest.getUnits()) ?
+  ResourceUtils.getDefaultUnit(ResourceInformation.MEMORY_URI) :
+resourceRequest.getUnits();
+  this.resourceCapability.setMemorySize(
+  UnitsConversionUtil.convert(units, "Mi",
+  resourceRequest.getValue()));
+  memorySet = true;
+  String memoryKey = getMemoryKey(taskType);
+  if (memoryKey != null && conf.get(memoryKey) != null) {
+LOG.warn("Configuration " + reso
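
For context on what `populateResourceCapability` above parses: with this backport, per-task resource requests come from configuration keys of the form `<prefix><resource-name>`, where the prefix depends on the task type. A hedged example job configuration; the key prefixes (`mapreduce.map.resource.`, `mapreduce.reduce.resource.`) follow the YARN-6927 convention, and the GPU entry assumes the `yarn.io/gpu` resource type is registered as in the GPU doc above:

```xml
<configuration>
  <!-- Memory for map tasks, with units; parsed by populateResourceCapability
       and converted to Mi via UnitsConversionUtil. -->
  <property>
    <name>mapreduce.map.resource.memory</name>
    <value>3Gi</value>
  </property>
  <!-- vcores for map tasks. -->
  <property>
    <name>mapreduce.map.resource.vcores</name>
    <value>2</value>
  </property>
  <!-- A countable custom resource type for reduce tasks. -->
  <property>
    <name>mapreduce.reduce.resource.yarn.io/gpu</name>
    <value>1</value>
  </property>
</configuration>
```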

[hadoop] 04/20: YARN-9175. Null resources check in ResourceInfo for branch-3.0

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b32e2a7d307157318f8e518eedc5b3ee4c53dc57
Author: Jonathan Hung 
AuthorDate: Thu Jan 3 15:58:10 2019 -0500

YARN-9175. Null resources check in ResourceInfo for branch-3.0

(cherry picked from commit a0291a015c1af0ea1282849bd8fb32824d7452fa)
---
 .../hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index e13980a..dd80d20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -62,7 +62,7 @@ public class ResourceInfo {
 
   @Override
   public String toString() {
-return resources.toString();
+return getResource().toString();
   }
 
   public void setMemory(int memory) {
@@ -82,6 +82,9 @@ public class ResourceInfo {
   }
 
   public Resource getResource() {
+if (resources == null) {
+  resources = Resource.newInstance(memory, vCores);
+}
 return Resource.newInstance(resources);
   }
 }
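
The null check matters because `ResourceInfo` is a JAXB DAO: the REST layer can instantiate it reflectively and populate only the primitive `memory`/`vCores` fields, leaving `resources` null. A hedged sketch of the failure mode this patch closes (the no-arg constructor and `setvCores` setter are assumed from the usual JAXB DAO pattern):

```java
// Before the patch: toString() dereferenced the null 'resources' field.
ResourceInfo info = new ResourceInfo();  // reflective/JAXB construction, assumed no-arg ctor
info.setMemory(4096);
info.setvCores(4);
// Previously: NullPointerException. Now toString() routes through
// getResource(), which lazily builds a Resource from memory/vCores.
System.out.println(info);
```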





[hadoop] 08/20: YARN-9174. Backport YARN-7224 for refactoring of GpuDevice class

2019-03-26 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit faf0b36e2f9a17f1aa50154beae996cd6a2695b3
Author: Jonathan Hung 
AuthorDate: Wed Feb 6 16:44:26 2019 -0800

YARN-9174. Backport YARN-7224 for refactoring of GpuDevice class
---
 .../linux/resources/gpu/GpuResourceAllocator.java  | 102 ++---
 .../resources/gpu/GpuResourceHandlerImpl.java  |  26 ++--
 .../resourceplugin/gpu/GpuDevice.java  |  78 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |  30 ++--
 .../gpu/GpuNodeResourceUpdateHandler.java  |  10 +-
 .../recovery/NMLeveldbStateStoreService.java   |  66 +
 .../recovery/NMNullStateStoreService.java  |   3 +-
 .../nodemanager/recovery/NMStateStoreService.java  |  15 +-
 .../TestContainerManagerRecovery.java  |   9 +-
 .../resources/gpu/TestGpuResourceHandler.java  | 161 +++--
 .../resourceplugin/gpu/TestGpuDiscoverer.java  |  34 -
 .../recovery/NMMemoryStateStoreService.java|   8 +-
 .../recovery/TestNMLeveldbStateStoreService.java   |  22 ++-
 13 files changed, 385 insertions(+), 179 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index d6bae09..f4a49f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -26,12 +26,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuDevice;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -54,8 +53,8 @@ import static 
org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 public class GpuResourceAllocator {
   final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
 
-  private Set<Integer> allowedGpuDevices = new TreeSet<>();
-  private Map<Integer, ContainerId> usedDevices = new TreeMap<>();
+  private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();
+  private Map<GpuDevice, ContainerId> usedDevices = new TreeMap<>();
   private Context nmContext;
 
   public GpuResourceAllocator(Context ctx) {
@@ -63,14 +62,14 @@ public class GpuResourceAllocator {
   }
 
   /**
-   * Contains allowed and denied devices with minor number.
+   * Contains allowed and denied devices
* Denied devices will be useful for cgroups devices module to do 
blacklisting
*/
   static class GpuAllocation {
-private Set<Integer> allowed = Collections.emptySet();
-private Set<Integer> denied = Collections.emptySet();
+private Set<GpuDevice> allowed = Collections.emptySet();
+private Set<GpuDevice> denied = Collections.emptySet();
 
-GpuAllocation(Set<Integer> allowed, Set<Integer> denied) {
+GpuAllocation(Set<GpuDevice> allowed, Set<GpuDevice> denied) {
   if (allowed != null) {
 this.allowed = ImmutableSet.copyOf(allowed);
   }
@@ -79,21 +78,21 @@ public class GpuResourceAllocator {
   }
 }
 
-public Set<Integer> getAllowedGPUs() {
+public Set<GpuDevice> getAllowedGPUs() {
   return allowed;
 }
 
-public Set<Integer> getDeniedGPUs() {
+public Set<GpuDevice> getDeniedGPUs() {
   return denied;
 }
   }
 
   /**
* Add GPU to allowed list
-   * @param minorNumber minor number of the GPU device.
+   * @param gpuDevice gpu device
*/
-  public synchronized void addGpu(int minorNumber) {
-allowedGpuDevices.add(minorNumber);
+  public synchronized void addGpu(GpuDevice gpuDevice) {
+allowedGpuDevices.add(gpuDevice);
   }
 
   private String getResourceHandlerExceptionMessage(int numRequestedGpuDevices,
@@ -117,42 +116,42 @@ public class GpuResourceAllocator {
   + containerId);
 }
 
-for (Serializable deviceId : c.getResourc
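
The refactoring above replaces bare integer minor numbers with a `GpuDevice` value type. A minimal sketch of what such a type must provide to satisfy the `TreeSet`/`TreeMap` usage in `GpuResourceAllocator` (comparability, equality, serializability); the index/minor-number fields follow the commit summary, but the method bodies are illustrative, not the committed source:

```java
import java.io.Serializable;

// Value type identifying a GPU by its index and minor device number.
public class GpuDevice implements Serializable, Comparable<GpuDevice> {
  private final int index;
  private final int minorNumber;

  public GpuDevice(int index, int minorNumber) {
    this.index = index;
    this.minorNumber = minorNumber;
  }

  public int getIndex() { return index; }
  public int getMinorNumber() { return minorNumber; }

  @Override
  public int compareTo(GpuDevice other) {
    // Order by index first so sorted-set iteration matches device order.
    int c = Integer.compare(index, other.index);
    return c != 0 ? c : Integer.compare(minorNumber, other.minorNumber);
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof GpuDevice)) {
      return false;
    }
    GpuDevice other = (GpuDevice) o;
    return index == other.index && minorNumber == other.minorNumber;
  }

  @Override
  public int hashCode() {
    return 31 * index + minorNumber;
  }

  @Override
  public String toString() {
    return "(index=" + index + ",minor=" + minorNumber + ")";
  }
}
```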

[hadoop] branch ozone-0.4 updated: HDDS-939. Add S3 access check to Ozone manager. Contributed by Ajay Kumar. (#634)

2019-03-26 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 447d534  HDDS-939. Add S3 access check to Ozone manager. Contributed 
by Ajay Kumar. (#634)
447d534 is described below

commit 447d53476ae6f0f03f110a343d996f38a7e6d80e
Author: Ajay Yadav <7813154+ajay...@users.noreply.github.com>
AuthorDate: Tue Mar 26 08:59:59 2019 -0700

HDDS-939. Add S3 access check to Ozone manager. Contributed by Ajay Kumar. 
(#634)

(cherry picked from commit 82d477293c879ed4529efe2b5e8d138e09bbce3c)
---
 .../hadoop/ozone/om/S3SecretManagerImpl.java   | 21 ++---
 .../hadoop/ozone/om/helpers/S3SecretValue.java | 11 ++-
 .../client/rpc/TestOzoneRpcClientAbstract.java | 20 
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |  7 +++--
 .../hadoop/ozone/s3/endpoint/RootEndpoint.java |  7 +++--
 .../apache/hadoop/ozone/s3/header/Credential.java  | 16 --
 .../apache/hadoop/ozone/s3/util/OzoneS3Util.java   | 36 ++
 .../hadoop/ozone/s3/endpoint/TestRootList.java |  4 ++-
 8 files changed, 94 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 6febcaf..44712d5 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -61,8 +61,7 @@ public class S3SecretManagerImpl implements S3SecretManager {
   public S3SecretValue getS3Secret(String kerberosID) throws IOException {
 Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
 "kerberosID cannot be null or empty.");
-String awsAccessKeyStr = DigestUtils.md5Hex(kerberosID);
-byte[] awsAccessKey = awsAccessKeyStr.getBytes(UTF_8);
+byte[] awsAccessKey = kerberosID.getBytes(UTF_8);
 S3SecretValue result = null;
 omMetadataManager.getLock().acquireS3SecretLock(kerberosID);
 try {
@@ -77,33 +76,31 @@ public class S3SecretManagerImpl implements S3SecretManager 
{
 result = S3SecretValue.fromProtobuf(
 OzoneManagerProtocolProtos.S3Secret.parseFrom(s3Secret));
   }
-  result.setAwsAccessKey(awsAccessKeyStr);
 } finally {
   omMetadataManager.getLock().releaseS3SecretLock(kerberosID);
 }
-LOG.trace("Secret for kerberosID:{},accessKey:{}, proto:{}", kerberosID,
-awsAccessKeyStr, result);
+LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
 return result;
   }
 
   @Override
-  public String getS3UserSecretString(String awsAccessKeyId)
+  public String getS3UserSecretString(String kerberosID)
   throws IOException {
-Preconditions.checkArgument(Strings.isNotBlank(awsAccessKeyId),
+Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
 "awsAccessKeyId cannot be null or empty.");
-LOG.trace("Get secret for awsAccessKey:{}", awsAccessKeyId);
+LOG.trace("Get secret for awsAccessKey:{}", kerberosID);
 
 byte[] s3Secret;
-omMetadataManager.getLock().acquireS3SecretLock(awsAccessKeyId);
+omMetadataManager.getLock().acquireS3SecretLock(kerberosID);
 try {
   s3Secret = omMetadataManager.getS3SecretTable()
-  .get(awsAccessKeyId.getBytes(UTF_8));
+  .get(kerberosID.getBytes(UTF_8));
   if (s3Secret == null) {
 throw new OzoneSecurityException("S3 secret not found for " +
-"awsAccessKeyId " + awsAccessKeyId, S3_SECRET_NOT_FOUND);
+"awsAccessKeyId " + kerberosID, S3_SECRET_NOT_FOUND);
   }
 } finally {
-  omMetadataManager.getLock().releaseS3SecretLock(awsAccessKeyId);
+  omMetadataManager.getLock().releaseS3SecretLock(kerberosID);
 }
 
 return OzoneManagerProtocolProtos.S3Secret.parseFrom(s3Secret)
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
index 23f4c05..2608e77 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.om.helpers;
 
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 /**
@@ -26,12 +25,10 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 public class S3SecretValue {
   private String kerberosID;
   private String awsSecret;
-  private String awsAccessKey;
 
   public S3SecretValue(String kerberosID, String awsSecret) {

[hadoop] branch trunk updated: HDDS-939. Add S3 access check to Ozone manager. Contributed by Ajay Kumar. (#634)

2019-03-26 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 82d4772  HDDS-939. Add S3 access check to Ozone manager. Contributed 
by Ajay Kumar. (#634)
82d4772 is described below

commit 82d477293c879ed4529efe2b5e8d138e09bbce3c
Author: Ajay Yadav <7813154+ajay...@users.noreply.github.com>
AuthorDate: Tue Mar 26 08:59:59 2019 -0700

HDDS-939. Add S3 access check to Ozone manager. Contributed by Ajay Kumar. 
(#634)
---
 .../hadoop/ozone/om/S3SecretManagerImpl.java   | 21 ++---
 .../hadoop/ozone/om/helpers/S3SecretValue.java | 11 ++-
 .../client/rpc/TestOzoneRpcClientAbstract.java | 20 
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |  7 +++--
 .../hadoop/ozone/s3/endpoint/RootEndpoint.java |  7 +++--
 .../apache/hadoop/ozone/s3/header/Credential.java  | 16 --
 .../apache/hadoop/ozone/s3/util/OzoneS3Util.java   | 36 ++
 .../hadoop/ozone/s3/endpoint/TestRootList.java |  4 ++-
 8 files changed, 94 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 6febcaf..44712d5 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -61,8 +61,7 @@ public class S3SecretManagerImpl implements S3SecretManager {
   public S3SecretValue getS3Secret(String kerberosID) throws IOException {
 Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
 "kerberosID cannot be null or empty.");
-String awsAccessKeyStr = DigestUtils.md5Hex(kerberosID);
-byte[] awsAccessKey = awsAccessKeyStr.getBytes(UTF_8);
+byte[] awsAccessKey = kerberosID.getBytes(UTF_8);
 S3SecretValue result = null;
 omMetadataManager.getLock().acquireS3SecretLock(kerberosID);
 try {
@@ -77,33 +76,31 @@ public class S3SecretManagerImpl implements S3SecretManager 
{
 result = S3SecretValue.fromProtobuf(
 OzoneManagerProtocolProtos.S3Secret.parseFrom(s3Secret));
   }
-  result.setAwsAccessKey(awsAccessKeyStr);
 } finally {
   omMetadataManager.getLock().releaseS3SecretLock(kerberosID);
 }
-LOG.trace("Secret for kerberosID:{},accessKey:{}, proto:{}", kerberosID,
-awsAccessKeyStr, result);
+LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
 return result;
   }
 
   @Override
-  public String getS3UserSecretString(String awsAccessKeyId)
+  public String getS3UserSecretString(String kerberosID)
   throws IOException {
-Preconditions.checkArgument(Strings.isNotBlank(awsAccessKeyId),
+Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
 "awsAccessKeyId cannot be null or empty.");
-LOG.trace("Get secret for awsAccessKey:{}", awsAccessKeyId);
+LOG.trace("Get secret for awsAccessKey:{}", kerberosID);
 
 byte[] s3Secret;
-omMetadataManager.getLock().acquireS3SecretLock(awsAccessKeyId);
+omMetadataManager.getLock().acquireS3SecretLock(kerberosID);
 try {
   s3Secret = omMetadataManager.getS3SecretTable()
-  .get(awsAccessKeyId.getBytes(UTF_8));
+  .get(kerberosID.getBytes(UTF_8));
   if (s3Secret == null) {
 throw new OzoneSecurityException("S3 secret not found for " +
-"awsAccessKeyId " + awsAccessKeyId, S3_SECRET_NOT_FOUND);
+"awsAccessKeyId " + kerberosID, S3_SECRET_NOT_FOUND);
   }
 } finally {
-  omMetadataManager.getLock().releaseS3SecretLock(awsAccessKeyId);
+  omMetadataManager.getLock().releaseS3SecretLock(kerberosID);
 }
 
 return OzoneManagerProtocolProtos.S3Secret.parseFrom(s3Secret)
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
index 23f4c05..2608e77 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.om.helpers;
 
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 /**
@@ -26,12 +25,10 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 public class S3SecretValue {
   private String kerberosID;
   private String awsSecret;
-  private String awsAccessKey;
 
   public S3SecretValue(String kerberosID, String awsSecret) {
 this.kerberosID = kerberosID;
 this.awsSecret = awsSecret;
-this.awsAcces
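
The net effect, identical in both branch commits: the AWS access key id handed to S3 clients is now the Kerberos identity itself rather than an MD5 digest of it, so the OM can map an S3 request straight back to a principal. A hedged usage sketch against the methods shown in the diff (the constructor arguments, getter name, and sample principal are assumptions for illustration):

```java
// Assumes an S3SecretManagerImpl wired to OM metadata, as in the diff above.
S3SecretManager s3SecretManager =
    new S3SecretManagerImpl(conf, omMetadataManager);

String kerberosId = "testuser/s3g@EXAMPLE.COM";  // illustrative principal
S3SecretValue secret = s3SecretManager.getS3Secret(kerberosId);

// After this change there is no separate awsAccessKey field:
// the kerberosID itself serves as the S3 access key id.
String accessKeyId = secret.getKerberosID();
String secretKey = s3SecretManager.getS3UserSecretString(accessKeyId);
```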

[hadoop] branch ozone-0.4 updated: HDDS-1310. In datanode once a container becomes unhealthy, datanode restart fails.

2019-03-26 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new f6acbc9  HDDS-1310. In datanode once a container becomes unhealthy, 
datanode restart fails.
f6acbc9 is described below

commit f6acbc9cafc9923069fb264540d7bf9a95c5b42e
Author: Sandeep Nemuri 
AuthorDate: Tue Mar 26 15:44:18 2019 +0530

HDDS-1310. In datanode once a container becomes unhealthy, datanode restart 
fails.

Signed-off-by: Nanda kumar 
(cherry picked from commit 5c0a81ad3c3d28cc1dd8d91594298d3fc7ebcfa4)
---
 .../hadoop/ozone/container/keyvalue/KeyValueContainer.java  |  3 +++
 .../ozone/container/keyvalue/TestKeyValueContainer.java | 13 +
 2 files changed, 16 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index de1b109..0d45d68 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -618,6 +618,9 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
 case CLOSED:
   state = ContainerReplicaProto.State.CLOSED;
   break;
+case UNHEALTHY:
+  state = ContainerReplicaProto.State.UNHEALTHY;
+  break;
 default:
   throw new StorageContainerException("Invalid Container state found: " +
   containerData.getContainerID(), INVALID_CONTAINER_STATE);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index c7c08b0..1aa7361 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -333,6 +333,19 @@ public class TestKeyValueContainer {
   }
 
   @Test
+  public void testReportOfUnhealthyContainer() throws Exception {
+keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+keyValueContainer.markContainerUnhealthy();
+File containerFile = keyValueContainer.getContainerFile();
+keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
+.readContainerFile(containerFile);
+assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
+keyValueContainerData.getState());
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+  }
+
+  @Test
   public void testUpdateContainer() throws IOException {
 keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 Map<String, String> metadata = new HashMap<>();





[hadoop] branch trunk updated: HDDS-1310. In datanode once a container becomes unhealthy, datanode restart fails.

2019-03-26 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5c0a81a  HDDS-1310. In datanode once a container becomes unhealthy, 
datanode restart fails.
5c0a81a is described below

commit 5c0a81ad3c3d28cc1dd8d91594298d3fc7ebcfa4
Author: Sandeep Nemuri 
AuthorDate: Tue Mar 26 15:44:18 2019 +0530

HDDS-1310. In datanode once a container becomes unhealthy, datanode restart 
fails.

Signed-off-by: Nanda kumar 
---
 .../hadoop/ozone/container/keyvalue/KeyValueContainer.java  |  3 +++
 .../ozone/container/keyvalue/TestKeyValueContainer.java | 13 +
 2 files changed, 16 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 47af110..26b0ce1 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -623,6 +623,9 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
 case CLOSED:
   state = ContainerReplicaProto.State.CLOSED;
   break;
+case UNHEALTHY:
+  state = ContainerReplicaProto.State.UNHEALTHY;
+  break;
 default:
   throw new StorageContainerException("Invalid Container state found: " +
   containerData.getContainerID(), INVALID_CONTAINER_STATE);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index c7c08b0..1aa7361 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -333,6 +333,19 @@ public class TestKeyValueContainer {
   }
 
   @Test
+  public void testReportOfUnhealthyContainer() throws Exception {
+keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+keyValueContainer.markContainerUnhealthy();
+File containerFile = keyValueContainer.getContainerFile();
+keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
+.readContainerFile(containerFile);
+assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
+keyValueContainerData.getState());
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+  }
+
+  @Test
   public void testUpdateContainer() throws IOException {
 keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 Map<String, String> metadata = new HashMap<>();

