[hadoop] branch trunk updated: YARN-9809. Added node manager health status to resource manager registration call. Contributed by Eric Badger via eyang

2020-06-30 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e8dc862  YARN-9809. Added node manager health status to resource 
manager registration call. Contributed by Eric Badger via eyang
e8dc862 is described below

commit e8dc862d3856e9eaea124c625dade36f1dd53fe2
Author: Eric Yang 
AuthorDate: Tue Jun 30 11:39:16 2020 -0700

YARN-9809. Added node manager health status to resource manager 
registration call.
   Contributed by Eric Badger via eyang
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  7 +++
 .../src/main/resources/yarn-default.xml|  7 +++
 .../RegisterNodeManagerRequest.java| 19 ++-
 .../impl/pb/RegisterNodeManagerRequestPBImpl.java  | 39 -
 .../proto/yarn_server_common_service_protos.proto  |  1 +
 .../server/nodemanager/NodeStatusUpdaterImpl.java  |  3 +-
 .../nodemanager/health/NodeHealthScriptRunner.java | 11 +++-
 .../health/TimedHealthReporterService.java | 20 ++-
 .../yarn/server/nodemanager/TestEventFlow.java |  5 ++
 .../containermanager/BaseContainerManagerTest.java | 66 +-
 .../containermanager/TestContainerManager.java |  6 +-
 .../nodemanager/containermanager/TestNMProxy.java  |  4 +-
 .../scheduler/TestContainerSchedulerQueuing.java   |  2 +-
 .../resourcemanager/ResourceTrackerService.java|  5 +-
 .../server/resourcemanager/rmnode/RMNodeImpl.java  | 58 +++
 .../resourcemanager/rmnode/RMNodeStartedEvent.java | 10 +++-
 .../hadoop/yarn/server/resourcemanager/MockNM.java | 22 
 .../hadoop/yarn/server/resourcemanager/MockRM.java |  7 ++-
 .../yarn/server/resourcemanager/NodeManager.java   |  3 +-
 .../resourcemanager/TestRMNodeTransitions.java | 55 +++---
 .../resourcemanager/TestResourceManager.java   | 29 ++
 .../TestResourceTrackerService.java|  6 ++
 .../TestRMAppLogAggregationStatus.java |  7 ++-
 .../resourcetracker/TestNMExpiry.java  |  7 +++
 .../resourcetracker/TestNMReconnect.java   |  7 +++
 .../scheduler/TestAbstractYarnScheduler.java   |  5 ++
 .../scheduler/TestSchedulerHealth.java | 18 --
 .../scheduler/capacity/TestCapacityScheduler.java  | 63 ++---
 .../scheduler/fair/TestFairScheduler.java  | 21 +--
 .../scheduler/fifo/TestFifoScheduler.java  | 25 +---
 .../webapp/TestRMWebServicesNodes.java |  5 +-
 31 files changed, 429 insertions(+), 114 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 85d5a58..54e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2013,6 +2013,13 @@ public class YarnConfiguration extends Configuration {
   NM_PREFIX + "health-checker.interval-ms";
   public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 
1000;
 
+  /** Whether or not to run the node health script before the NM
+   *  starts up.*/
+  public static final String NM_HEALTH_CHECK_RUN_BEFORE_STARTUP =
+  NM_PREFIX + "health-checker.run-before-startup";
+  public static final boolean DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP =
+  false;
+
   /** Health check time out period for all scripts.*/
   public static final String NM_HEALTH_CHECK_TIMEOUT_MS =
   NM_PREFIX + "health-checker.timeout-ms";
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index f09186e..2f97a7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1669,6 +1669,13 @@
   
 
   
+Whether or not to run the node health script
+before the NM starts up.
+yarn.nodemanager.health-checker.run-before-startup
+false
+  
+
+  
 Frequency of running node health scripts.
 yarn.nodemanager.health-checker.interval-ms
 60
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeMan

[hadoop] branch trunk updated: YARN-10328. Fixed ZK Curator NodeExists exception in YARN service AM logs Contributed by Bilwa S T via eyang

2020-06-29 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0be2681  YARN-10328. Fixed ZK Curator NodeExists exception in YARN 
service AM logs Contributed by Bilwa S T via eyang
0be2681 is described below

commit 0be26811f3db49abb62d12e6a051a31553495da8
Author: Eric Yang 
AuthorDate: Mon Jun 29 09:21:24 2020 -0700

YARN-10328. Fixed ZK Curator NodeExists exception in YARN service AM logs
Contributed by Bilwa S T via eyang
---
 .../hadoop/yarn/service/registry/YarnRegistryViewForProviders.java   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
index cecca5f..06066d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
@@ -143,7 +143,10 @@ public class YarnRegistryViewForProviders {
   ServiceRecord record) throws IOException {
 String path = RegistryUtils.componentPath(
 user, serviceClass, serviceName, componentName);
-registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
+String parentPath = RegistryPathUtils.parentOf(path);
+if (!registryOperations.exists(parentPath)) {
+  registryOperations.mknode(parentPath, true);
+}
 registryOperations.bind(path, record, BindFlags.OVERWRITE);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10308. Update javadoc and variable names for YARN service. Contributed by Bilwa S T via eyang

2020-06-17 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 89689c5  YARN-10308. Update javadoc and variable names for YARN 
service. Contributed by Bilwa S T via eyang
89689c5 is described below

commit 89689c52c39cdcc498d04508dbd235c6036ec17c
Author: Eric Yang 
AuthorDate: Wed Jun 17 09:04:26 2020 -0700

YARN-10308. Update javadoc and variable names for YARN service.
Contributed by Bilwa S T via eyang
---
 .../yarn/service/api/records/KerberosPrincipal.java  | 20 +++-
 .../hadoop/yarn/service/client/ServiceClient.java| 10 +-
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
index 0ff4daa..27125fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
@@ -71,8 +71,9 @@ public class KerberosPrincipal implements Serializable {
   }
 
   /**
-   * The URI of the kerberos keytab. It supports two schemes \
-   * hdfs\ and \file\. If the URI starts with \
+   * The URI of the kerberos keytab. It supports hadoop supported schemes
+   * like \hdfs\ \file\ \s3\
+   *  \viewfs\ etc.If the URI starts with \
* hdfs://\ scheme, it indicates the path on hdfs where the keytab is
* stored. The keytab will be localized by YARN and made available to AM in
* its local directory. If the URI starts with \file://\
@@ -81,13 +82,14 @@ public class KerberosPrincipal implements Serializable {
*
* @return keytab
**/
-  @ApiModelProperty(value = "The URI of the kerberos keytab. It supports two " 
+
-  "schemes \"hdfs\" and \"file\". If the URI starts with \"hdfs://\" " +
-  "scheme, it indicates the path on hdfs where the keytab is stored. The " 
+
-  "keytab will be localized by YARN and made available to AM in its local" 
+
-  " directory. If the URI starts with \"file://\" scheme, it indicates a " 
+
-  "path on the local host where the keytab is presumbaly installed by " +
-  "admins upfront. ")
+  @ApiModelProperty(value = "The URI of the kerberos keytab. It supports"
+  + " Hadoop supported filesystem types like \"hdfs\", \"file\","
+  + " \"viewfs\", \"s3\" etc.If the URI starts with \"hdfs://\" scheme, "
+  + "it indicates the path on hdfs where the keytab is stored. The "
+  + "keytab will be localized by YARN and made available to AM in its 
local"
+  + " directory. If the URI starts with \"file://\" scheme, it indicates a 
"
+  + "path on the local host where the keytab is presumbaly installed by "
+  + "admins upfront. ")
   public String getKeytab() {
 return keytab;
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index b3ac7bb..41d1e42 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1478,18 +1478,18 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 if ("file".equals(keytabURI.getScheme())) {
   LOG.info("Using a keytab from localhost: " + keytabURI);
 } else {
-  Path keytabOnhdfs = new Path(keytabURI);
-  if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
+  Path keytabPath = new Path(keytabURI);
+  if (!fileSystem.getFileSystem().exists(keytabPath)) {
 LOG.warn(service.getName() + "'s keytab (prin

[hadoop] branch branch-3.3 updated: YARN-10228. Relax restriction of file path character in yarn.service.am.java.opts. Contributed by Bilwa S T via eyang

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new d7cf19d  YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts. Contributed by Bilwa S T via eyang
d7cf19d is described below

commit d7cf19d7c0fce1697d548af8ab407e6ff93a5d07
Author: Eric Yang 
AuthorDate: Wed May 20 09:20:53 2020 -0700

YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts.
Contributed by Bilwa S T via eyang
---
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  2 +-
 .../hadoop/yarn/service/utils/TestServiceApiUtil.java | 19 +++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 3780c99..5e7a335 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -250,7 +250,7 @@ public class ServiceApiUtil {
 
   public static void validateJvmOpts(String jvmOpts)
   throws IllegalArgumentException {
-Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\"\\/,`;]");
+Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\",`;]");
 Matcher matcher = pattern.matcher(jvmOpts);
 if (matcher.find()) {
   throw new IllegalArgumentException(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
index a93f3d9..2a61171 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
@@ -46,10 +46,12 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static 
org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
 import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 /**
  * Test for ServiceApiUtil helper methods.
@@ -766,10 +768,19 @@ public class TestServiceApiUtil extends ServiceTestUtils {
 Assert.assertTrue(thread.isAlive());
   }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void testJvmOpts() {
-String jvmOpts = "`ping -c 3 example.com`";
-ServiceApiUtil.validateJvmOpts(jvmOpts);
+  @Test
+  public void testJvmOpts() throws Exception {
+String invalidJvmOpts = "`ping -c 3 example.com`";
+intercept(IllegalArgumentException.class,
+"Invalid character in yarn.service.am.java.opts.",
+() -> ServiceApiUtil.validateJvmOpts(invalidJvmOpts));
+String validJvmOpts = "-Dyarn.service.am.java.opts=-Xmx768m "
++ "-Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf";
+try {
+  ServiceApiUtil.validateJvmOpts(validJvmOpts);
+} catch (Exception ex) {
+  fail("Invalid character in yarn.service.am.java.opts.");
+}
   }
 
   public static Service createExampleApplication() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3.0 updated: YARN-10228. Relax restriction of file path character in yarn.service.am.java.opts. Contributed by Bilwa S T via eyang

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3.0 by this push:
 new 54bd242  YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts. Contributed by Bilwa S T via eyang
54bd242 is described below

commit 54bd2424f7eda2d02c9c4f6bd8d1fa967649523a
Author: Eric Yang 
AuthorDate: Wed May 20 09:20:53 2020 -0700

YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts.
Contributed by Bilwa S T via eyang
---
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  2 +-
 .../hadoop/yarn/service/utils/TestServiceApiUtil.java | 19 +++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 3780c99..5e7a335 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -250,7 +250,7 @@ public class ServiceApiUtil {
 
   public static void validateJvmOpts(String jvmOpts)
   throws IllegalArgumentException {
-Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\"\\/,`;]");
+Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\",`;]");
 Matcher matcher = pattern.matcher(jvmOpts);
 if (matcher.find()) {
   throw new IllegalArgumentException(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
index a93f3d9..2a61171 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
@@ -46,10 +46,12 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static 
org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
 import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 /**
  * Test for ServiceApiUtil helper methods.
@@ -766,10 +768,19 @@ public class TestServiceApiUtil extends ServiceTestUtils {
 Assert.assertTrue(thread.isAlive());
   }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void testJvmOpts() {
-String jvmOpts = "`ping -c 3 example.com`";
-ServiceApiUtil.validateJvmOpts(jvmOpts);
+  @Test
+  public void testJvmOpts() throws Exception {
+String invalidJvmOpts = "`ping -c 3 example.com`";
+intercept(IllegalArgumentException.class,
+"Invalid character in yarn.service.am.java.opts.",
+() -> ServiceApiUtil.validateJvmOpts(invalidJvmOpts));
+String validJvmOpts = "-Dyarn.service.am.java.opts=-Xmx768m "
++ "-Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf";
+try {
+  ServiceApiUtil.validateJvmOpts(validJvmOpts);
+} catch (Exception ex) {
+  fail("Invalid character in yarn.service.am.java.opts.");
+}
   }
 
   public static Service createExampleApplication() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] reference refs/remotes/origin/branch-3.3 updated (4bd37f2 -> e452163)

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a change to reference refs/remotes/origin/branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 4bd37f2  YARN-5277. When localizers fail due to resource timestamps 
being out, provide more diagnostics. Contributed by Siddharth Ahuja
 add 94da630  HADOOP-16465 listLocatedStatus() optimisation (#1943)
 add b4ba9be  HADOOP-16985. Handle release package related issues (#1957)
 add aaad947  HDFS-15283. Cache pool MAXTTL is not persisted and restored 
on cluster restart. Contributed by Stephen O'Donnell.
 add 96d7ceb  HADOOP-13873. log DNS addresses on s3a initialization.
 add 49ae9b2  YARN-9954. Configurable max application tags and max tag 
length. Contributed by Bilwa S T
 add de5d433  HADOOP-16972. Ignore AuthenticationFilterInitializer for 
KMSWebServer. (#1961)
 add a6c718f  HDFS-15218. RBF: MountTableRefresherService failed to refresh 
other router MountTableEntries in secure mode. Contributed by Surendra Singh 
Lilhore.
 add 9c81b17  HADOOP-16959. Resolve hadoop-cos dependency conflict. 
Contributed by Yang Yu.
 add de9a6b4  HADOOP-16986. S3A to not need wildfly on the classpath. 
(#1948)
 add 0982f56  HADOOP-16953. tuning s3guard disabled warnings (#1962)
 add 5459dd6  HDFS-14742. RBF: TestRouterFaultTolerant tests are flaky
 add 54a64e5  HADOOP-16944. Use Yetus 0.12.0 in GitHub PR (#1917)
 add f74a571  HADOOP-16910 : ABFS Streams to update FileSystem.Statistics 
counters on IO. (#1918). Contributed by Mehakmeet Singh.
 add 98fdbb8  HADOOP-16965. Refactor abfs stream configuration. (#1956)
 add f53ded6  HADOOP-16922. ABFS: Change User-Agent header (#1938)
 add 5b92d73  HADOOP-17001. The suffix name of the unified compression 
class. Contributed by bianqi
 add 47b330d  HDFS-15276. Concat on INodeRefernce fails with illegal state 
exception. Contributed by hemanthboyina
 add 1340518  YARN-9996. Code cleanup in 
QueueAdminConfigurationMutationACLPolicy. Contributed by Siddharth Ahuja
 add 32fb174  Hadoop 16857. ABFS: Stop CustomTokenProvider retry logic to 
depend on AbfsRestOp retry policy
 add 2471ba8  HADOOP-16914 Adding Output Stream Counters in ABFS (#1899)
 add 497c7a1  YARN-10223. Remove jersey-test-framework-core dependency from 
yarn-server-common. (#1939)
 add 61ca459  YARN-. TestFSSchedulerConfigurationStore: Extend from 
ConfigurationStoreBaseTest, general code cleanup. Contributed by Benjamin Teke
 add 7a3f190  YARN-10189. Code cleanup in LeveldbRMStateStore. Contributed 
by Benjamin Teke
 add d6c48f8  HDFS-15295. AvailableSpaceBlockPlacementPolicy should use 
chooseRandomWithStorageTypeTwoTrial() for better performance. Contributed by 
Jinglun.
 add c276938  HADOOP-16886. Add hadoop.http.idle_timeout.ms to 
core-default.xml. Contributed by Lisheng Sun.
 add 1c19107  HDFS-15281. Make sure ZKFC uses dfs.namenode.rpc-address to 
bind to host address (#1964)
 add 1db1992  HDFS-15297. 
TestNNHandlesBlockReportPerStorage::blockReport_02 fails intermittently. 
Contributed by Ayush Saxena
 add 9e79cca  HADOOP-17007. hadoop-cos fails to build. Contributed by Yang 
Yu.
 add c0b7b38  YARN-10194. YARN RMWebServices /scheduler-conf/validate leaks 
ZK Connections. Contributed by Prabhu Joseph
 add 68d8802  HDFS-1820. FTPFileSystem attempts to close the outputstream 
even when it is not initialised. (#1952)
 add 741fcf2  MAPREDUCE-7277. IndexCache totalMemoryUsed differs from cache 
contents. Contributed by Jon Eagles (jeagles).
 add 62ee597  YARN-9848. Revert YARN-4946. Contributed by Steven Rand.
 add d504574  HDFS-15286. Concat on a same file deleting the file. 
Contributed by hemanthboyina.
 add e45faae  YARN-10215. Endpoint for obtaining direct URL for the logs. 
Contributed by Andras Gyori
 add 08dbe32  HDFS-15301. statfs function in hdfs-fuse not working. 
Contributed by Aryan Gupta.(#1980)
 add 75433a6  YARN-10237. Add isAbsoluteResource config for queue in 
scheduler response. Contributed by Prabhu Joseph
 add 8ffe1f3  YARN-10247. Application priority queue ACLs are not 
respected. Contributed by Sunil G
 add a34174f  HDFS-15285. The same distance and load nodes don't shuffle 
when consider DataNode load. Contributed by Lisheng Sun.
 add dfa7f16  Preparing for 3.3.1 development
 add ccebc9d  HADOOP-16952. Add .diff to gitignore. Contributed by Ayush 
Saxena.
 add 0f27c04  SPNEGO TLS verification
 add e32e138  HDFS-15320. StringIndexOutOfBoundsException in 
HostRestrictingAuthorizationFilter (#1992)
 add df16146  Erasure Coding: metrics xmitsInProgress become to negative. 
Contributed by maobaolong and Toshihiko Uchida.
 add 1d83500  YARN-10256. Refactor 
TestContainerSchedulerQueuing.testContainerUpdateExecTypeGuaranteedToOpportunistic
 (Ahmed Hussein via jeagles)
 add 8809b43  HDFS-15323. StandbyNode fails

[hadoop] reference refs/remotes/origin/branch-3.3 updated (e452163 -> 18a7cbf)

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a change to reference refs/remotes/origin/branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from e452163  HDFS-15293. Relax the condition for accepting a fsimage when 
receiving a checkpoint. Contributed by Chen Liang
 new 18a7cbf  YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts. Contributed by Bilwa S T via eyang

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  2 +-
 .../hadoop/yarn/service/utils/TestServiceApiUtil.java | 19 +++
 2 files changed, 16 insertions(+), 5 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: YARN-10228. Relax restriction of file path character in yarn.service.am.java.opts. Contributed by Bilwa S T via eyang

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to reference refs/remotes/origin/branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 18a7cbf071573310fa03726ba986594efb7b273d
Author: Eric Yang 
AuthorDate: Wed May 20 09:20:53 2020 -0700

YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts.
Contributed by Bilwa S T via eyang
---
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  2 +-
 .../hadoop/yarn/service/utils/TestServiceApiUtil.java | 19 +++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 3780c99..5e7a335 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -250,7 +250,7 @@ public class ServiceApiUtil {
 
   public static void validateJvmOpts(String jvmOpts)
   throws IllegalArgumentException {
-Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\"\\/,`;]");
+Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\",`;]");
 Matcher matcher = pattern.matcher(jvmOpts);
 if (matcher.find()) {
   throw new IllegalArgumentException(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
index a93f3d9..2a61171 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
@@ -46,10 +46,12 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static 
org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
 import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 /**
  * Test for ServiceApiUtil helper methods.
@@ -766,10 +768,19 @@ public class TestServiceApiUtil extends ServiceTestUtils {
 Assert.assertTrue(thread.isAlive());
   }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void testJvmOpts() {
-String jvmOpts = "`ping -c 3 example.com`";
-ServiceApiUtil.validateJvmOpts(jvmOpts);
+  @Test
+  public void testJvmOpts() throws Exception {
+String invalidJvmOpts = "`ping -c 3 example.com`";
+intercept(IllegalArgumentException.class,
+"Invalid character in yarn.service.am.java.opts.",
+() -> ServiceApiUtil.validateJvmOpts(invalidJvmOpts));
+String validJvmOpts = "-Dyarn.service.am.java.opts=-Xmx768m "
++ "-Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf";
+try {
+  ServiceApiUtil.validateJvmOpts(validJvmOpts);
+} catch (Exception ex) {
+  fail("Invalid character in yarn.service.am.java.opts.");
+}
   }
 
   public static Service createExampleApplication() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10228. Relax restriction of file path character in yarn.service.am.java.opts. Contributed by Bilwa S T via eyang

2020-05-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 726b8e3  YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts. Contributed by Bilwa S T via eyang
726b8e3 is described below

commit 726b8e324b6fc99aac5a26fbbc7edd26a3a25479
Author: Eric Yang 
AuthorDate: Wed May 20 09:20:53 2020 -0700

YARN-10228. Relax restriction of file path character in 
yarn.service.am.java.opts.
Contributed by Bilwa S T via eyang
---
 .../hadoop/yarn/service/utils/ServiceApiUtil.java |  2 +-
 .../hadoop/yarn/service/utils/TestServiceApiUtil.java | 19 +++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index 70c43f1..b4067d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -250,7 +250,7 @@ public class ServiceApiUtil {
 
   public static void validateJvmOpts(String jvmOpts)
   throws IllegalArgumentException {
-Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\"\\/,`;]");
+Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\",`;]");
 Matcher matcher = pattern.matcher(jvmOpts);
 if (matcher.find()) {
   throw new IllegalArgumentException(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
index 1ac9613..5c80f85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java
@@ -46,10 +46,12 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static 
org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
 import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 /**
  * Test for ServiceApiUtil helper methods.
@@ -744,10 +746,19 @@ public class TestServiceApiUtil extends ServiceTestUtils {
 Assert.assertTrue(thread.isAlive());
   }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void testJvmOpts() {
-String jvmOpts = "`ping -c 3 example.com`";
-ServiceApiUtil.validateJvmOpts(jvmOpts);
+  @Test
+  public void testJvmOpts() throws Exception {
+String invalidJvmOpts = "`ping -c 3 example.com`";
+intercept(IllegalArgumentException.class,
+"Invalid character in yarn.service.am.java.opts.",
+() -> ServiceApiUtil.validateJvmOpts(invalidJvmOpts));
+String validJvmOpts = "-Dyarn.service.am.java.opts=-Xmx768m "
++ "-Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf";
+try {
+  ServiceApiUtil.validateJvmOpts(validJvmOpts);
+} catch (Exception ex) {
+  fail("Invalid character in yarn.service.am.java.opts.");
+}
   }
 
   public static Service createExampleApplication() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: MAPREDUCE-7273. Fixed Job History server token renewal. Contributed by Peter Bacsko via eyang

2020-04-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 053e39e  MAPREDUCE-7273.  Fixed Job History server token renewal.  
Contributed by Peter Bacsko via eyang
053e39e is described below

commit 053e39e1e458d6437481296ab528327f7241fc78
Author: Eric Yang 
AuthorDate: Thu Apr 16 20:17:21 2020 -0400

MAPREDUCE-7273.  Fixed Job History server token renewal.
 Contributed by Peter Bacsko via eyang
---
 .../java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 825fb25..268eeec 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ShutdownThreadsHelper;
 import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
@@ -917,6 +918,10 @@ public class HistoryFileManager extends AbstractService {
*   if there was a error while scanning
*/
   void scanIntermediateDirectory() throws IOException {
+if (UserGroupInformation.isSecurityEnabled()) {
+  UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+}
+
 // TODO it would be great to limit how often this happens, except in the
 // case where we are looking for a particular job.
 List userDirList = JobHistoryUtils.localGlobber(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2 updated: HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on branch-2. Contributed by Jim Brennan via eyang

2020-04-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2920279  HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on 
branch-2.   Contributed by Jim Brennan via eyang
2920279 is described below

commit 2920279559a6addfbdb8e4e97fe7806fc369ead6
Author: Eric Yang 
AuthorDate: Thu Apr 16 12:12:21 2020 -0400

HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on branch-2.
  Contributed by Jim Brennan via eyang
---
 .../org/apache/hadoop/registry/secure/TestSecureLogins.java  | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
index 5592e19..5150aac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
@@ -31,6 +31,7 @@ import 
org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
 import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
 import org.apache.zookeeper.Environment;
 import org.apache.zookeeper.data.ACL;
+import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -179,10 +180,15 @@ public class TestSecureLogins extends 
AbstractSecureRegistryTest {
   public void testValidKerberosName() throws Throwable {
 
 new HadoopKerberosName(ZOOKEEPER).getShortName();
-new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
 new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
-// standard rules don't pick this up
-// new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
+new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
+try {
+  new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
+  Assert.fail("Unexpected success for invalid principal: " +
+  ZOOKEEPER_LOCALHOST);
+} catch (IOException e) {
+  LOG.info("Expected exception: " + e.getMessage());
+}
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on branch-2. Contributed by Jim Brennan via eyang

2020-04-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 7945ed4  HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on 
branch-2.   Contributed by Jim Brennan via eyang
7945ed4 is described below

commit 7945ed40f0596da2aedd29f831e8671ba4ea0aa2
Author: Eric Yang 
AuthorDate: Thu Apr 16 12:12:21 2020 -0400

HADOOP-16361. Fixed TestSecureLogins#testValidKerberosName on branch-2.
  Contributed by Jim Brennan via eyang
---
 .../org/apache/hadoop/registry/secure/TestSecureLogins.java  | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
index 5592e19..5150aac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
@@ -31,6 +31,7 @@ import 
org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
 import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
 import org.apache.zookeeper.Environment;
 import org.apache.zookeeper.data.ACL;
+import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -179,10 +180,15 @@ public class TestSecureLogins extends 
AbstractSecureRegistryTest {
   public void testValidKerberosName() throws Throwable {
 
 new HadoopKerberosName(ZOOKEEPER).getShortName();
-new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
 new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
-// standard rules don't pick this up
-// new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
+new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
+try {
+  new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
+  Assert.fail("Unexpected success for invalid principal: " +
+  ZOOKEEPER_LOCALHOST);
+} catch (IOException e) {
+  LOG.info("Expected exception: " + e.getMessage());
+}
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10019. Improved container-executor exec() calls. Contributed by Peter Bacsko

2020-01-10 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2301b25  YARN-10019.  Improved container-executor exec() calls.
  Contributed by Peter Bacsko
2301b25 is described below

commit 2301b25899b5ae293719f4b4dcb8584c20a36bd5
Author: Eric Yang 
AuthorDate: Fri Jan 10 19:04:04 2020 -0500

YARN-10019.  Improved container-executor exec() calls.
 Contributed by Peter Bacsko
---
 .../container-executor/impl/container-executor.c   | 77 ++
 .../native/container-executor/impl/runc/runc.c |  2 +-
 .../src/main/native/container-executor/impl/util.h |  3 +-
 .../container-executor/impl/utils/docker-util.c|  3 +-
 4 files changed, 39 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 3de7365..d69acf3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1610,7 +1610,7 @@ int exec_container(const char *command_file) {
   }
 } else {
   if (rc < 0) {
-if (errno==5) {
+if (errno == EIO) {
   fprintf(stderr, "Remote Connection Closed.\n");
   exit(0);
 } else {
@@ -1644,21 +1644,21 @@ int exec_container(const char *command_file) {
 tcsetattr (fds, TCSANOW, _term_settings);
 
 // The slave side of the PTY becomes the standard input and outputs of the 
child process
-close(0); // Close standard input (current terminal)
-close(1); // Close standard output (current terminal)
-close(2); // Close standard error (current terminal)
+close(STDIN_FILENO); // Close standard input (current terminal)
+close(STDOUT_FILENO); // Close standard output (current terminal)
+close(STDERR_FILENO); // Close standard error (current terminal)
 
 if (dup(fds) == -1) {
   // PTY becomes standard input (0)
-  exit(DOCKER_EXEC_FAILED);
+  _exit(DOCKER_EXEC_FAILED);
 }
 if (dup(fds) == -1) {
   // PTY becomes standard output (1)
-  exit(DOCKER_EXEC_FAILED);
+  _exit(DOCKER_EXEC_FAILED);
 }
 if (dup(fds) == -1) {
   // PTY becomes standard error (2)
-  exit(DOCKER_EXEC_FAILED);
+  _exit(DOCKER_EXEC_FAILED);
 }
 
 // Now the original file descriptor is useless
@@ -1669,8 +1669,8 @@ int exec_container(const char *command_file) {
   setsid();
 } else {
   exit_code = set_user(user);
-  if (exit_code!=0) {
-goto cleanup;
+  if (exit_code != 0) {
+_exit(exit_code);
   }
 }
 
@@ -1679,24 +1679,20 @@ int exec_container(const char *command_file) {
 ioctl(0, TIOCSCTTY, 1);
 if (docker) {
   ret = execvp(binary, args);
+  fprintf(ERRORFILE, "exec failed - %s\n", strerror(errno));
+  _exit(DOCKER_EXEC_FAILED);
 } else {
   if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
-exit_code = DOCKER_EXEC_FAILED;
-goto cleanup;
+_exit(DOCKER_EXEC_FAILED);
   }
   ret = chdir(workdir);
   if (ret != 0) {
-exit_code = DOCKER_EXEC_FAILED;
-goto cleanup;
+fprintf(ERRORFILE, "chdir failed - %s", strerror(errno));
+_exit(DOCKER_EXEC_FAILED);
   }
-  ret = execve(binary, args, env);
-}
-if (ret != 0) {
-  fprintf(ERRORFILE, "Couldn't execute the container launch with args %s - 
%s\n",
-binary, strerror(errno));
-  exit_code = DOCKER_EXEC_FAILED;
-} else {
-  exit_code = 0;
+  execve(binary, args, env);
+  fprintf(ERRORFILE, "exec failed - %s\n", strerror(errno));
+  _exit(DOCKER_EXEC_FAILED);
 }
   }
 
@@ -1708,7 +1704,8 @@ cleanup:
   free_values(args);
   free_values(env);
   free_configuration(_config);
-  return exit_code;
+
+  return exit_code; // we reach this point only if an error occurs
 }
 
 int exec_docker_command(char *docker_command, char **argv, int argc) {
@@ -2113,14 +2110,14 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 if (so_fd == NULL) {
   fprintf(ERRORFILE, "Could not append to %s\n", so);
   exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
-  goto cleanup;
+  _exit(exit_code);
 }
 FI

[hadoop] branch trunk updated: HADOOP-16590. Update OS login modules for IBM JDK. Contributed by Nicholas Marion

2020-01-10 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e589f61  HADOOP-16590.  Update OS login modules for IBM JDK.   
 Contributed by Nicholas Marion
e589f61 is described below

commit e589f6199c562cb1e478a38f5f2c5dee94880258
Author: Eric Yang 
AuthorDate: Fri Jan 10 14:32:02 2020 -0500

HADOOP-16590.  Update OS login modules for IBM JDK.
   Contributed by Nicholas Marion

Close #1484
---
 .../hadoop/security/UserGroupInformation.java  | 28 --
 1 file changed, 4 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 884380c..8c84a8d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -427,23 +427,12 @@ public class UserGroupInformation {
   
   private static final boolean windows =
   System.getProperty("os.name").startsWith("Windows");
-  private static final boolean is64Bit =
-  System.getProperty("os.arch").contains("64") ||
-  System.getProperty("os.arch").contains("s390x");
-  private static final boolean aix = 
System.getProperty("os.name").equals("AIX");
 
   /* Return the OS login module class name */
+  /* For IBM JDK, use the common OS login module class name for all platforms 
*/
   private static String getOSLoginModuleName() {
 if (IBM_JAVA) {
-  if (windows) {
-return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
-: "com.ibm.security.auth.module.NTLoginModule";
-  } else if (aix) {
-return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
-: "com.ibm.security.auth.module.AIXLoginModule";
-  } else {
-return "com.ibm.security.auth.module.LinuxLoginModule";
-  }
+  return "com.ibm.security.auth.module.JAASLoginModule";
 } else {
   return windows ? "com.sun.security.auth.module.NTLoginModule"
 : "com.sun.security.auth.module.UnixLoginModule";
@@ -451,23 +440,14 @@ public class UserGroupInformation {
   }
 
   /* Return the OS principal class */
+  /* For IBM JDK, use the common OS principal class for all platforms */
   @SuppressWarnings("unchecked")
   private static Class getOsPrincipalClass() {
 ClassLoader cl = ClassLoader.getSystemClassLoader();
 try {
   String principalClass = null;
   if (IBM_JAVA) {
-if (is64Bit) {
-  principalClass = "com.ibm.security.auth.UsernamePrincipal";
-} else {
-  if (windows) {
-principalClass = "com.ibm.security.auth.NTUserPrincipal";
-  } else if (aix) {
-principalClass = "com.ibm.security.auth.AIXPrincipal";
-  } else {
-principalClass = "com.ibm.security.auth.LinuxPrincipal";
-  }
-}
+principalClass = "com.ibm.security.auth.UsernamePrincipal";
   } else {
 principalClass = windows ? "com.sun.security.auth.NTUserPrincipal"
 : "com.sun.security.auth.UnixPrincipal";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9956. Improved connection error message for YARN ApiServerClient. Contributed by Prabhu Joseph

2020-01-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d81d45f  YARN-9956. Improved connection error message for YARN 
ApiServerClient.Contributed by Prabhu Joseph
d81d45f is described below

commit d81d45ff2fc9a1c424222e021f9306bf64c916b2
Author: Eric Yang 
AuthorDate: Mon Jan 6 13:24:16 2020 -0500

YARN-9956. Improved connection error message for YARN ApiServerClient.
   Contributed by Prabhu Joseph
---
 .../yarn/service/client/ApiServiceClient.java  | 78 --
 .../yarn/service/client/TestApiServiceClient.java  | 22 ++
 .../service/client/TestSecureApiServiceClient.java |  2 +
 .../hadoop/yarn/client/util/YarnClientUtils.java   |  2 +-
 4 files changed, 68 insertions(+), 36 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index 834bb03..3c2c3c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.client.api.AppAdminClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
+import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.service.api.records.Component;
@@ -94,7 +95,7 @@ public class ApiServiceClient extends AppAdminClient {
   /**
* Calculate Resource Manager address base on working REST API.
*/
-  String getRMWebAddress() {
+  String getRMWebAddress() throws IOException {
 Configuration conf = getConfig();
 String scheme = "http://;;
 String path = "/app/v1/services/version";
@@ -105,43 +106,50 @@ public class ApiServiceClient extends AppAdminClient {
   rmAddress = conf
   .get("yarn.resourcemanager.webapp.https.address");
 }
-boolean useKerberos = UserGroupInformation.isSecurityEnabled();
-List rmServers = getRMHAWebAddresses(conf);
-for (String host : rmServers) {
-  try {
-Client client = Client.create();
-client.setFollowRedirects(false);
-StringBuilder sb = new StringBuilder();
-sb.append(scheme)
-.append(host)
-.append(path);
-if (!useKerberos) {
-  try {
-String username = 
UserGroupInformation.getCurrentUser().getShortUserName();
-sb.append("?user.name=")
-.append(username);
-  } catch (IOException e) {
-LOG.debug("Fail to resolve username: {}", e);
+
+if (HAUtil.isHAEnabled(conf)) {
+  boolean useKerberos = UserGroupInformation.isSecurityEnabled();
+  List rmServers = getRMHAWebAddresses(conf);
+  StringBuilder diagnosticsMsg = new StringBuilder();
+  for (String host : rmServers) {
+try {
+  Client client = Client.create();
+  client.setFollowRedirects(false);
+  StringBuilder sb = new StringBuilder();
+  sb.append(scheme)
+  .append(host)
+  .append(path);
+  if (!useKerberos) {
+try {
+  String username = UserGroupInformation.getCurrentUser()
+  .getShortUserName();
+  sb.append("?user.name=")
+  .append(username);
+} catch (IOException e) {
+  LOG.debug("Fail to resolve username: {}", e);
+}
   }
+  Builder builder = client
+  .resource(sb.toString()).type(MediaType.APPLICATION_JSON);
+  if (useKerberos) {
+String[] server = host.split(":");
+String challenge = YarnClientUtils.generateToken(server[0]);
+builder.header(HttpHeaders.AUTHORIZATION, "Negotiate " +
+challenge);
+LOG.debug("Authorization: Negotiate {}", challenge);
+  }
+  ClientResponse test = builder.get(ClientResponse.class);
+  if (test.getStatus() == 200) {
+return scheme + host;
+  }
+} catch (Exception 

[hadoop] branch branch-3.2 updated: YARN-9983. Fixed typo in YARN Service overview. Contributed by Denes Gerencser

2019-11-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6951689  YARN-9983. Fixed typo in YARN Service overview.
Contributed by Denes Gerencser
6951689 is described below

commit 6951689f4ca450dc004e55ca2e1d3e41576c6b1f
Author: Eric Yang 
AuthorDate: Tue Nov 19 14:16:51 2019 -0500

YARN-9983. Fixed typo in YARN Service overview.
   Contributed by Denes Gerencser
---
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 041b0ee..dc4c3a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -24,7 +24,7 @@ lifecycle management such as stop/start/delete the service, 
flexing service comp
 The yarn-service framework primarily includes below components:
 
 * A core framework (ApplicationMaster) running on YARN to serve as a container 
orchestrator, being responsible for all service lifecycle managements.
-* A restful API-server to for users to interact with YARN to deploy/manage 
their services via a simple JSON spec.
+* A restful API-server for users to interact with YARN to deploy/manage their 
services via a simple JSON spec.
 * A DNS server backed by YARN service registry to enable discovering services 
on YARN by the standard DNS lookup.
 
 ## Why should I try YARN Service framework?


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9983. Fixed typo in YARN Service overview. Contributed by Denes Gerencser

2019-11-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 1669a5c  YARN-9983. Fixed typo in YARN Service overview.
Contributed by Denes Gerencser
1669a5c is described below

commit 1669a5c1cbea167cb366f5dbdc32aa05be700064
Author: Eric Yang 
AuthorDate: Tue Nov 19 14:16:51 2019 -0500

YARN-9983. Fixed typo in YARN Service overview.
   Contributed by Denes Gerencser
---
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 041b0ee..dc4c3a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -24,7 +24,7 @@ lifecycle management such as stop/start/delete the service, 
flexing service comp
 The yarn-service framework primarily includes below components:
 
 * A core framework (ApplicationMaster) running on YARN to serve as a container 
orchestrator, being responsible for all service lifecycle managements.
-* A restful API-server to for users to interact with YARN to deploy/manage 
their services via a simple JSON spec.
+* A restful API-server for users to interact with YARN to deploy/manage their 
services via a simple JSON spec.
 * A DNS server backed by YARN service registry to enable discovering services 
on YARN by the standard DNS lookup.
 
 ## Why should I try YARN Service framework?


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9983. Fixed typo in YARN Service overview. Contributed by Denes Gerencser

2019-11-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ea8ffac  YARN-9983. Fixed typo in YARN Service overview.
Contributed by Denes Gerencser
ea8ffac is described below

commit ea8ffac121f6460013094d33ded387f06ef65785
Author: Eric Yang 
AuthorDate: Tue Nov 19 14:16:51 2019 -0500

YARN-9983. Fixed typo in YARN Service overview.
   Contributed by Denes Gerencser
---
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 140bde4..7489d28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -24,7 +24,7 @@ lifecycle management such as stop/start/delete the service, 
flexing service comp
 The yarn-service framework primarily includes below components:
 
 * A core framework (ApplicationMaster) running on YARN to serve as a container 
orchestrator, being responsible for all service lifecycle managements.
-* A restful API-server to for users to interact with YARN to deploy/manage 
their services via a simple JSON spec.
+* A restful API-server for users to interact with YARN to deploy/manage their 
services via a simple JSON spec.
 * A DNS server backed by YARN service registry to enable discovering services 
on YARN by the standard DNS lookup.
 
 ## Why should I try YARN Service framework?


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14730. Removed unused configuration dfs.web.authentication.filter. Contributed by Chen Zhang

2019-10-28 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 30ed24a  HDFS-14730.  Removed unused configuration 
dfs.web.authentication.filter.  Contributed by Chen Zhang
30ed24a is described below

commit 30ed24a42112b3225ab2486ed24bd6a5011a7a7f
Author: Eric Yang 
AuthorDate: Mon Oct 28 19:27:32 2019 -0400

HDFS-14730.  Removed unused configuration dfs.web.authentication.filter.
 Contributed by Chen Zhang
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  7 
 .../src/main/resources/hdfs-default.xml|  8 -
 .../org/apache/hadoop/hdfs/TestDFSConfigKeys.java  | 37 --
 3 files changed, 52 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 47a507d..16a29dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -283,13 +283,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   = "dfs.namenode.blockreport.queue.size";
   public static final intDFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_DEFAULT
   = 1024;
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = 
"dfs.web.authentication.filter";
-  /* Phrased as below to avoid javac inlining as a constant, to match the 
behavior when
- this was AuthFilter.class.getName(). Note that if you change the import 
for AuthFilter, you
- need to update the literal here as well as TestDFSConfigKeys.
-   */
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
-  "org.apache.hadoop.hdfs.web.AuthFilter";
   @Deprecated
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY =
   HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 27b3fe6..fa2f30e 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5043,14 +5043,6 @@
 
 
 
-  dfs.web.authentication.filter
-  org.apache.hadoop.hdfs.web.AuthFilter
-  
-Authentication filter class used for WebHDFS.
-  
-
-
-
   dfs.web.authentication.simple.anonymous.allowed
   
   
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
deleted file mode 100644
index c7df891..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.web.AuthFilter;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestDFSConfigKeys {
-
-  /**
-   * Make sure we keep the String literal up to date with what we'd get by 
calling
-   * class.getName.
-   */
-  @Test
-  public void testStringLiteralDefaultWebFilter() {
-Assert.assertEquals("The default webhdfs auth filter should make the FQCN 
of AuthFilter.",
-AuthFilter.class.getName(), 
DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-  }
- 
-}


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16614. Add aarch64 support for dependent leveldbjni. Contributed by liusheng close #1546

2019-10-24 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ac6b6a6  HADOOP-16614.  Add aarch64 support for dependent leveldbjni.  
  Contributed by liusheng close #1546
ac6b6a6 is described below

commit ac6b6a6a85c126efbfda12dc9979706490246bbe
Author: Eric Yang 
AuthorDate: Thu Oct 24 11:45:57 2019 -0400

HADOOP-16614.  Add aarch64 support for dependent leveldbjni.
   Contributed by liusheng
close #1546
---
 hadoop-client-modules/hadoop-client-minicluster/pom.xml |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  2 +-
 .../hadoop-mapreduce-client-hs/pom.xml  |  2 +-
 .../hadoop-mapreduce-client-shuffle/pom.xml |  2 +-
 hadoop-mapreduce-project/pom.xml|  2 +-
 hadoop-project/pom.xml  |  2 +-
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml|  2 +-
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml|  2 +-
 .../hadoop-yarn-server-nodemanager/pom.xml  |  2 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |  2 +-
 pom.xml | 13 +
 11 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 5641b62..52595d9 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -143,7 +143,7 @@
   hadoop-yarn-common
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
@@ -484,7 +484,7 @@
   hadoop-yarn-server-common
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index e88f0f0..700a5ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -190,7 +190,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   test
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index eea9545..4b1eedb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -67,7 +67,7 @@
   test
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 090da57..915ebe2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -52,7 +52,7 @@
   hadoop-mapreduce-client-common
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
   
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 85ef642..67b3d95 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -144,7 +144,7 @@
   compile
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 14f13cd..5ba415a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1331,7 +1331,7 @@
   
 
   
-org.fusesource.leveldbjni
+${leveldbjni.group}
 leveldbjni-all
 1.8
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index 14be03c..5d4467d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -156,7 +156,7 @@
 
 
 
-  org.fusesource.leveldbjni
+  ${leveldbjni.group}
   leveldbjni-all
 
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-y

[hadoop] branch trunk updated: YARN-9922. Fixed YARN javadoc errors from YARN-9699. Contributed by Peter Bacsko

2019-10-21 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3f7756d  YARN-9922. Fixed YARN javadoc errors from YARN-9699.  
  Contributed by Peter Bacsko
3f7756d is described below

commit 3f7756dc6cd541918eec2b221891864d29b174d3
Author: Eric Yang 
AuthorDate: Mon Oct 21 16:14:17 2019 -0400

YARN-9922. Fixed YARN javadoc errors from YARN-9699.
   Contributed by Peter Bacsko
---
 .../resourcemanager/scheduler/fair/FairSchedulerConfiguration.java| 2 +-
 .../scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java   | 2 +-
 .../scheduler/fair/converter/FSConfigToCSConfigConverterParams.java   | 2 +-
 .../scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java   | 2 +-
 .../scheduler/fair/converter/PreconditionException.java   | 2 +-
 .../scheduler/fair/converter/UnsupportedPropertyException.java| 2 +-
 .../server/resourcemanager/scheduler/fair/converter/package-info.java | 4 ++--
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index f83a492..45e1395 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -86,7 +86,7 @@ public class FairSchedulerConfiguration extends Configuration 
{
   private static final String CONF_PREFIX =  "yarn.scheduler.fair.";
 
   /**
-   * Used during FS->CS conversion. When enabled, background threads are
+   * Used during FS-CS conversion. When enabled, background threads are
* not started. This property should NOT be used by end-users!
*/
   public static final String MIGRATION_MODE = CONF_PREFIX + "migration.mode";
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
index 89ce3cb..1641249 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
@@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 
 /**
- * Parses arguments passed to the FS->CS converter.
+ * Parses arguments passed to the FS-CS converter.
  * If the arguments are valid, it calls the converter itself.
  *
  */
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java
index b083b25..ca2eb7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter;
 
 /**
- * POJO that holds values for the FS->CS converter.
+ * POJO that holds values for the FS-CS converter.
  *
  */
 public final class FSConfigToCSConfigConverterParams {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/r

[hadoop] branch trunk updated: YARN-9875. Improve fair scheduler configuration store on HDFS. Contributed by Prabhu Joseph

2019-10-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 155864d  YARN-9875. Improve fair scheduler configuration store on 
HDFS.Contributed by Prabhu Joseph
155864d is described below

commit 155864da006346a500ff35c2f6b69281093195b1
Author: Eric Yang 
AuthorDate: Fri Oct 18 20:30:11 2019 -0400

YARN-9875. Improve fair scheduler configuration store on HDFS.
   Contributed by Prabhu Joseph
---
 .../conf/FSSchedulerConfigurationStore.java| 12 -
 .../conf/TestFSSchedulerConfigurationStore.java| 51 ++
 2 files changed, 62 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
index 464ef14..855939e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
@@ -66,7 +66,7 @@ public class FSSchedulerConfigurationStore extends 
YarnConfigurationStore {
   private Path configVersionFile;
 
   @Override
-  public void initialize(Configuration conf, Configuration vSchedConf,
+  public void initialize(Configuration fsConf, Configuration vSchedConf,
   RMContext rmContext) throws Exception {
 this.configFilePathFilter = new PathFilter() {
   @Override
@@ -80,6 +80,7 @@ public class FSSchedulerConfigurationStore extends 
YarnConfigurationStore {
   }
 };
 
+Configuration conf = new Configuration(fsConf);
 String schedulerConfPathStr = conf.get(
 YarnConfiguration.SCHEDULER_CONFIGURATION_FS_PATH);
 if (schedulerConfPathStr == null || schedulerConfPathStr.isEmpty()) {
@@ -88,6 +89,15 @@ public class FSSchedulerConfigurationStore extends 
YarnConfigurationStore {
   + " must be set");
 }
 this.schedulerConfDir = new Path(schedulerConfPathStr);
+String scheme = schedulerConfDir.toUri().getScheme();
+if (scheme == null) {
+  scheme = FileSystem.getDefaultUri(conf).getScheme();
+}
+if (scheme != null) {
+  String disableCacheName = String.format("fs.%s.impl.disable.cache",
+  scheme);
+  conf.setBoolean(disableCacheName, true);
+}
 this.fileSystem = this.schedulerConfDir.getFileSystem(conf);
 this.maxVersion = conf.getInt(
 YarnConfiguration.SCHEDULER_CONFIGURATION_FS_MAX_VERSION,
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
index 33596c3..7968372 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
 import org.junit.After;
@@ -138,6 +140,55 @@ public class TestFSSchedulerConfigurationStore {
   }
 
   @Test
+  public void testFileSystemClose() throws Exception {
+MiniDFSCluster hdfsCluster = null;
+FileSystem fs = null;
+Path path = new Path("/tmp/confstore");
+try {
+  HdfsConfiguration hdfsConfig = new HdfsConfiguration();
+  hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig)
+  .numDataNodes(1).build();
+
+  

[hadoop] branch trunk updated: YARN-9884. Make container-executor mount logic modular Contributed by Eric Badger

2019-10-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 72b1bed  YARN-9884. Make container-executor mount logic modular
Contributed by Eric Badger
72b1bed is described below

commit 72b1bed9982a3b775fa60ec2ea818d1fbed683a2
Author: Eric Yang 
AuthorDate: Fri Oct 18 19:09:20 2019 -0400

YARN-9884. Make container-executor mount logic modular
   Contributed by Eric Badger
---
 .../src/CMakeLists.txt |   1 +
 .../container-executor/impl/container-executor.c   |   4 +-
 .../src/main/native/container-executor/impl/main.c |   5 +
 .../src/main/native/container-executor/impl/util.c | 137 +
 .../src/main/native/container-executor/impl/util.h |  46 +-
 .../container-executor/impl/utils/docker-util.c| 663 +
 .../container-executor/impl/utils/docker-util.h|  31 -
 .../container-executor/impl/utils/mount-utils.c| 356 +++
 .../container-executor/impl/utils/mount-utils.h|  40 ++
 .../test/utils/test_docker_util.cc | 101 ++--
 10 files changed, 929 insertions(+), 455 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index bb7fd06..7e8b19f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -137,6 +137,7 @@ add_library(container
 main/native/container-executor/impl/modules/fpga/fpga-module.c
 main/native/container-executor/impl/modules/devices/devices-module.c
 main/native/container-executor/impl/utils/docker-util.c
+main/native/container-executor/impl/utils/mount-utils.c
 )
 
 add_executable(container-executor
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 318356d..72e0cf1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1367,7 +1367,7 @@ char **construct_docker_command(const char *command_file) 
{
   ret = get_docker_command(command_file, , );
   if (ret != 0) {
 fprintf(ERRORFILE, "Error constructing docker command, docker error 
code=%d, error message='%s'\n", ret,
-get_docker_error_message(ret));
+get_error_message(ret));
 exit(DOCKER_RUN_FAILED);
   }
 
@@ -1414,7 +1414,7 @@ int exec_container(const char *command_file) {
   if (ret != 0) {
 free_configuration(_config);
 free(docker_binary);
-return INVALID_COMMAND_FILE;
+return INVALID_DOCKER_COMMAND_FILE;
   }
 
   char *value = get_configuration_value("docker-command", 
DOCKER_COMMAND_FILE_SECTION, _config);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index f890bd7..ce3e21e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -788,6 +788,11 @@ int main(int argc, char **argv) {
 break;
   }
 
+  if (exit_code) {
+fprintf(ERRORFILE, "Nonzero exit code=%d, error message='%s'\n", exit_code,
+get_error_message(exit_code));
+  }
+
   flush_and_close_log_files();
   return exit_code;
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c
index 1165428..c0b73d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c
@@ -195,3 +195,140 @@ void quote_and_append_arg(char **str, size_t *size, const 
cha

[hadoop] branch trunk updated: YARN-9860. Enable service mode for Docker containers on YARN Contributed by Prabhu Joseph and Shane Kumpf

2019-10-10 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 31e0122  YARN-9860. Enable service mode for Docker containers on YARN  
  Contributed by Prabhu Joseph and Shane Kumpf
31e0122 is described below

commit 31e0122f4d4ddc4026470b45d2bf683ece137d44
Author: Eric Yang 
AuthorDate: Thu Oct 10 19:02:02 2019 -0400

YARN-9860. Enable service mode for Docker containers on YARN
   Contributed by Prabhu Joseph and Shane Kumpf
---
 .../yarn/service/api/records/ConfigFile.java   |  28 -
 .../hadoop/yarn/service/client/ServiceClient.java  |  20 +++-
 .../yarn/service/conf/YarnServiceConstants.java|   2 +
 .../yarn/service/provider/ProviderUtils.java   |  41 ++-
 .../provider/tarball/TarballProviderService.java   |   4 +-
 .../hadoop/yarn/service/utils/CoreFileSystem.java  |  17 ++-
 .../yarn/service/utils/SliderFileSystem.java   |  34 ++
 .../yarn/service/provider/TestProviderUtils.java   | 119 +++--
 .../linux/runtime/DockerLinuxContainerRuntime.java |  39 +--
 .../linux/runtime/docker/DockerRunCommand.java |   6 ++
 .../container-executor/impl/container-executor.h   |   6 --
 .../container-executor/impl/utils/docker-util.c|  59 +++---
 .../container-executor/impl/utils/docker-util.h|   4 +-
 .../src/site/markdown/DockerContainers.md  |  23 
 14 files changed, 305 insertions(+), 97 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index c09373f..060e204 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -24,6 +24,7 @@ import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlEnum;
@@ -73,6 +74,7 @@ public class ConfigFile implements Serializable {
   private TypeEnum type = null;
   private String destFile = null;
   private String srcFile = null;
+  private LocalResourceVisibility visibility = null;
   private Map properties = new HashMap<>();
 
   public ConfigFile copy() {
@@ -80,6 +82,7 @@ public class ConfigFile implements Serializable {
 copy.setType(this.getType());
 copy.setSrcFile(this.getSrcFile());
 copy.setDestFile(this.getDestFile());
+copy.setVisibility(this.visibility);
 if (this.getProperties() != null && !this.getProperties().isEmpty()) {
   copy.getProperties().putAll(this.getProperties());
 }
@@ -150,6 +153,26 @@ public class ConfigFile implements Serializable {
 this.srcFile = srcFile;
   }
 
+
+  /**
+   * Visibility of the Config file.
+   **/
+  public ConfigFile visibility(LocalResourceVisibility localrsrcVisibility) {
+this.visibility = localrsrcVisibility;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Visibility of the Config file")
+  @JsonProperty("visibility")
+  public LocalResourceVisibility getVisibility() {
+return visibility;
+  }
+
+  @XmlElement(name = "visibility", defaultValue="APPLICATION")
+  public void setVisibility(LocalResourceVisibility localrsrcVisibility) {
+this.visibility = localrsrcVisibility;
+  }
+
   /**
A blob of key value pairs that will be dumped in the dest_file in the format
as specified in type. If src_file is specified, src_file content are dumped
@@ -200,12 +223,13 @@ public class ConfigFile implements Serializable {
 return Objects.equals(this.type, configFile.type)
 && Objects.equals(this.destFile, configFile.destFile)
 && Objects.equals(this.srcFile, configFile.srcFile)
+&& Objects.equals(this.visibility, configFile.visibility)
 && Objects.equals(this.properties, configFile.properties);
   }
 
   @Override
   public int hashCode() {
-return Objects.hash(type, destFile, srcFile, properties);
+return Objects.hash(type, destFile, srcFile, visibility, properties);
   }
 
   @Override
@@ -217,6 +241,

[hadoop] branch branch-3.2 updated: HDFS-14890. Fixed namenode and journalnode startup on Windows. Contributed by Siddharth Wagle

2019-10-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8bb2b00  HDFS-14890.  Fixed namenode and journalnode startup on 
Windows.  Contributed by Siddharth Wagle
8bb2b00 is described below

commit 8bb2b00d38978859b22b892034eb3f559b820942
Author: Eric Yang 
AuthorDate: Fri Oct 4 13:13:10 2019 -0400

HDFS-14890.  Fixed namenode and journalnode startup on Windows.
 Contributed by Siddharth Wagle

(cherry picked from commit aa24add8f0e9812d1f787efb3c40155b0fdeed9c)
---
 .../java/org/apache/hadoop/hdfs/server/common/Storage.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 2ba943a..e7da44e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -447,9 +447,14 @@ public abstract class Storage extends StorageInfo {
 throw new IOException("Cannot create directory " + curDir);
   }
   if (permission != null) {
-Set permissions =
-PosixFilePermissions.fromString(permission.toString());
-Files.setPosixFilePermissions(curDir.toPath(), permissions);
+try {
+  Set permissions =
+  PosixFilePermissions.fromString(permission.toString());
+  Files.setPosixFilePermissions(curDir.toPath(), permissions);
+} catch (UnsupportedOperationException uoe) {
+  // Default to FileUtil for non posix file systems
+  FileUtil.setPermission(curDir, permission);
+}
   }
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14890. Fixed namenode and journalnode startup on Windows. Contributed by Siddharth Wagle

2019-10-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new aa24add  HDFS-14890.  Fixed namenode and journalnode startup on 
Windows.  Contributed by Siddharth Wagle
aa24add is described below

commit aa24add8f0e9812d1f787efb3c40155b0fdeed9c
Author: Eric Yang 
AuthorDate: Fri Oct 4 13:13:10 2019 -0400

HDFS-14890.  Fixed namenode and journalnode startup on Windows.
 Contributed by Siddharth Wagle
---
 .../java/org/apache/hadoop/hdfs/server/common/Storage.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 2ba943a..e7da44e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -447,9 +447,14 @@ public abstract class Storage extends StorageInfo {
 throw new IOException("Cannot create directory " + curDir);
   }
   if (permission != null) {
-Set permissions =
-PosixFilePermissions.fromString(permission.toString());
-Files.setPosixFilePermissions(curDir.toPath(), permissions);
+try {
+  Set permissions =
+  PosixFilePermissions.fromString(permission.toString());
+  Files.setPosixFilePermissions(curDir.toPath(), permissions);
+} catch (UnsupportedOperationException uoe) {
+  // Default to FileUtil for non posix file systems
+  FileUtil.setPermission(curDir, permission);
+}
   }
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9862. Increase yarn-services-core test timeout value. Contributed by Prabhu Joseph

2019-10-01 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d1ddba6  YARN-9862. Increase yarn-services-core test timeout value.
Contributed by Prabhu Joseph
d1ddba6 is described below

commit d1ddba60e5ddee537b24f639a02e1a736314031e
Author: Eric Yang 
AuthorDate: Tue Oct 1 21:15:26 2019 -0400

YARN-9862. Increase yarn-services-core test timeout value.
   Contributed by Prabhu Joseph
---
 .../hadoop-yarn-services/hadoop-yarn-services-core/pom.xml   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index 24c2167..8147366 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -68,6 +68,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  1800
   
 ${java.home}
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k. Contributed by Tarun Parimi

2019-09-17 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 345ef04  YARN-9837. Fixed reading YARN Service JSON spec file larger
than 128k. Contributed by Tarun Parimi
345ef04 is described below

commit 345ef049dfa96986af1c3c0c6db30d6e25519728
Author: Eric Yang 
AuthorDate: Tue Sep 17 13:13:35 2019 -0400

YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k.
   Contributed by Tarun Parimi

(cherry picked from commit eefe9bc85ccdabc2b7303969934dbce98f2b31b5)
---
 .../org/apache/hadoop/yarn/service/utils/JsonSerDeser.java | 14 ++
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
index 2c27ea7..dbc152d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.service.utils;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
@@ -34,7 +33,6 @@ import org.codehaus.jackson.map.SerializationConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -177,17 +175,9 @@ public class JsonSerDeser {
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
-  public T load(FileSystem fs, Path path)
-throws IOException, JsonParseException, JsonMappingException {
-FileStatus status = fs.getFileStatus(path);
-long len = status.getLen();
-byte[] b = new byte[(int) len];
+  public T load(FileSystem fs, Path path) throws IOException {
 FSDataInputStream dataInputStream = fs.open(path);
-int count = dataInputStream.read(b);
-if (count != len) {
-  throw new EOFException("Read of " + path +" finished prematurely");
-}
-return fromBytes(b);
+return fromStream(dataInputStream);
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k. Contributed by Tarun Parimi

2019-09-17 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new dae22c9  YARN-9837. Fixed reading YARN Service JSON spec file larger
than 128k. Contributed by Tarun Parimi
dae22c9 is described below

commit dae22c962df21f3371b9ad5ed377bc422c833c0e
Author: Eric Yang 
AuthorDate: Tue Sep 17 13:13:35 2019 -0400

YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k.
   Contributed by Tarun Parimi
---
 .../org/apache/hadoop/yarn/service/utils/JsonSerDeser.java | 14 ++
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
index 2c27ea7..dbc152d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.service.utils;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
@@ -34,7 +33,6 @@ import org.codehaus.jackson.map.SerializationConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -177,17 +175,9 @@ public class JsonSerDeser {
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
-  public T load(FileSystem fs, Path path)
-throws IOException, JsonParseException, JsonMappingException {
-FileStatus status = fs.getFileStatus(path);
-long len = status.getLen();
-byte[] b = new byte[(int) len];
+  public T load(FileSystem fs, Path path) throws IOException {
 FSDataInputStream dataInputStream = fs.open(path);
-int count = dataInputStream.read(b);
-if (count != len) {
-  throw new EOFException("Read of " + path +" finished prematurely");
-}
-return fromBytes(b);
+return fromStream(dataInputStream);
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k. Contributed by Tarun Parimi

2019-09-17 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eefe9bc  YARN-9837. Fixed reading YARN Service JSON spec file larger
than 128k. Contributed by Tarun Parimi
eefe9bc is described below

commit eefe9bc85ccdabc2b7303969934dbce98f2b31b5
Author: Eric Yang 
AuthorDate: Tue Sep 17 13:13:35 2019 -0400

YARN-9837. Fixed reading YARN Service JSON spec file larger than 128k.
   Contributed by Tarun Parimi
---
 .../org/apache/hadoop/yarn/service/utils/JsonSerDeser.java | 14 ++
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
index 00b8e0c..254d6c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
@@ -27,14 +27,12 @@ import 
com.fasterxml.jackson.databind.PropertyNamingStrategy;
 import com.fasterxml.jackson.databind.SerializationFeature;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -176,17 +174,9 @@ public class JsonSerDeser {
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
-  public T load(FileSystem fs, Path path)
-throws IOException, JsonParseException, JsonMappingException {
-FileStatus status = fs.getFileStatus(path);
-long len = status.getLen();
-byte[] b = new byte[(int) len];
+  public T load(FileSystem fs, Path path) throws IOException {
 FSDataInputStream dataInputStream = fs.open(path);
-int count = dataInputStream.read(b);
-if (count != len) {
-  throw new EOFException("Read of " + path +" finished prematurely");
-}
-return fromBytes(b);
+return fromStream(dataInputStream);
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9728. Bugfix for escaping illegal xml characters for Resource Manager REST API. Contributed by Prabhu Joseph

2019-09-10 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 10144a5  YARN-9728. Bugfix for escaping illegal xml characters for
Resource Manager REST API. Contributed by Prabhu Joseph
10144a5 is described below

commit 10144a580e4647eb7d3d59d043608ffbf8cf090d
Author: Eric Yang 
AuthorDate: Tue Sep 10 17:04:39 2019 -0400

YARN-9728. Bugfix for escaping illegal xml characters for Resource Manager 
REST API.
   Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  4 ++
 .../src/main/resources/yarn-default.xml|  9 +++
 .../resourcemanager/webapp/RMWebServices.java  | 61 +++-
 .../server/resourcemanager/webapp/dao/AppInfo.java |  4 ++
 .../resourcemanager/webapp/TestRMWebServices.java  | 82 ++
 5 files changed, 158 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f140d6f..9c62827 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3960,6 +3960,10 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER =
   false;
 
+  public static final String FILTER_INVALID_XML_CHARS =
+  "yarn.webapp.filter-invalid-xml-chars";
+  public static final boolean DEFAULT_FILTER_INVALID_XML_CHARS = false;
+
   // RM and NM CSRF props
   public static final String REST_CSRF = "webapp.rest-csrf.";
   public static final String RM_CSRF_PREFIX = RM_PREFIX + REST_CSRF;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index eb6bf14..b856536 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3794,6 +3794,15 @@
   
 
   
+yarn.webapp.filter-invalid-xml-chars
+false
+
+  Flag to enable filter of invalid xml 1.0 characters present in the
+  value of diagnostics field of apps output from RM WebService.
+
+  
+
+  
 
   The type of configuration store to use for scheduler configurations.
   Default is "file", which uses file based capacity-scheduler.xml to
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index aa29ee6..6a413d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -242,6 +242,7 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
   private boolean filterAppsByUser = false;
+  private boolean filterInvalidXMLChars = false;
 
   public final static String DELEGATION_TOKEN_HEADER =
   "Hadoop-YARN-RM-Delegation-Token";
@@ -257,6 +258,9 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
 this.filterAppsByUser  = conf.getBoolean(
 YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
 YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
+this.filterInvalidXMLChars = conf.getBoolean(
+YarnConfiguration.FILTER_INVALID_XML_CHARS,
+YarnConfiguration.DEFAULT_FILTER_INVALID_XML_CHARS);
   }
 
   RMWebServices(ResourceManager rm, Configuration conf,
@@ -551,6 +555,38 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
 return ni;
   }
 
+  /**
+   * This method ensures that the output String has only
+   * valid XML unicode characters as specified by the
+   * XML 1.0 standard. For reference, please see
+   * <a href="http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char">
+   * the standard.
+   *
+   * @param str The String whose invalid xml characters we w

[hadoop] branch trunk updated: YARN-9755. Fixed RM failing to start when FileSystemBasedConfigurationProvider is configured. Contributed by Prabhu Joseph

2019-08-27 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 717c853  YARN-9755. Fixed RM failing to start when
FileSystemBasedConfigurationProvider is configured. Contributed by
Prabhu Joseph
717c853 is described below

commit 717c853873dd3b9112f5c15059a24655b8654607
Author: Eric Yang 
AuthorDate: Tue Aug 27 13:14:59 2019 -0400

YARN-9755. Fixed RM failing to start when 
FileSystemBasedConfigurationProvider is configured.
   Contributed by Prabhu Joseph
---
 .../yarn/FileSystemBasedConfigurationProvider.java | 14 ++--
 .../server/resourcemanager/ResourceManager.java| 32 +--
 .../server/resourcemanager/TestRMAdminService.java | 37 ++
 3 files changed, 65 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
index b6ba660..3532d13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
@@ -71,10 +71,20 @@ public class FileSystemBasedConfigurationProvider
   @Override
   public synchronized void initInternal(Configuration bootstrapConf)
   throws Exception {
+Configuration conf = new Configuration(bootstrapConf);
 configDir =
-new Path(bootstrapConf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
+new Path(conf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
 YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
-fs = configDir.getFileSystem(bootstrapConf);
+String scheme = configDir.toUri().getScheme();
+if (scheme == null) {
+  scheme = FileSystem.getDefaultUri(conf).getScheme();
+}
+if (scheme != null) {
+  String disableCacheName = String.format("fs.%s.impl.disable.cache",
+  scheme);
+  conf.setBoolean(disableCacheName, true);
+}
+fs = configDir.getFileSystem(conf);
 fs.mkdirs(configDir);
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index c0a9133..57be468 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -267,6 +267,22 @@ public class ResourceManager extends CompositeService
 this.rmContext = new RMContextImpl();
 rmContext.setResourceManager(this);
 
+// Set HA configuration should be done before login
+this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
+if (this.rmContext.isHAEnabled()) {
+  HAUtil.verifyAndSetConfiguration(this.conf);
+}
+
+// Set UGI and do login
+// If security is enabled, use login user
+// If security is not enabled, use current user
+this.rmLoginUGI = UserGroupInformation.getCurrentUser();
+try {
+  doSecureLogin();
+} catch(IOException ie) {
+  throw new YarnRuntimeException("Failed to login", ie);
+}
+
 this.configurationProvider =
 ConfigurationProviderFactory.getConfigurationProvider(conf);
 this.configurationProvider.init(this.conf);
@@ -285,22 +301,6 @@ public class ResourceManager extends CompositeService
 loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
 
 validateConfigs(this.conf);
-
-// Set HA configuration should be done before login
-this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
-if (this.rmContext.isHAEnabled()) {
-  HAUtil.verifyAndSetConfiguration(this.conf);
-}
-
-// Set UGI and do login
-// If security is enabled, use login user
-// If security is not enabled, use current user
-this.rmLoginUGI = UserGroupInformation.getCurrentUser();
-try {
-  doSecureLogin();
-} catch(IOException ie) {
-  throw new YarnRuntimeException("Failed to login", ie);
-}
 
 // register the handlers for all AlwaysOn services using setupDispatcher().
 rmDispatcher = setupDispatcher();
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-

[hadoop] branch trunk updated: YARN-9719. Fixed YARN service restart bug when application ID no longer exist in RM. Contributed by kyungwan nam

2019-08-12 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 201dc66  YARN-9719. Fixed YARN service restart bug when application ID
no longer exist in RM. Contributed by kyungwan nam
201dc66 is described below

commit 201dc667e9e27de601b2c30956e7c9f9f285281a
Author: Eric Yang 
AuthorDate: Mon Aug 12 18:24:00 2019 -0400

YARN-9719. Fixed YARN service restart bug when application ID no longer 
exist in RM.
   Contributed by kyungwan nam
---
 .../hadoop/yarn/service/client/ServiceClient.java  | 13 +-
 .../hadoop/yarn/service/ServiceTestUtils.java  |  2 ++
 .../yarn/service/TestYarnNativeServices.java   | 29 ++
 3 files changed, 43 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 49a1053..b7fec77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.yarn.client.api.YarnClientApplication;
 import org.apache.hadoop.yarn.client.cli.ApplicationCLI;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto;
@@ -1558,7 +1559,17 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   return appSpec;
 }
 appSpec.setId(currentAppId.toString());
-ApplicationReport appReport = 
yarnClient.getApplicationReport(currentAppId);
+ApplicationReport appReport = null;
+try {
+  appReport = yarnClient.getApplicationReport(currentAppId);
+} catch (ApplicationNotFoundException e) {
+  LOG.info("application ID {} doesn't exist", currentAppId);
+  return appSpec;
+}
+if (appReport == null) {
+  LOG.warn("application ID {} is reported as null", currentAppId);
+  return appSpec;
+}
 appSpec.setState(convertState(appReport.getYarnApplicationState()));
 ApplicationTimeout lifetime =
 
appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index fd2b00a..e5c3527 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -218,6 +218,8 @@ public class ServiceTestUtils {
   setConf(new YarnConfiguration());
   conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, false);
   conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, false);
+  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
+  YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
 }
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
 // reduce the teardown waiting time
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 0512e58..b33972e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-se

[hadoop] branch trunk updated: YARN-9527. Prevent rogue Localizer Runner from downloading same file repeatedly. Contributed by Jim Brennan

2019-08-09 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6ff0453  YARN-9527.  Prevent rogue Localizer Runner from downloading
same file repeatedly. Contributed by Jim Brennan
6ff0453 is described below

commit 6ff0453edeeb0ed7bc9a7d3fb6dfa7048104238b
Author: Eric Yang 
AuthorDate: Fri Aug 9 14:12:17 2019 -0400

YARN-9527.  Prevent rogue Localizer Runner from downloading same file
repeatedly.
Contributed by Jim Brennan
---
 .../localizer/ResourceLocalizationService.java | 144 +++--
 .../localizer/TestResourceLocalizationService.java | 232 -
 2 files changed, 316 insertions(+), 60 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 0494c2d..3e4af2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -141,6 +141,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.Re
 import 
org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerBuilderUtils;
 import org.apache.hadoop.yarn.util.FSDownload;
+import org.apache.hadoop.yarn.util.LRUCacheHashMap;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.CacheBuilder;
@@ -722,6 +723,8 @@ public class ResourceLocalizationService extends 
CompositeService
 
 private final PublicLocalizer publicLocalizer;
 private final Map privLocalizers;
+private final Map recentlyCleanedLocalizers;
+private final int maxRecentlyCleaned = 128;
 
 LocalizerTracker(Configuration conf) {
   this(conf, new HashMap());
@@ -732,6 +735,8 @@ public class ResourceLocalizationService extends 
CompositeService
   super(LocalizerTracker.class.getName());
   this.publicLocalizer = new PublicLocalizer(conf);
   this.privLocalizers = privLocalizers;
+  this.recentlyCleanedLocalizers =
+  new LRUCacheHashMap(maxRecentlyCleaned, false);
 }
 
 @Override
@@ -783,14 +788,24 @@ public class ResourceLocalizationService extends 
CompositeService
   synchronized (privLocalizers) {
 LocalizerRunner localizer = privLocalizers.get(locId);
 if (localizer != null && localizer.killContainerLocalizer.get()) {
-  // Old localizer thread has been stopped, remove it and creates
+  // Old localizer thread has been stopped, remove it and create
   // a new localizer thread.
   LOG.info("New " + event.getType() + " localize request for "
   + locId + ", remove old private localizer.");
-  cleanupPrivLocalizers(locId);
+  privLocalizers.remove(locId);
+  localizer.interrupt();
   localizer = null;
 }
 if (null == localizer) {
+  // Don't create a new localizer if this one has been recently
+  // cleaned up - this can happen if localization requests come
+  // in after cleanupPrivLocalizers has been called.
+  if (recentlyCleanedLocalizers.containsKey(locId)) {
+LOG.info(
+"Skipping localization request for recently cleaned " +
+"localizer " + locId + " resource:" + req.getResource());
+break;
+  }
   LOG.info("Created localizer for " + locId);
   localizer = new LocalizerRunner(req.getContext(), locId);
   privLocalizers.put(locId, localizer);
@@ -808,6 +823,7 @@ public class ResourceLocalizationService extends 
CompositeService
 public void cleanupPrivLocalizers(String locId) {
   synchronized (privLocalizers) {
 LocalizerRunner localizer = privLocalizers.get(locId);
+recentlyCleanedLocalizers.put(locId, locId);
 if (null == localizer) {
   return; // ignore; already gone
 }
@@ -1047,44 +1063,74 @@ public class ResourceLocalizationService extends 
CompositeService
  

[hadoop] branch trunk updated: HADOOP-16457. Fixed Kerberos activation in ServiceAuthorizationManager. Contributed by Prabhu Joseph

2019-08-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 22430c1  HADOOP-16457. Fixed Kerberos activation in 
ServiceAuthorizationManager.   Contributed by Prabhu Joseph
22430c1 is described below

commit 22430c10e2c41d7b5e4f0457eedaf5395b2b3c84
Author: Eric Yang 
AuthorDate: Tue Aug 6 17:04:17 2019 -0400

HADOOP-16457. Fixed Kerberos activation in ServiceAuthorizationManager.
  Contributed by Prabhu Joseph
---
 .../authorize/ServiceAuthorizationManager.java | 32 ++---
 .../authorize/TestServiceAuthorization.java| 52 ++
 2 files changed, 69 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 4c47348..a264eb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -97,21 +97,23 @@ public class ServiceAuthorizationManager {
   throw new AuthorizationException("Protocol " + protocol + 
" is not known.");
 }
-
-// get client principal key to verify (if available)
-KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
-String clientPrincipal = null; 
-if (krbInfo != null) {
-  String clientKey = krbInfo.clientPrincipal();
-  if (clientKey != null && !clientKey.isEmpty()) {
-try {
-  clientPrincipal = SecurityUtil.getServerPrincipal(
-  conf.get(clientKey), addr);
-} catch (IOException e) {
-  throw (AuthorizationException) new AuthorizationException(
-  "Can't figure out Kerberos principal name for connection from "
-  + addr + " for user=" + user + " protocol=" + protocol)
-  .initCause(e);
+
+String clientPrincipal = null;
+if (UserGroupInformation.isSecurityEnabled()) {
+  // get client principal key to verify (if available)
+  KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
+  if (krbInfo != null) {
+String clientKey = krbInfo.clientPrincipal();
+if (clientKey != null && !clientKey.isEmpty()) {
+  try {
+clientPrincipal = SecurityUtil.getServerPrincipal(
+conf.get(clientKey), addr);
+  } catch (IOException e) {
+throw (AuthorizationException) new AuthorizationException(
+"Can't figure out Kerberos principal name for connection from "
++ addr + " for user=" + user + " protocol=" + protocol)
+.initCause(e);
+  }
 }
   }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java
index c473c50..d02fe60 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java
@@ -20,13 +20,18 @@ package org.apache.hadoop.security.authorize;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.lang.annotation.Annotation;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenInfo;
 import org.junit.Test;
 
 public class TestServiceAuthorization {
@@ -52,6 +57,53 @@ public class TestServiceAuthorization {
 }
   }
 
+  private static class CustomSecurityInfo extends SecurityInfo {
+@Override
+public KerberosInfo getKerberosInfo(Class protocol,
+Configuration conf) {
+  return new KerberosInfo() {
+@Override
+public Class annotationType() {
+  return null;
+}
+@Override
+public String serverPrincipal() {
+  return null;
+}
+@Overri

[hadoop] branch trunk updated: YARN-9667. Use setbuf with line buffer to reduce fflush complexity in container-executor. Contributed by Peter Bacsko

2019-08-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d6697da  YARN-9667.  Use setbuf with line buffer to reduce fflush 
complexity in container-executor. Contributed by Peter Bacsko
d6697da is described below

commit d6697da5e854355ac3718a85006b73315d0702aa
Author: Eric Yang 
AuthorDate: Mon Aug 5 13:59:12 2019 -0400

YARN-9667.  Use setbuf with line buffer to reduce fflush complexity in 
container-executor.
Contributed by Peter Bacsko
---
 .../container-executor/impl/container-executor.c   | 225 +++--
 .../src/main/native/container-executor/impl/main.c |  25 ++-
 .../impl/modules/devices/devices-module.c  |   1 -
 .../src/main/native/container-executor/impl/util.c |   3 +-
 .../src/main/native/container-executor/impl/util.h |   3 +-
 .../container-executor/impl/utils/docker-util.c|   1 -
 .../test/test-container-executor.c |  12 ++
 7 files changed, 57 insertions(+), 213 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1e117b6..69dee35 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -143,7 +143,6 @@ int check_executor_permissions(char *executable_file) {
 fprintf(ERRORFILE,
 "Error resolving the canonical name for the executable : %s!",
 strerror(errno));
-fflush(ERRORFILE);
 return -1;
   }
 
@@ -152,7 +151,6 @@ int check_executor_permissions(char *executable_file) {
   if (stat(resolved_path, ) != 0) {
 fprintf(ERRORFILE,
 "Could not stat the executable : %s!.\n", strerror(errno));
-fflush(ERRORFILE);
 return -1;
   }
 
@@ -163,14 +161,12 @@ int check_executor_permissions(char *executable_file) {
   if (binary_euid != 0) {
 fprintf(LOGFILE,
 "The container-executor binary should be user-owned by root.\n");
-fflush(LOGFILE);
 return -1;
   }
 
   if (binary_gid != getgid()) {
 fprintf(LOGFILE, "The configured nodemanager group %d is different from"
 " the group of the executable %d\n", getgid(), binary_gid);
-fflush(LOGFILE);
 return -1;
   }
 
@@ -180,14 +176,12 @@ int check_executor_permissions(char *executable_file) {
 fprintf(LOGFILE,
 "The container-executor binary should not have write or execute "
 "for others.\n");
-fflush(LOGFILE);
 return -1;
   }
 
   // Binary should be setuid executable
   if ((filestat.st_mode & S_ISUID) == 0) {
 fprintf(LOGFILE, "The container-executor binary should be set setuid.\n");
-fflush(LOGFILE);
 return -1;
   }
 
@@ -207,13 +201,11 @@ static int change_effective_user(uid_t user, gid_t group) 
{
   if (setegid(group) != 0) {
 fprintf(LOGFILE, "Failed to set effective group id %d - %s\n", group,
 strerror(errno));
-fflush(LOGFILE);
 return -1;
   }
   if (seteuid(user) != 0) {
 fprintf(LOGFILE, "Failed to set effective user id %d - %s\n", user,
 strerror(errno));
-fflush(LOGFILE);
 return -1;
   }
   return 0;
@@ -238,7 +230,6 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (cgroup_fd == -1) {
 fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", cgroup_file,
strerror(errno));
-fflush(LOGFILE);
 rc = -1;
 goto cleanup;
   }
@@ -251,7 +242,6 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (written == -1) {
 fprintf(LOGFILE, "Failed to write pid to file %s - %s\n",
cgroup_file, strerror(errno));
-fflush(LOGFILE);
 rc = -1;
 goto cleanup;
   }
@@ -277,20 +267,17 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
   gid_t group = getegid();
   if (change_effective_user(nm_uid, nm_gid) != 0) {
 fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", nm_uid, 
nm_gid);
-fflush(ERRORFILE);
 rc = -1;
 goto cleanup;
   }
 
   temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
   fprintf(LOGFILE, "Writing to tmp file %s\n", temp_pid_file);
-  fflush(LOGFILE);
   // create with 700
   int pid_fd = open(temp_pid_file, O_W

[hadoop] branch trunk updated: HDDS-1833. Moved RefCountedDB stacktrace to log level trace. Contributed by Siddharth Wagle

2019-07-29 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d023663  HDDS-1833. Moved RefCountedDB stacktrace to log level trace.  
  Contributed by Siddharth Wagle
d023663 is described below

commit d023663e3e2038ff73e9f1f8d931c8946adb048e
Author: Eric Yang 
AuthorDate: Mon Jul 29 12:05:24 2019 -0400

HDDS-1833. Moved RefCountedDB stacktrace to log level trace.
   Contributed by Siddharth Wagle
---
 .../ozone/container/common/utils/ReferenceCountedDB.java | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
index 81cde5b..819f22c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.ozone.container.common.utils;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.utils.MetadataStore;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -52,20 +54,18 @@ public class ReferenceCountedDB implements Closeable {
 
   public void incrementReference() {
 this.referenceCount.incrementAndGet();
-if (LOG.isDebugEnabled()) {
-  LOG.debug("IncRef {} to refCnt {} \n", containerDBPath,
-  referenceCount.get());
-  new Exception().printStackTrace();
+if (LOG.isTraceEnabled()) {
+  LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", containerDBPath,
+  referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable()));
 }
   }
 
   public void decrementReference() {
 int refCount = this.referenceCount.decrementAndGet();
 Preconditions.checkArgument(refCount >= 0, "refCount:", refCount);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("DecRef {} to refCnt {} \n", containerDBPath,
-  referenceCount.get());
-  new Exception().printStackTrace();
+if (LOG.isTraceEnabled()) {
+  LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", containerDBPath,
+  referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable()));
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-6046. Fixed documentation error in YarnApplicationSecurity. Contributed by Yousef Abu-Salah

2019-07-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9838a47  YARN-6046.  Fixed documentation error in 
YarnApplicationSecurity. Contributed by Yousef Abu-Salah
9838a47 is described below

commit 9838a47d44c31ac8557b4e8f67c1676c356ec9f7
Author: Eric Yang 
AuthorDate: Thu Jul 18 12:36:45 2019 -0400

YARN-6046.  Fixed documentation error in YarnApplicationSecurity.
Contributed by Yousef Abu-Salah
---
 .../hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
index 5f1f325..d36d83b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
@@ -114,7 +114,7 @@ supplied this way.
 
 This means you have a relative similar workflow across secure and insecure 
clusters.
 
-1. Suring AM startup, log in to Kerberos.
+1. During AM startup, log in to Kerberos.
 A call to `UserGroupInformation.isSecurityEnabled()` will trigger this 
operation.
 
 1. Enumerate the current user's credentials, through a call of
@@ -144,7 +144,7 @@ than the AMRM and timeline tokens.
 
 Here are the different strategies
 
-1. Don't. Rely on the lifespan of the application being so short that token
+1. Don't rely on the lifespan of the application being so short that token
 renewal is not needed. For applications whose life can always be measured
 in minutes or tens of minutes, this is a viable strategy.
 
@@ -156,7 +156,7 @@ This what most YARN applications do.
 
 ### AM/RM Token Refresh
 
-The AM/RM token is renewed automatically; the AM pushes out a new token
+The AM/RM token is renewed automatically; the RM sends out a new token
 to the AM within an `allocate` message. Consult the `AMRMClientImpl` class
 to see the process. *Your AM code does not need to worry about this process*
 
@@ -191,7 +191,7 @@ token. Consult `UnmanagedAMLauncher` for the specifics.
 ### Identity on an insecure cluster: `HADOOP_USER_NAME`
 
 In an insecure cluster, the application will run as the identity of
-the account of the node manager, typically something such as `yarn`
+the account of the node manager, such as `yarn`
 or `mapred`. By default, the application will access HDFS
 as that user, with a different home directory, and with
 a different user identified in audit logs and on file system owner attributes.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9568. Fixed NPE in MiniYarnCluster during FileSystemNodeAttributeStore.recover. Contributed by Steve Loughran

2019-07-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c34ceb5  YARN-9568. Fixed NPE in MiniYarnCluster during 
FileSystemNodeAttributeStore.recover.Contributed by Steve Loughran
c34ceb5 is described below

commit c34ceb5fde9f6d3d692640eb2a27d97990f17350
Author: Eric Yang 
AuthorDate: Thu Jul 18 12:30:53 2019 -0400

YARN-9568. Fixed NPE in MiniYarnCluster during 
FileSystemNodeAttributeStore.recover.
   Contributed by Steve Loughran
---
 .../test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java  | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 5edd3ca..68d97ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -313,7 +313,13 @@ public class MiniYARNCluster extends CompositeService {
 YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED) || enableAHS) {
 addService(new ApplicationHistoryServerWrapper());
 }
-
+// to ensure that any FileSystemNodeAttributeStore started by the RM always
+// uses a unique path, if unset, force it under the test dir.
+if (conf.get(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_ROOT_DIR) == null) {
+  File nodeAttrDir = new File(getTestWorkDir(), "nodeattributes");
+  conf.set(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_ROOT_DIR,
+  nodeAttrDir.getCanonicalPath());
+}
 super.serviceInit(
 conf instanceof YarnConfiguration ? conf : new 
YarnConfiguration(conf));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9660. Update support documentation for Docker on YARN. Contributed by Peter Bacsko

2019-07-10 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 555dabf  YARN-9660. Update support documentation for Docker on YARN.   
 Contributed by Peter Bacsko
555dabf is described below

commit 555dabf4f849f8b1163ae26c57f1392230d40100
Author: Eric Yang 
AuthorDate: Wed Jul 10 17:15:33 2019 -0400

YARN-9660. Update support documentation for Docker on YARN.
   Contributed by Peter Bacsko
---
 .../src/site/markdown/DockerContainers.md  | 86 ++
 1 file changed, 86 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index b5c54be..e30ac98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -359,6 +359,58 @@ implicitly perform a Docker pull command. Both MapReduce 
and Spark assume that
 tasks which take more that 10 minutes to report progress have stalled, so
 specifying a large Docker image may cause the application to fail.
 
+CGroups configuration Requirements
+--
+The Docker plugin utilizes cgroups to limit resource usage of individual 
containers.
+Since launched containers belong to YARN, the command line option 
`--cgroup-parent` is
+used to define the appropriate control group.
+
+Docker supports two different cgroups drivers: `cgroupfs` and `systemd`. Note 
that only
+`cgroupfs` is supported - an attempt to launch a Docker container with `systemd` 
results in the
+following, similar error message:
+
+```
+Container id: container_1561638268473_0006_01_02
+Exit code: 7
+Exception message: Launch container failed
+Shell error output: /usr/bin/docker-current: Error response from daemon: 
cgroup-parent for systemd cgroup should be a valid slice named as "xxx.slice".
+See '/usr/bin/docker-current run --help'.
+Shell output: main : command provided 4
+```
+
+This means you have to reconfigure the Docker daemon on each host where 
`systemd` driver is used.
+
+Depending on what OS Hadoop is running on, reconfiguration might require 
different steps. However,
+if `systemd` was chosen for cgroups driver, it is likely that the `systemctl` 
command is available
+on the system.
+
+Check the `ExecStart` property of the Docker daemon:
+
+```
+~$ systemctl show --no-pager --property=ExecStart docker.service
+ExecStart={ path=/usr/bin/dockerd-current ; argv[]=/usr/bin/dockerd-current 
--add-runtime
+docker-runc=/usr/libexec/docker/docker-runc-current 
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+--userland-proxy-path=/usr/libexec/docker/docker-proxy-current
+--init-path=/usr/libexec/docker/docker-init-current
+--seccomp-profile=/etc/docker/seccomp.json
+$OPTIONS $DOCKER_STORAGE_OPTIONS $DOCKER_NETWORK_OPTIONS $ADD_REGISTRY 
$BLOCK_REGISTRY $INSECURE_REGISTRY $REGISTRIES ;
+ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; 
status=0/0 }
+```
+
+This example shows that the `native.cgroupdriver` is `systemd`. You have to 
modify that in the unit file of the daemon.
+
+```
+~$ sudo systemctl edit --full docker.service
+```
+
+This brings up the whole configuration for editing. Just replace the `systemd` 
string with `cgroupfs`. Save the
+changes and restart both the systemd and Docker daemon:
+
+```
+~$ sudo systemctl daemon-reload
+~$ sudo systemctl restart docker.service
+```
+
 Application Submission
 --
 
@@ -667,6 +719,14 @@ In development environment, local images can be tagged 
with a repository name pr
 docker tag centos:latest localhost:5000/centos:latest
 ```
 
+Let's say you have an Ubuntu-based image with some changes in the local 
repository and you wish to use it.
+The following example tags the `local_ubuntu` image:
+```
+docker tag local_ubuntu local/ubuntu:latest
+```
+
+Next, you have to add `local` to `docker.trusted.registries`. The image can be 
referenced by using `local/ubuntu`.
+
 Trusted images are allowed to mount external devices such as HDFS via NFS 
gateway, or host level Hadoop configuration.  If system administrators allow 
writing to external volumes using `docker.allow.rw-mounts directive`, 
privileged docker container can have full control of host level files in the 
predefined volumes.
 
 For [YARN Service HTTPD example](./yarn-service/Examples.html), 
container-executor.cfg must define centos docker registry to be trusted for the 
example to run.
@@ -981,6 +1041,32 @@ In yarn-env.sh, define:
 export YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE=true
 ```
 
+Requirements when not using ENTRYPOINT 

[hadoop] branch trunk updated: YARN-9560. Restructure DockerLinuxContainerRuntime to extend OCIContainerRuntime. Contributed by Eric Badger, Jim Brennan, Craig Condit

2019-06-28 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 29465bf  YARN-9560. Restructure DockerLinuxContainerRuntime to extend 
OCIContainerRuntime.Contributed by Eric Badger, Jim Brennan, Craig 
Condit
29465bf is described below

commit 29465bf169a7e348a4f32265083450faf66d5631
Author: Eric Yang 
AuthorDate: Fri Jun 28 17:14:26 2019 -0400

YARN-9560. Restructure DockerLinuxContainerRuntime to extend 
OCIContainerRuntime.
   Contributed by Eric Badger, Jim Brennan, Craig Condit
---
 .../server/nodemanager/LinuxContainerExecutor.java |   5 +-
 .../launcher/ContainerCleanup.java |   4 +-
 .../resources/gpu/GpuResourceHandlerImpl.java  |   4 +-
 .../linux/runtime/DockerLinuxContainerRuntime.java | 351 ---
 .../linux/runtime/OCIContainerRuntime.java | 374 +
 .../deviceframework/DeviceResourceHandlerImpl.java |   4 +-
 .../linux/runtime/TestDockerContainerRuntime.java  |  41 +--
 7 files changed, 461 insertions(+), 322 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 137421a..06a32be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRmCommand;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
@@ -94,14 +95,14 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * appropriate {@link LinuxContainerRuntime} instance. This class uses a
  * {@link DelegatingLinuxContainerRuntime} instance, which will delegate calls
  * to either a {@link DefaultLinuxContainerRuntime} instance or a
- * {@link DockerLinuxContainerRuntime} instance, depending on the job's
+ * {@link OCIContainerRuntime} instance, depending on the job's
  * configuration.
  *
  * @see LinuxContainerRuntime
  * @see DelegatingLinuxContainerRuntime
  * @see DefaultLinuxContainerRuntime
  * @see DockerLinuxContainerRuntime
- * @see DockerLinuxContainerRuntime#isDockerContainerRequested
+ * @see OCIContainerRuntime#isOCICompliantContainerRequested
  */
 public class LinuxContainerExecutor extends ContainerExecutor {
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
index faf926a..e92560e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
@@ -35,7 +35,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DockerContainerDeletionTask;
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
+import

[hadoop] branch branch-3.2 updated: YARN-9581. Add support for get multiple RM webapp URLs. Contributed by Prabhu Joseph

2019-06-28 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 860606f  YARN-9581. Add support for get multiple RM webapp URLs.   
 Contributed by Prabhu Joseph
860606f is described below

commit 860606fc670ed3ebc9ad034293bab9734e655bbd
Author: Eric Yang 
AuthorDate: Fri Jun 28 14:51:58 2019 -0400

YARN-9581. Add support for get multiple RM webapp URLs.
   Contributed by Prabhu Joseph

(cherry picked from commit f02b0e19940dc6fc1e19258a40db37d1eed89d21)
---
 .../java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 5b1c3bb..09daf42 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -102,8 +102,16 @@ public class WebAppUtils {
   return func.apply(rm1Address, arg);
 } catch (Exception e) {
   if (HAUtil.isHAEnabled(conf)) {
-String rm2Address = getRMWebAppURLWithScheme(conf, 1);
-return func.apply(rm2Address, arg);
+int rms = HAUtil.getRMHAIds(conf).size();
+for (int i=1; i

[hadoop] branch trunk updated: YARN-9581. Add support for get multiple RM webapp URLs. Contributed by Prabhu Joseph

2019-06-28 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f02b0e1  YARN-9581. Add support for get multiple RM webapp URLs.   
 Contributed by Prabhu Joseph
f02b0e1 is described below

commit f02b0e19940dc6fc1e19258a40db37d1eed89d21
Author: Eric Yang 
AuthorDate: Fri Jun 28 14:51:58 2019 -0400

YARN-9581. Add support for get multiple RM webapp URLs.
   Contributed by Prabhu Joseph
---
 .../java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 5b1c3bb..09daf42 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -102,8 +102,16 @@ public class WebAppUtils {
   return func.apply(rm1Address, arg);
 } catch (Exception e) {
   if (HAUtil.isHAEnabled(conf)) {
-String rm2Address = getRMWebAppURLWithScheme(conf, 1);
-return func.apply(rm2Address, arg);
+int rms = HAUtil.getRMHAIds(conf).size();
+for (int i=1; i

[hadoop] branch trunk updated: YARN-9374. Improve Timeline service resilience when HBase is unavailable. Contributed by Prabhu Joseph and Szilard Nemeth

2019-06-24 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b220ec6  YARN-9374.  Improve Timeline service resilience when HBase is 
unavailable. Contributed by Prabhu Joseph and Szilard Nemeth
b220ec6 is described below

commit b220ec6f613dca4542e256008b1be2689c67bb03
Author: Eric Yang 
AuthorDate: Mon Jun 24 12:19:14 2019 -0400

YARN-9374.  Improve Timeline service resilience when HBase is unavailable.
Contributed by Prabhu Joseph and Szilard Nemeth
---
 .../storage/TestTimelineReaderHBaseDown.java   |  18 +++-
 .../storage/TestTimelineWriterHBaseDown.java   | 117 +
 .../storage/HBaseTimelineReaderImpl.java   |  13 +--
 .../storage/HBaseTimelineWriterImpl.java   |  19 +++-
 .../storage/TimelineStorageMonitor.java|   4 -
 5 files changed, 158 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
index e738d39..1148b80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
@@ -150,7 +150,14 @@ public class TestTimelineReaderHBaseDown {
   waitForHBaseDown(htr);
 
   util.startMiniHBaseCluster(1, 1);
-  GenericTestUtils.waitFor(() -> !htr.isHBaseDown(), 1000, 15);
+  GenericTestUtils.waitFor(() -> {
+try {
+  htr.getTimelineStorageMonitor().checkStorageIsUp();
+  return true;
+} catch (IOException e) {
+  return false;
+}
+  }, 1000, 15);
 } finally {
   util.shutdownMiniCluster();
 }
@@ -158,8 +165,15 @@ public class TestTimelineReaderHBaseDown {
 
   private static void waitForHBaseDown(HBaseTimelineReaderImpl htr) throws
   TimeoutException, InterruptedException {
-GenericTestUtils.waitFor(() -> htr.isHBaseDown(), 1000, 15);
 try {
+  GenericTestUtils.waitFor(() -> {
+try {
+  htr.getTimelineStorageMonitor().checkStorageIsUp();
+  return false;
+} catch (IOException e) {
+  return true;
+}
+  }, 1000, 15);
   checkQuery(htr);
   Assert.fail("Query should fail when HBase is down");
 } catch (IOException e) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterHBaseDown.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterHBaseDown.java
new file mode 100644
index 000..cb89ba4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineWriterHBaseDown.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import java.io.IOException;
+
+import org.junit.Test;
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.ap

[hadoop] branch trunk updated: YARN-9631. Added ability to select JavaScript test or skip JavaScript tests for YARN application catalog. Contributed by Eric Yang

2019-06-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5bfdf62  YARN-9631.  Added ability to select JavaScript test or skip 
JavaScript tests for YARN application catalog. Contributed by Eric 
Yang
5bfdf62 is described below

commit 5bfdf62614735e09b67d6c70a0db4e0dbd2743b2
Author: Eric Yang 
AuthorDate: Wed Jun 19 18:45:23 2019 -0400

YARN-9631.  Added ability to select JavaScript test or skip JavaScript 
tests for YARN application catalog.
Contributed by Eric Yang

(cherry picked from commit 6002b0c5c6994965d3f7231330248c093869dba2)
---
 .../hadoop-yarn-applications-catalog-webapp/pom.xml   | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
index 273379d..8e716f8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
@@ -36,6 +36,7 @@
 target/generated-sources/vendor
 UTF-8
 false
+*Spec
 
 
 
@@ -365,6 +366,7 @@
 
 
 
+${skipTests}
 
org.openqa.selenium.phantomjs.PhantomJSDriver
 
 
@@ -381,7 +383,7 @@
 src/main/javascript
 src/test/javascript
 
-*Spec.js
+${javascript.test}.js
 
 
 
@@ -459,6 +461,17 @@
 
   
   
+test-selector
+
+  
+test
+  
+
+
+  ${test}
+
+  
+  
 rest-docs
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9574. Update hadoop-yarn-applications-mawo artifactId to match directory name. Contributed by Wanqiang Ji

2019-06-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b14f056  YARN-9574. Update hadoop-yarn-applications-mawo artifactId to 
match directory name. Contributed by Wanqiang Ji
b14f056 is described below

commit b14f0569bb8f4c6ea56de13f797d1d5155e99ceb
Author: Eric Yang 
AuthorDate: Tue Jun 18 13:48:38 2019 -0400

YARN-9574. Update hadoop-yarn-applications-mawo artifactId to match 
directory name.
   Contributed by Wanqiang Ji

(cherry picked from commit 5f758a69ede8fafd214857a74f5d3b46198094c4)
---
 .../hadoop-yarn-applications-mawo-core/pom.xml| 4 ++--
 .../hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
index 93eab69..1e3584b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
@@ -13,14 +13,14 @@
 http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
-hadoop-applications-mawo
+hadoop-yarn-applications-mawo
 org.apache.hadoop.applications.mawo
 3.3.0-SNAPSHOT
 
   4.0.0
 
   
-  hadoop-applications-mawo-core
+  hadoop-yarn-applications-mawo-core
   jar
 
   Apache Hadoop YARN Application MaWo Core
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml
index 3a5cd9f..5594a30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml
@@ -20,7 +20,7 @@
 4.0.0
 
 org.apache.hadoop.applications.mawo
-hadoop-applications-mawo
+hadoop-yarn-applications-mawo
 pom
 
 Apache Hadoop YARN Application MaWo


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-8499 ATSv2 Generalize TimelineStorageMonitor. Contributed by Prabhu Joseph

2019-06-14 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cda9f33  YARN-8499 ATSv2 Generalize TimelineStorageMonitor.
Contributed by Prabhu Joseph
cda9f33 is described below

commit cda9f3374573f0cb5ae4f26ba3fbc77aae45ec58
Author: Eric Yang 
AuthorDate: Fri Jun 14 18:59:14 2019 -0400

YARN-8499 ATSv2 Generalize TimelineStorageMonitor.
   Contributed by Prabhu Joseph
---
 .../storage/TestTimelineReaderHBaseDown.java   |   4 +-
 .../storage/HBaseStorageMonitor.java   |  90 +
 .../storage/HBaseTimelineReaderImpl.java   |  90 ++---
 .../storage/TimelineStorageMonitor.java| 106 +
 4 files changed, 206 insertions(+), 84 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
index 786f529..e738d39 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
@@ -34,8 +34,8 @@ import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS;
-import static 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.DATA_TO_RETRIEVE;
-import static 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.MONITOR_FILTERS;
+import static 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseStorageMonitor.DATA_TO_RETRIEVE;
+import static 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseStorageMonitor.MONITOR_FILTERS;
 
 public class TestTimelineReaderHBaseDown {
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java
new file mode 100644
index 000..c433aa6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineE

[hadoop] branch trunk updated: HADOOP-16366. Fixed ProxyUserAuthenticationFilterInitializer for timeline server. Contributed by Prabhu Joseph

2019-06-14 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3ba090f  HADOOP-16366.  Fixed ProxyUserAuthenticationFilterInitializer 
for timeline server. Contributed by Prabhu Joseph
3ba090f is described below

commit 3ba090f4360c81c9dfb575efa13b8161c7a5255b
Author: Eric Yang 
AuthorDate: Fri Jun 14 12:54:16 2019 -0400

HADOOP-16366.  Fixed ProxyUserAuthenticationFilterInitializer for timeline 
server.
   Contributed by Prabhu Joseph
---
 .../yarn/server/timelineservice/reader/TimelineReaderServer.java| 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 49c1d4b..10265c6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -165,10 +165,10 @@ public class TimelineReaderServer extends 
CompositeService {
   TimelineReaderAuthenticationFilterInitializer.class.getName())) {
 defaultInitializers.add(
 TimelineReaderAuthenticationFilterInitializer.class.getName());
-  } else {
-defaultInitializers.add(
-ProxyUserAuthenticationFilterInitializer.class.getName());
   }
+} else {
+  defaultInitializers.add(
+  ProxyUserAuthenticationFilterInitializer.class.getName());
 }
 
 defaultInitializers.add(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16367. Fixed MiniYarnCluster AuthenticationFilter initialization. Contributed by Prabhu Joseph

2019-06-12 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 205dd2d  HADOOP-16367.  Fixed MiniYarnCluster AuthenticationFilter 
initialization. Contributed by Prabhu Joseph
205dd2d is described below

commit 205dd2d8e1db46a8d4e1711e7b74e4e5fe162686
Author: Eric Yang 
AuthorDate: Wed Jun 12 18:03:33 2019 -0400

HADOOP-16367.  Fixed MiniYarnCluster AuthenticationFilter initialization.
   Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/server/MiniYARNCluster.java | 25 ++
 1 file changed, 25 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index fa69f18..19c4eb4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -25,9 +25,13 @@ import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Map;
+import java.util.Set;
+import java.util.LinkedHashSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.commons.lang3.StringUtils;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -90,6 +94,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import 
org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import 
org.apache.hadoop.yarn.server.timeline.recovery.MemoryTimelineStateStore;
@@ -824,6 +829,26 @@ public class MiniYARNCluster extends CompositeService {
 
 @Override
 protected synchronized void serviceStart() throws Exception {
+
+  // Removing RMAuthenticationFilter as it conflicts with
+  // TimelineAuthenticationFilter
+  Configuration conf = getConfig();
+  String filterInitializerConfKey = "hadoop.http.filter.initializers";
+  String initializers = conf.get(filterInitializerConfKey, "");
+  String[] parts = initializers.split(",");
+  Set target = new LinkedHashSet();
+  for (String filterInitializer : parts) {
+filterInitializer = filterInitializer.trim();
+if (filterInitializer.equals(
+RMAuthenticationFilterInitializer.class.getName())
+|| filterInitializer.isEmpty()) {
+  continue;
+}
+target.add(filterInitializer);
+  }
+  initializers = StringUtils.join(target, ",");
+  conf.set(filterInitializerConfKey, initializers);
+
   appHistoryServer.start();
   if (appHistoryServer.getServiceState() != STATE.STARTED) {
 // AHS could have failed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16354. Enable AuthFilter as default for WebHDFS. Contributed by Prabhu Joseph

2019-06-11 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4ea6c2f  HADOOP-16354.  Enable AuthFilter as default for WebHDFS.  
  Contributed by Prabhu Joseph
4ea6c2f is described below

commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49
Author: Eric Yang 
AuthorDate: Tue Jun 11 18:41:08 2019 -0400

HADOOP-16354.  Enable AuthFilter as default for WebHDFS.
   Contributed by Prabhu Joseph
---
 .../server/ProxyUserAuthenticationFilter.java  |  88 ++-
 .../server/TestProxyUserAuthenticationFilter.java  |   2 +-
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  |  27 +
 .../hadoop/hdfs/server/common/JspHelper.java   |   2 +-
 .../hdfs/server/namenode/NameNodeHttpServer.java   |  50 -
 .../org/apache/hadoop/hdfs/web/AuthFilter.java | 115 ++-
 .../hadoop/hdfs/web/AuthFilterInitializer.java |  69 
 .../org/apache/hadoop/hdfs/web/TestAuthFilter.java | 125 +++--
 8 files changed, 231 insertions(+), 247 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
index 42902b3..bd04efe 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
@@ -18,12 +18,18 @@ import 
org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.security.Principal;
+import java.util.ArrayList;
 import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
@@ -41,7 +47,7 @@ public class ProxyUserAuthenticationFilter extends 
AuthenticationFilter {
   private static final Logger LOG = LoggerFactory.getLogger(
   ProxyUserAuthenticationFilter.class);
 
-  private static final String DO_AS = "doAs";
+  private static final String DO_AS = "doas";
   public static final String PROXYUSER_PREFIX = "proxyuser";
 
   @Override
@@ -54,8 +60,9 @@ public class ProxyUserAuthenticationFilter extends 
AuthenticationFilter {
   @Override
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
+final HttpServletRequest lowerCaseRequest = toLowerCase(request);
+String doAsUser = lowerCaseRequest.getParameter(DO_AS);
 
-String doAsUser = request.getParameter(DO_AS);
 if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) {
   LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ",
   doAsUser, request.getRemoteUser(), request.getRemoteAddr());
@@ -111,5 +118,82 @@ public class ProxyUserAuthenticationFilter extends 
AuthenticationFilter {
 return conf;
   }
 
+  static boolean containsUpperCase(final Iterable strings) {
+for(String s : strings) {
+  for(int i = 0; i < s.length(); i++) {
+if (Character.isUpperCase(s.charAt(i))) {
+  return true;
+}
+  }
+}
+return false;
+  }
+
+  public static HttpServletRequest toLowerCase(
+  final HttpServletRequest request) {
+@SuppressWarnings("unchecked")
+final Map original = (Map)
+request.getParameterMap();
+if (!containsUpperCase(original.keySet())) {
+  return request;
+}
+
+final Map> m = new HashMap>();
+for (Map.Entry entry : original.entrySet()) {
+  final String key = StringUtils.toLowerCase(entry.getKey());
+  List strings = m.get(key);
+  if (strings == null) {
+strings = new ArrayList();
+m.put(key, strings);
+  }
+  for (String v : entry.getValue()) {
+strings.add(v);
+  }
+}
+
+return new HttpServletRequestWrapper(request) {
+  private Map parameters = null;
+
+  @Override
+  public Map getParameterMap() {
+if (parameters == null) {
+  parameters = new HashMap();
+  for (Map.Entry> entry : m.entrySet()) {
+final List a = entry.getValue();
+

[hadoop] branch trunk updated: YARN-9581. Fixed yarn logs cli to access RM2. Contributed by Prabhu Joseph

2019-06-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cb9bc6e  YARN-9581.  Fixed yarn logs cli to access RM2. 
Contributed by Prabhu Joseph
cb9bc6e is described below

commit cb9bc6e64c590622ae04aea2c81962be59037f7a
Author: Eric Yang 
AuthorDate: Thu Jun 6 16:41:58 2019 -0400

YARN-9581.  Fixed yarn logs cli to access RM2.
Contributed by Prabhu Joseph
---
 .../org/apache/hadoop/yarn/client/cli/LogsCLI.java | 44 ++
 .../hadoop/yarn/client/cli/SchedConfCLI.java   | 26 -
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 39 +--
 .../yarn/webapp/util/YarnWebServiceUtils.java  | 29 ++
 .../hadoop/yarn/conf/TestYarnConfiguration.java|  6 +++
 5 files changed, 107 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 96007f4..2b5439b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -25,6 +25,7 @@ import com.sun.jersey.api.client.ClientRequest;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.WebResource.Builder;
 import com.sun.jersey.api.client.filter.ClientFilter;
 import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
 import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
@@ -157,6 +158,9 @@ public class LogsCLI extends Configured implements Tool {
   if (yarnClient != null) {
 yarnClient.close();
   }
+  if (webServiceClient != null) {
+webServiceClient.destroy();
+  }
 }
   }
 
@@ -420,24 +424,34 @@ public class LogsCLI extends Configured implements Tool {
   }
 
   protected List getAMContainerInfoForRMWebService(
-  Configuration conf, String appId) throws ClientHandlerException,
-  UniformInterfaceException, JSONException {
-String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
-
-WebResource webResource = webServiceClient.resource(webAppAddress);
+  Configuration conf, String appId) throws Exception {
+return WebAppUtils.execOnActiveRM(conf, this::getAMContainerInfoFromRM,
+appId);
+  }
 
-ClientResponse response =
-webResource.path("ws").path("v1").path("cluster").path("apps")
-  .path(appId).path("appattempts").accept(MediaType.APPLICATION_JSON)
-  .get(ClientResponse.class);
-JSONObject json =
-response.getEntity(JSONObject.class).getJSONObject("appAttempts");
-JSONArray requests = json.getJSONArray("appAttempt");
+  private List getAMContainerInfoFromRM(
+  String webAppAddress, String appId) throws ClientHandlerException,
+  UniformInterfaceException, JSONException {
 List amContainersList = new ArrayList();
-for (int i = 0; i < requests.length(); i++) {
-  amContainersList.add(requests.getJSONObject(i));
+ClientResponse response = null;
+try {
+  Builder builder = webServiceClient.resource(webAppAddress)
+  .path("ws").path("v1").path("cluster")
+  .path("apps").path(appId).path("appattempts")
+  .accept(MediaType.APPLICATION_JSON);
+  response = builder.get(ClientResponse.class);
+  JSONObject json = response.getEntity(JSONObject.class)
+  .getJSONObject("appAttempts");
+  JSONArray requests = json.getJSONArray("appAttempt");
+  for (int j = 0; j < requests.length(); j++) {
+amContainersList.add(requests.getJSONObject(j));
+  }
+  return amContainersList;
+} finally {
+  if (response != null) {
+response.close();
+  }
 }
-return amContainersList;
   }
 
   private List getAMContainerInfoForAHSWebService(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index a5f3b80..be54553 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli

[hadoop] branch branch-3.2 updated: YARN-9581. Fixed yarn logs cli to access RM2. Contributed by Prabhu Joseph

2019-06-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 68aec0a  YARN-9581.  Fixed yarn logs cli to access RM2. 
Contributed by Prabhu Joseph
68aec0a is described below

commit 68aec0a98d6a9a2e9f91fa6b5563df072cca5d7b
Author: Eric Yang 
AuthorDate: Thu Jun 6 16:41:58 2019 -0400

YARN-9581.  Fixed yarn logs cli to access RM2.
Contributed by Prabhu Joseph

(cherry picked from commit cb9bc6e64c590622ae04aea2c81962be59037f7a)
---
 .../org/apache/hadoop/yarn/client/cli/LogsCLI.java | 44 ++
 .../hadoop/yarn/client/cli/SchedConfCLI.java   | 26 -
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 39 +--
 .../yarn/webapp/util/YarnWebServiceUtils.java  | 29 ++
 .../hadoop/yarn/conf/TestYarnConfiguration.java|  6 +++
 5 files changed, 107 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index a1550a5..c602442 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -25,6 +25,7 @@ import com.sun.jersey.api.client.ClientRequest;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.WebResource.Builder;
 import com.sun.jersey.api.client.filter.ClientFilter;
 import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
 import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
@@ -155,6 +156,9 @@ public class LogsCLI extends Configured implements Tool {
   if (yarnClient != null) {
 yarnClient.close();
   }
+  if (webServiceClient != null) {
+webServiceClient.destroy();
+  }
 }
   }
 
@@ -418,24 +422,34 @@ public class LogsCLI extends Configured implements Tool {
   }
 
   protected List getAMContainerInfoForRMWebService(
-  Configuration conf, String appId) throws ClientHandlerException,
-  UniformInterfaceException, JSONException {
-String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
-
-WebResource webResource = webServiceClient.resource(webAppAddress);
+  Configuration conf, String appId) throws Exception {
+return WebAppUtils.execOnActiveRM(conf, this::getAMContainerInfoFromRM,
+appId);
+  }
 
-ClientResponse response =
-webResource.path("ws").path("v1").path("cluster").path("apps")
-  .path(appId).path("appattempts").accept(MediaType.APPLICATION_JSON)
-  .get(ClientResponse.class);
-JSONObject json =
-response.getEntity(JSONObject.class).getJSONObject("appAttempts");
-JSONArray requests = json.getJSONArray("appAttempt");
+  private List getAMContainerInfoFromRM(
+  String webAppAddress, String appId) throws ClientHandlerException,
+  UniformInterfaceException, JSONException {
 List amContainersList = new ArrayList();
-for (int i = 0; i < requests.length(); i++) {
-  amContainersList.add(requests.getJSONObject(i));
+ClientResponse response = null;
+try {
+  Builder builder = webServiceClient.resource(webAppAddress)
+  .path("ws").path("v1").path("cluster")
+  .path("apps").path(appId).path("appattempts")
+  .accept(MediaType.APPLICATION_JSON);
+  response = builder.get(ClientResponse.class);
+  JSONObject json = response.getEntity(JSONObject.class)
+  .getJSONObject("appAttempts");
+  JSONArray requests = json.getJSONArray("appAttempt");
+  for (int j = 0; j < requests.length(); j++) {
+amContainersList.add(requests.getJSONObject(j));
+  }
+  return amContainersList;
+} finally {
+  if (response != null) {
+response.close();
+  }
 }
-return amContainersList;
   }
 
   private List getAMContainerInfoForAHSWebService(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index a5f3b80..be54553 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 

[hadoop] branch trunk updated: HADOOP-16314. Make sure all web end points are covered by the same authentication filter. Contributed by Prabhu Joseph

2019-06-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 294695d  HADOOP-16314.  Make sure all web end points are covered by 
the same authentication filter. Contributed by Prabhu Joseph
294695d is described below

commit 294695dd57cb75f2756a31a54264bdd37b32bb01
Author: Eric Yang 
AuthorDate: Wed Jun 5 18:52:39 2019 -0400

HADOOP-16314.  Make sure all web end points are covered by the same 
authentication filter.
   Contributed by Prabhu Joseph
---
 .../java/org/apache/hadoop/http/HttpServer2.java   |  48 ++---
 .../java/org/apache/hadoop/http/WebServlet.java|  59 +
 .../src/site/markdown/HttpAuthentication.md|   4 +-
 .../org/apache/hadoop/http/TestGlobalFilter.java   |   4 +-
 .../hadoop/http/TestHttpServerWithSpnego.java  | 238 +
 .../org/apache/hadoop/http/TestPathFilter.java |   2 -
 .../org/apache/hadoop/http/TestServletFilter.java  |   1 -
 .../java/org/apache/hadoop/log/TestLogLevel.java   |   9 +
 .../hdfs/server/namenode/NameNodeHttpServer.java   |  12 --
 .../TestDFSInotifyEventInputStreamKerberized.java  |   9 +
 .../hadoop/hdfs/qjournal/TestSecureNNWithQJM.java  |   8 +
 .../apache/hadoop/hdfs/web/TestWebHdfsTokens.java  |   8 +
 .../web/TestWebHdfsWithAuthenticationFilter.java   |  18 +-
 .../org/apache/hadoop/yarn/webapp/Dispatcher.java  |   9 +
 .../server/util/timeline/TimelineServerUtils.java  |  10 +-
 .../resourcemanager/webapp/RMWebAppUtil.java   |   4 +
 .../reader/TimelineReaderServer.java   |  13 +-
 .../webproxy/amfilter/TestSecureAmFilter.java  |  10 +-
 18 files changed, 412 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index fb2dff5..7825e08 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
+import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Enumeration;
@@ -66,6 +67,8 @@ import 
org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import 
org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;
+import 
org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
@@ -90,7 +93,6 @@ import org.eclipse.jetty.server.handler.HandlerCollection;
 import org.eclipse.jetty.server.handler.RequestLogHandler;
 import org.eclipse.jetty.server.session.AbstractSessionManager;
 import org.eclipse.jetty.server.session.SessionHandler;
-import org.eclipse.jetty.servlet.DefaultServlet;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.FilterMapping;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -155,7 +157,7 @@ public final class HttpServer2 implements FilterContainer {
   // gets stored.
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
   public static final String ADMINS_ACL = "admins.acl";
-  public static final String SPNEGO_FILTER = "SpnegoFilter";
+  public static final String SPNEGO_FILTER = "authentication";
   public static final String NO_CACHE_FILTER = "NoCacheFilter";
 
   public static final String BIND_ADDRESS = "bind.address";
@@ -433,7 +435,9 @@ public final class HttpServer2 implements FilterContainer {
 
   HttpServer2 server = new HttpServer2(this);
 
-  if (this.securityEnabled) {
+  if (this.securityEnabled &&
+  !this.conf.get(authFilterConfigurationPrefix + "type").
+  equals(PseudoAuthenticationHandler.TYPE)) {
 server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
   }
 
@@ -608,13 +612,6 @@ public final class HttpServer2 implements FilterContainer {
 }
 
 addDefaultServlets();
-
-if (pathSpecs != null) {
-  for (String path : pathSpecs) {
-LOG.info("adding path spec: " + path);
-addFilterPathMapping(path, webAppContext);
-  }
-}
   }
 
   private void addListener(ServerConnector co

[hadoop] branch trunk updated: YARN-7537. Add ability to load hbase config from distributed file system. Contributed by Prabhu Joseph

2019-06-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d45669c  YARN-7537.  Add ability to load hbase config from distributed 
file system. Contributed by Prabhu Joseph
d45669c is described below

commit d45669cd3c65fe83b0821d4e5bc72358f52700a3
Author: Eric Yang 
AuthorDate: Tue Jun 4 19:26:06 2019 -0400

YARN-7537.  Add ability to load hbase config from distributed file system.
Contributed by Prabhu Joseph
---
 .../pom.xml| 13 
 .../storage/common/HBaseTimelineStorageUtils.java  | 32 ++
 .../common/TestHBaseTimelineStorageUtils.java  | 74 ++
 3 files changed, 106 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
index 26b3e4f..0288bb3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
@@ -76,6 +76,19 @@
 
 
   org.apache.hadoop
+  hadoop-hdfs
+  test
+
+
+
+  org.apache.hadoop
+  hadoop-hdfs
+  test-jar
+  test
+
+
+
+  org.apache.hadoop
   hadoop-yarn-api
   provided
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStor
 [...]
index f4cd6fb..93feb82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,11 +17,13 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
-import java.net.MalformedURLException;
-import java.net.URL;
+import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Query;
@@ -40,7 +42,6 @@ public final class HBaseTimelineStorageUtils {
   private HBaseTimelineStorageUtils() {
   }
 
-
   /**
* @param conf YARN configuration. Used to see if there is an explicit config
*  pointing to the HBase config file to read. It should not be null
@@ -48,28 +49,33 @@ public final class HBaseTimelineStorageUtils {
* @return a configuration with the HBase configuration from the classpath,
* optionally overwritten by the timeline service configuration URL 
if
* specified.
-   * @throws MalformedURLException if a timeline service HBase configuration 
URL
-   *   is specified but is a malformed URL.
+   * @throws IOException if a timeline service HBase configuration URL
+   *   is specified but unable to read it.
*/
   public static Configuration getTimelineServiceHBaseConf(Configuration conf)
-  throws MalformedURLException {
+  throws IOException {
 if (conf == null) {
   throw new NullPointerException();
 }
 
 Configuration hbaseConf;
-String timelineServiceHBaseConfFileURL =
+String timelineServiceHBaseConfFilePath =
 conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
-if (timelineServiceHBaseConfFileURL != null
-&& timelineServiceHBaseConfFileURL.length() > 0) {
+
+if (timelineServiceHBaseConfFilePath != null
+  && timelineServiceHBaseConfFilePath.length() > 0) {
   LOG.info("Using hbase configuration at " +
-  timelineServiceHBaseConfFileURL);
+  timelineServiceHBaseConfFilePa

[hadoop] branch trunk updated: YARN-9027. Fixed LevelDBCacheTimelineStore initialization. Contributed by Prabhu Joseph

2019-05-31 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4cb559e  YARN-9027.  Fixed LevelDBCacheTimelineStore initialization.   
  Contributed by Prabhu Joseph
4cb559e is described below

commit 4cb559ea7bcf00fc4a574fffad9a3f73b8c532b0
Author: Eric Yang 
AuthorDate: Fri May 31 14:31:44 2019 -0400

YARN-9027.  Fixed LevelDBCacheTimelineStore initialization.
Contributed by Prabhu Joseph
---
 .../yarn/server/timeline/LevelDBCacheTimelineStore.java |  8 
 .../yarn/server/timeline/TestLevelDBCacheTimelineStore.java | 13 +
 2 files changed, 21 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
index 9b1ffdc..f84eeeb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
@@ -38,6 +38,7 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * LevelDB implementation of {@link KeyValueBasedTimelineStore}. This
@@ -63,6 +64,8 @@ public class LevelDBCacheTimelineStore extends 
KeyValueBasedTimelineStore {
   private String dbId;
   private DB entityDb;
   private Configuration configuration;
+  private static final AtomicInteger DB_COUNTER = new AtomicInteger(0);
+  private static final String CACHED_LDB_FILENAME = "db";
 
   public LevelDBCacheTimelineStore(String id, String name) {
 super(name);
@@ -76,6 +79,11 @@ public class LevelDBCacheTimelineStore extends 
KeyValueBasedTimelineStore {
 this(id, LevelDBCacheTimelineStore.class.getName());
   }
 
+  public LevelDBCacheTimelineStore() {
+this(CACHED_LDB_FILENAME + String.valueOf(DB_COUNTER.getAndIncrement()),
+LevelDBCacheTimelineStore.class.getName());
+  }
+
   @Override
   protected synchronized void serviceInit(Configuration conf) throws Exception 
{
 configuration = conf;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java
index 66da1e0..43b04a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLevelDBCacheTimelineStore.java
@@ -19,9 +19,11 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.assertNotNull;
 
 import java.io.IOException;
 
@@ -47,6 +49,17 @@ public class TestLevelDBCacheTimelineStore extends 
TimelineStoreTestUtils {
   }
 
   @Test
+  public void testDefaultConstructor() {
+TimelineStore store = null;
+try {
+  store = ReflectionUtils.newInstance(LevelDBCacheTimelineStore.class,
+  new YarnConfiguration());
+} finally {
+  assertNotNull("LevelDBCacheTimelineStore failed to instantiate", store);
+}
+  }
+
+  @Test
   public void testGetSingleEntity() throws IOException {
 super.testGetSingleEntity();
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9542. Fix LogsCLI guessAppOwner ignores custom file format suffix. Contributed by Prabhu Joseph

2019-05-29 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 413a6b6  YARN-9542.  Fix LogsCLI guessAppOwner ignores custom file 
format suffix. Contributed by Prabhu Joseph
413a6b6 is described below

commit 413a6b63bcee2141a3aaf6749a08018407d7a5d7
Author: Eric Yang 
AuthorDate: Wed May 29 18:04:13 2019 -0400

YARN-9542.  Fix LogsCLI guessAppOwner ignores custom file format suffix.
Contributed by Prabhu Joseph

(cherry picked from commit b2a39e8883f8128e44543c2279dcc1835af72652)
---
 .../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 11 ++---
 .../AggregatedLogDeletionService.java  | 12 +++--
 .../yarn/logaggregation/LogAggregationUtils.java   | 49 
 .../hadoop/yarn/logaggregation/LogCLIHelpers.java  | 52 +-
 .../tfile/LogAggregationTFileController.java   |  8 ++--
 .../tfile/TFileAggregatedLogsBlock.java|  9 +++-
 .../TestAggregatedLogDeletionService.java  | 43 +-
 .../logaggregation/TestAggregatedLogsBlock.java|  2 +-
 .../logaggregation/TestContainerLogsUtils.java |  9 ++--
 .../logaggregation/TestLogAggregationService.java  |  8 ++--
 10 files changed, 109 insertions(+), 94 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 382125a..e479cfb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -370,7 +370,7 @@ public class TestLogsCLI {
 
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/logs-tfile/application_0_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -931,7 +931,8 @@ public class TestLogsCLI {
 
   // create the remote app dir for app
   // but for a different user testUser"
-  Path path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  Path path = new Path(remoteLogRootDir + testUser + "/logs-tfile/"
+  + appId);
   if (fs.exists(path)) {
 fs.delete(path, true);
   }
@@ -997,7 +998,7 @@ public class TestLogsCLI {
   System.currentTimeMillis(), 1000);
   String priorityUser = "priority";
   Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
-  + "/logs/" + appTest);
+  + "/logs-tfile/" + appTest);
   if (fs.exists(pathWithoutPerm)) {
 fs.delete(pathWithoutPerm, true);
   }
@@ -1355,7 +1356,7 @@ public class TestLogsCLI {
 assertNotNull(harUrl);
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_1440536969523_0001");
++ "/logs-tfile/application_1440536969523_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -1416,7 +1417,7 @@ public class TestLogsCLI {
 }
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/logs-tfile/application_0_0001");
 
 if (fs.exists(path)) {
   fs.delete(path, true);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 841b870..b251862 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -66,10 +68,12 @@ public class AggregatedLogDeletionService extends 
Abstra

[hadoop] branch branch-3.2 updated: YARN-9542. Fix LogsCLI guessAppOwner ignores custom file format suffix. Contributed by Prabhu Joseph

2019-05-29 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new b2a39e8  YARN-9542.  Fix LogsCLI guessAppOwner ignores custom file 
format suffix. Contributed by Prabhu Joseph
b2a39e8 is described below

commit b2a39e8883f8128e44543c2279dcc1835af72652
Author: Eric Yang 
AuthorDate: Wed May 29 18:04:13 2019 -0400

YARN-9542.  Fix LogsCLI guessAppOwner ignores custom file format suffix.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 11 ++---
 .../AggregatedLogDeletionService.java  | 12 +++--
 .../yarn/logaggregation/LogAggregationUtils.java   | 49 
 .../hadoop/yarn/logaggregation/LogCLIHelpers.java  | 52 +-
 .../tfile/LogAggregationTFileController.java   |  8 ++--
 .../tfile/TFileAggregatedLogsBlock.java|  9 +++-
 .../TestAggregatedLogDeletionService.java  | 43 +-
 .../logaggregation/TestAggregatedLogsBlock.java|  2 +-
 .../logaggregation/TestContainerLogsUtils.java |  9 ++--
 .../logaggregation/TestLogAggregationService.java  |  8 ++--
 10 files changed, 109 insertions(+), 94 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 5366769..791f6b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -370,7 +370,7 @@ public class TestLogsCLI {
 
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/logs-tfile/application_0_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -931,7 +931,8 @@ public class TestLogsCLI {
 
   // create the remote app dir for app
   // but for a different user testUser"
-  Path path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  Path path = new Path(remoteLogRootDir + testUser + "/logs-tfile/"
+  + appId);
   if (fs.exists(path)) {
 fs.delete(path, true);
   }
@@ -997,7 +998,7 @@ public class TestLogsCLI {
   System.currentTimeMillis(), 1000);
   String priorityUser = "priority";
   Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
-  + "/logs/" + appTest);
+  + "/logs-tfile/" + appTest);
   if (fs.exists(pathWithoutPerm)) {
 fs.delete(pathWithoutPerm, true);
   }
@@ -1355,7 +1356,7 @@ public class TestLogsCLI {
 assertNotNull(harUrl);
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_1440536969523_0001");
++ "/logs-tfile/application_1440536969523_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -1416,7 +1417,7 @@ public class TestLogsCLI {
 }
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/logs-tfile/application_0_0001");
 
 if (fs.exists(path)) {
   fs.delete(path, true);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 841b870..b251862 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -66,10 +68,12 @@ public class AggregatedLogDeletionService extends 
AbstractService {
 public LogDeletionTask(Configuration conf, long retentionSecs,

[hadoop] branch trunk updated: HDFS-14434. Ignore user.name query parameter in secure WebHDFS. Contributed by KWON BYUNGCHANG

2019-05-28 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d78854b  HDFS-14434.  Ignore user.name query parameter in secure 
WebHDFS.  Contributed by KWON BYUNGCHANG
d78854b is described below

commit d78854b928bb877f26b11b5b212a100a79941f35
Author: Eric Yang 
AuthorDate: Tue May 28 17:31:35 2019 -0400

HDFS-14434.  Ignore user.name query parameter in secure WebHDFS.
 Contributed by KWON BYUNGCHANG
---
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  16 +-
 .../hadoop/hdfs/server/common/JspHelper.java   |   8 +-
 .../hadoop/hdfs/server/common/TestJspHelper.java   |  88 +
 .../apache/hadoop/hdfs/web/TestWebHdfsTokens.java  | 217 +
 .../org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java |  47 +++--
 5 files changed, 236 insertions(+), 140 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index fe30a9a..e8049e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -170,6 +170,7 @@ public class WebHdfsFileSystem extends FileSystem
   private InetSocketAddress nnAddrs[];
   private int currentNNAddrIndex;
   private boolean disallowFallbackToInsecureCluster;
+  private boolean isInsecureCluster;
   private String restCsrfCustomHeader;
   private Set restCsrfMethodsToIgnore;
 
@@ -282,6 +283,7 @@ public class WebHdfsFileSystem extends FileSystem
 
 this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
 this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
+this.isInsecureCluster = !this.canRefreshDelegationToken;
 this.disallowFallbackToInsecureCluster = !conf.getBoolean(
 CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
 
CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
@@ -367,6 +369,7 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Fetched new token: {}", token);
   } else { // security is disabled
 canRefreshDelegationToken = false;
+isInsecureCluster = true;
   }
 }
   }
@@ -413,8 +416,7 @@ public class WebHdfsFileSystem extends FileSystem
 if (cachedHomeDirectory == null) {
   final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
   try {
-String pathFromDelegatedFS = new FsPathResponseRunner(op, null,
-new UserParam(ugi)) {
+String pathFromDelegatedFS = new FsPathResponseRunner(op, 
null){
   @Override
   String decodeResponse(Map json) throws IOException {
 return JsonUtilClient.getPath(json);
@@ -576,7 +578,8 @@ public class WebHdfsFileSystem extends FileSystem
 return url;
   }
 
-  Param[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
+  private synchronized Param[] getAuthParameters(final HttpOpParam.Op op)
+  throws IOException {
 List> authParams = Lists.newArrayList();
 // Skip adding delegation token for token operations because these
 // operations require authentication.
@@ -593,7 +596,12 @@ public class WebHdfsFileSystem extends FileSystem
 authParams.add(new DoAsParam(userUgi.getShortUserName()));
 userUgi = realUgi;
   }
-  authParams.add(new UserParam(userUgi.getShortUserName()));
+  UserParam userParam = new UserParam((userUgi.getShortUserName()));
+
+  //in insecure, use user.name parameter, in secure, use spnego auth
+  if(isInsecureCluster) {
+authParams.add(userParam);
+  }
 }
 return authParams.toArray(new Param[0]);
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index eb488e8..2c65c3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -118,12 +118,9 @@ public class JspHelper {
   remoteUser = request.getRemoteUser();
   final String tokenString = 
request.getParameter(DELEGATION_PARAMETER_NAME);
   if (tokenString != null) {
-// Token-based connections need only verify the effective user, and
-// disallow proxying to different user.  Proxy authorization checks
-// are not required since the checks apply to is

[hadoop] branch trunk updated: YARN-9558. Fixed LogAggregation test cases. Contributed by Prabhu Joseph

2019-05-23 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 460ba7f  YARN-9558.  Fixed LogAggregation test cases. 
Contributed by Prabhu Joseph
460ba7f is described below

commit 460ba7fb14114f44e14a660f533f32c54e504478
Author: Eric Yang 
AuthorDate: Thu May 23 18:38:47 2019 -0400

YARN-9558.  Fixed LogAggregation test cases.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/client/cli/TestLogsCLI.java |  95 -
 .../AggregatedLogDeletionService.java  |  10 +-
 .../yarn/logaggregation/LogAggregationUtils.java   |  49 +--
 .../hadoop/yarn/logaggregation/LogCLIHelpers.java  | 156 -
 .../tfile/LogAggregationTFileController.java   |   8 +-
 .../tfile/TFileAggregatedLogsBlock.java|   9 +-
 .../TestAggregatedLogDeletionService.java  |  19 ++-
 .../logaggregation/TestAggregatedLogsBlock.java|   2 +-
 .../logaggregation/TestContainerLogsUtils.java |  12 +-
 .../logaggregation/TestLogAggregationService.java  |  14 +-
 10 files changed, 235 insertions(+), 139 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 801cf40..7a229dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.yarn.client.cli;
 
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT;
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT;
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -82,6 +86,7 @@ import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
+import 
org.apache.hadoop.yarn.logaggregation.filecontroller.ifile.LogAggregationIndexedFileController;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -407,7 +412,7 @@ public class TestLogsCLI {
 
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/bucket_logs/0001/application_0_0001");
++ "/bucket-logs-tfile/0001/application_0_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -966,8 +971,8 @@ public class TestLogsCLI {
   createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes);
 
   // create the remote app dir for app but for a different user testUser
-  Path path = new Path(remoteLogRootDir + testUser + "/bucket_logs/0001/"
-  + appId);
+  Path path = new Path(remoteLogRootDir + testUser +
+  "/bucket-logs-tfile/0001/" + appId);
   if (fs.exists(path)) {
 fs.delete(path, true);
   }
@@ -1049,7 +1054,7 @@ public class TestLogsCLI {
   System.currentTimeMillis(), 1000);
   String priorityUser = "priority";
   Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
-  + "/bucket_logs/1000/" + appTest);
+  + "/bucket-logs-tfile/1000/" + appTest);
   if (fs.exists(pathWithoutPerm)) {
 fs.delete(pathWithoutPerm, true);
   }
@@ -1139,6 +1144,84 @@ public class TestLogsCLI {
 }
   }
 
+  @Test (timeout = 5000)
+  public void testGuessAppOwnerWithCustomSuffix() throws Exception {
+String remoteLogRootDir = "target/logs/";
+String jobUser = "user1";
+String loggedUser = "user2";
+Configuration conf = new YarnConfiguration();
+conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
+conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
+String controllerName = "indexed";
+conf.set(YarnConfiguration.LOG_AGGREGATION_

[hadoop] branch trunk updated: YARN-9080. Added clean up of bucket directories. Contributed by Prabhu Joseph, Peter Bacsko, Szilard Nemeth

2019-05-23 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7b03072  YARN-9080.  Added clean up of bucket directories. 
Contributed by Prabhu Joseph, Peter Bacsko, Szilard Nemeth
7b03072 is described below

commit 7b03072fd466de5817fdcd65f9dd88fd59c0bb00
Author: Eric Yang 
AuthorDate: Thu May 23 12:08:44 2019 -0400

YARN-9080.  Added clean up of bucket directories.
Contributed by Prabhu Joseph, Peter Bacsko, Szilard Nemeth
---
 .../timeline/EntityGroupFSTimelineStore.java   | 65 +-
 .../timeline/TestEntityGroupFSTimelineStore.java   | 53 +-
 2 files changed, 100 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
index 80baf89..498230a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
@@ -24,6 +24,8 @@ import com.fasterxml.jackson.databind.type.TypeFactory;
 import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -456,43 +458,76 @@ public class EntityGroupFSTimelineStore extends 
CompositeService
*dirpath should be a directory that contains a set of
*application log directories. The cleaner method will not
*work if the given dirpath itself is an application log dir.
-   * @param fs
* @param retainMillis
* @throws IOException
*/
   @InterfaceAudience.Private
   @VisibleForTesting
-  void cleanLogs(Path dirpath, FileSystem fs, long retainMillis)
+  void cleanLogs(Path dirpath, long retainMillis)
   throws IOException {
 long now = Time.now();
+RemoteIterator iter = list(dirpath);
+while (iter.hasNext()) {
+  FileStatus stat = iter.next();
+  Path clusterTimeStampPath = stat.getPath();
+  if (isValidClusterTimeStampDir(clusterTimeStampPath)) {
+MutableBoolean appLogDirPresent = new MutableBoolean(false);
+cleanAppLogDir(clusterTimeStampPath, retainMillis, appLogDirPresent);
+if (appLogDirPresent.isFalse() &&
+(now - stat.getModificationTime() > retainMillis)) {
+  deleteDir(clusterTimeStampPath);
+}
+  }
+}
+  }
+
+
+  private void cleanAppLogDir(Path dirpath, long retainMillis,
+  MutableBoolean appLogDirPresent) throws IOException {
+long now = Time.now();
 // Depth first search from root directory for all application log dirs
 RemoteIterator iter = list(dirpath);
 while (iter.hasNext()) {
   FileStatus stat = iter.next();
+  Path childPath = stat.getPath();
   if (stat.isDirectory()) {
 // If current is an application log dir, decide if we need to remove it
 // and remove if necessary.
 // Otherwise, keep iterating into it.
-ApplicationId appId = parseApplicationId(dirpath.getName());
+ApplicationId appId = parseApplicationId(childPath.getName());
 if (appId != null) { // Application log dir
-  if (shouldCleanAppLogDir(dirpath, now, fs, retainMillis)) {
-try {
-  LOG.info("Deleting {}", dirpath);
-  if (!fs.delete(dirpath, true)) {
-LOG.error("Unable to remove " + dirpath);
-  }
-  metrics.incrLogsDirsCleaned();
-} catch (IOException e) {
-  LOG.error("Unable to remove " + dirpath, e);
-}
+  appLogDirPresent.setTrue();
+  if (shouldCleanAppLogDir(childPath, now, fs, retainMillis)) {
+deleteDir(childPath);
   }
 } else { // Keep cleaning inside
-  cleanLogs(stat.getPath(), fs, retainMillis);
+  cleanAppLogDir(childPath, retainMillis, appLogDirPresent);
 }
   }
 }
   }
 
+  private void deleteDir(Path path) {
+try {
+  LOG.info(&

[hadoop] branch trunk updated: HADOOP-16287. Implement ProxyUserAuthenticationFilter for web protocol impersonation. Contributed by Prabhu Joseph

2019-05-23 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ea0b1d8  HADOOP-16287.  Implement ProxyUserAuthenticationFilter for 
web protocol impersonation.Contributed by Prabhu Joseph
ea0b1d8 is described below

commit ea0b1d8fba57f56e2a75e9a70d4768ba75952823
Author: Eric Yang 
AuthorDate: Thu May 23 11:36:32 2019 -0400

HADOOP-16287.  Implement ProxyUserAuthenticationFilter for web protocol 
impersonation.
   Contributed by Prabhu Joseph
---
 hadoop-common-project/hadoop-common/pom.xml|  10 ++
 .../server/ProxyUserAuthenticationFilter.java  | 115 +++
 .../ProxyUserAuthenticationFilterInitializer.java  |  60 ++
 .../authentication/server/package-info.java|  22 
 .../src/site/markdown/HttpAuthentication.md|   8 ++
 .../server/TestProxyUserAuthenticationFilter.java  | 125 +
 6 files changed, 340 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 73b4c18..54efeeb 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -161,6 +161,16 @@
   test
 
 
+  org.assertj
+  assertj-core
+  test
+
+
+  org.glassfish.grizzly
+  grizzly-http-servlet
+  test
+
+
   commons-beanutils
   commons-beanutils
   compile
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
new file mode 100644
index 000..42902b3
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/server/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.HttpExceptionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.security.Principal;
+import java.util.Enumeration;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+
+/**
+ * AuthenticationFilter which adds support to perform operations
+ * using end user instead of proxy user. Fetches the end user from
+ * doAs Query Parameter.
+ */
+public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+  ProxyUserAuthenticationFilter.class);
+
+  private static final String DO_AS = "doAs";
+  public static final String PROXYUSER_PREFIX = "proxyuser";
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+Configuration conf = getProxyuserConfiguration(filterConfig);
+ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
+super.init(filterConfig);
+  }
+
+  @Override
+  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+  HttpServletResponse response) throws IOException, ServletException {
+
+String doAsUser = request.getParameter(DO_AS);
+if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) {
+  LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ",
+  doAsUser, request.getRemoteUser(), request.getRemoteAddr());
+  UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ?
+  UserGroupInformation.createRemoteUser(request.getRemoteUser())
+  : null;
+  if (requestUgi != null) {
+requestUgi = UserGroupInformation.createProxyUser(doAsUser,
+r

[hadoop] branch trunk updated: YARN-9554. Fixed TimelineEntity DAO serialization handling. Contributed by Prabhu Joseph

2019-05-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fab5b80  YARN-9554.  Fixed TimelineEntity DAO serialization handling.  
   Contributed by Prabhu Joseph
fab5b80 is described below

commit fab5b80a36bad90e03f7e5e37ded47d67d6e2e81
Author: Eric Yang 
AuthorDate: Thu May 16 16:35:54 2019 -0400

YARN-9554.  Fixed TimelineEntity DAO serialization handling.
Contributed by Prabhu Joseph
---
 .../webapp/ContextFactory.java | 91 +++---
 .../webapp/TestAHSWebServices.java | 11 ---
 .../timeline/webapp/TestTimelineWebServices.java   | 21 +
 3 files changed, 102 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
index 67668a9..ff52324 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -18,34 +19,104 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Arrays;
 import java.util.Map;
 import java.lang.reflect.Method;
 import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
 
 /**
  * ContextFactory to reuse JAXBContextImpl for DAO Classes.
  */
 public final class ContextFactory {
 
-  private static JAXBContext jaxbContext;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContextFactory.class);
+
+  private static JAXBContext cacheContext;
+
+  // All the dao classes from TimelineWebService and AHSWebService
+  // added except TimelineEntity and TimelineEntities
+  private static final Class[] CTYPES = {AppInfo.class, AppsInfo.class,
+  AppAttemptInfo.class, AppAttemptsInfo.class, ContainerInfo.class,
+  ContainersInfo.class, RemoteExceptionData.class, TimelineDomain.class,
+  TimelineDomains.class, TimelineEvents.class, TimelinePutResponse.class};
+  private static final Set CLASS_SET =
+  new HashSet<>(Arrays.asList(CTYPES));
+
+  // TimelineEntity has java.util.Set interface which JAXB
+  // can't handle and throws IllegalAnnotationExceptions
+  private static final Class[] IGNORE_TYPES = {TimelineEntity.class,
+  TimelineEntities.class};
+  private static final Set IGNORE_SET =
+  new HashSet<>(Arrays.asList(IGNORE_TYPES));
+
+  private static JAXBException je =
+  new JAXBException("TimelineEntity and TimelineEntities has " +
+  "IllegalAnnotation");
+
+  private static StackTraceElement[] stackTrace = new StackTraceElement[]{
+  new StackTraceElement(ContextFactory.class.getName(),
+  "createContext", "ContextFactory.java", -1)};
 
   private ContextFactory() {
   }
 
+  public static JAXBContext newContext(Class[] classes,
+  Map properties) throws Exception {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class, Map.class);
+return (JAXBContext) m.invoke((Object) null, classes, properties);
+  }
+
   // Cal

[hadoop] branch branch-3.1 updated: YARN-8622. Fixed container-executor compilation on MacOSX. Contributed by Siyao Meng

2019-05-09 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new bf013aa  YARN-8622.  Fixed container-executor compilation on MacOSX.   
  Contributed by Siyao Meng
bf013aa is described below

commit bf013aa06ee3d145c7dd74b9c4cfcd3012532491
Author: Eric Yang 
AuthorDate: Thu Apr 18 18:57:14 2019 -0400

YARN-8622.  Fixed container-executor compilation on MacOSX.
Contributed by Siyao Meng

(cherry picked from commit ef97a20831677c055aa6bff6ad0649cbb3a56a86)
---
 .../src/main/native/container-executor/impl/utils/docker-util.c   | 8 
 1 file changed, 8 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 388045f..34f2052 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1380,14 +1380,22 @@ static int check_privileges(const char *user) {
 exit(INITIALIZE_USER_FAILED);
   }
 
+#ifdef __linux__
   int rc = getgrouplist(user, pw->pw_gid, groups, &ngroups);
+#else
  int rc = getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups);
+#endif
   if (rc < 0) {
 groups = (gid_t *) alloc_and_clear_memory(ngroups, sizeof(gid_t));
 if (groups == NULL) {
   fprintf(ERRORFILE, "Failed to allocate buffer for group lookup for user 
%s.\n", user);
   exit(OUT_OF_MEMORY);
 }
+#ifdef __linux__
 if (getgrouplist(user, pw->pw_gid, groups, &ngroups) == -1) {
+#else
+if (getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups) == -1) {
+#endif
   fprintf(ERRORFILE, "Fail to lookup groups for user %s.\n", user);
   ret = 2;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9535. Fixed spell check for container-executor in Docker documentation. Contributed by Charan Hebri

2019-05-08 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0c5fa2e  YARN-9535.  Fixed spell check for container-executor in 
Docker documentation. Contributed by Charan Hebri
0c5fa2e is described below

commit 0c5fa2e7d9de87acdeb66945f7103e2f282498c8
Author: Eric Yang 
AuthorDate: Wed May 8 16:56:26 2019 -0400

YARN-9535.  Fixed spell check for container-executor in Docker 
documentation.
Contributed by Charan Hebri
---
 .../hadoop-yarn-site/src/site/markdown/DockerContainers.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 64988c1..b5c54be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -253,7 +253,7 @@ The following properties should be set in yarn-site.xml:
 
 ```
 
-In addition, a container-executer.cfg file must exist and contain settings for
+In addition, a container-executor.cfg file must exist and contain settings for
 the container executor. The file must be owned by root with permissions 0400.
 The format of the file is the standard Java properties file format, for example
 
@@ -365,7 +365,7 @@ Application Submission
 Before attempting to launch a Docker container, make sure that the LCE
 configuration is working for applications requesting regular YARN containers.
 If after enabling the LCE one or more NodeManagers fail to start, the cause is
-most likely that the ownership and/or permissions on the container-executer
+most likely that the ownership and/or permissions on the container-executor
 binary are incorrect. Check the logs to confirm.
 
 In order to run an application in a Docker container, set the following


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9524. Fixed TestAHSWebService and TestLogsCLI unit tests. Contributed by Prabhu Joseph

2019-05-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 49e1292  YARN-9524.  Fixed TestAHSWebService and TestLogsCLI unit 
tests. Contributed by Prabhu Joseph
49e1292 is described below

commit 49e1292ea3e4d00ab0b0191bd8c4ea4d2afed671
Author: Eric Yang 
AuthorDate: Mon May 6 19:48:45 2019 -0400

YARN-9524.  Fixed TestAHSWebService and TestLogsCLI unit tests.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 27 +---
 .../yarn/logaggregation/LogAggregationUtils.java   | 48 --
 .../hadoop/yarn/logaggregation/LogCLIHelpers.java  | 34 ++-
 .../LogAggregationFileController.java  |  2 +-
 .../ifile/LogAggregationIndexedFileController.java |  2 +-
 5 files changed, 92 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index f9061eb..801cf40 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -407,7 +407,7 @@ public class TestLogsCLI {
 
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/bucket_logs/0001/application_0_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -925,7 +925,6 @@ public class TestLogsCLI {
   public void testFetchApplictionLogsAsAnotherUser() throws Exception {
 String remoteLogRootDir = "target/logs/";
 String rootLogDir = "target/LocalLogs";
-
 String testUser = "test";
 UserGroupInformation testUgi = UserGroupInformation
 .createRemoteUser(testUser);
@@ -966,9 +965,9 @@ public class TestLogsCLI {
   // create container logs in localLogDir for app
   createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes);
 
-  // create the remote app dir for app
-  // but for a different user testUser"
-  Path path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  // create the remote app dir for app but for a different user testUser
+  Path path = new Path(remoteLogRootDir + testUser + "/bucket_logs/0001/"
+  + appId);
   if (fs.exists(path)) {
 fs.delete(path, true);
   }
@@ -1016,6 +1015,22 @@ public class TestLogsCLI {
   logMessage(containerId, "syslog")));
   sysOutStream.reset();
 
+  // Verify appOwner guessed correctly with older log dir dtructure
+  path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  if (fs.exists(path)) {
+fs.delete(path, true);
+  }
+  assertTrue(fs.mkdirs(path));
+  uploadContainerLogIntoRemoteDir(testUgi, configuration, rootLogDirs,
+  nodeId, containerId, path, fs);
+
+  exitCode = cli.run(new String[] {
+  "-applicationId", appId.toString()});
+  assertTrue(exitCode == 0);
+  assertTrue(sysOutStream.toString().contains(
+  logMessage(containerId, "syslog")));
+  sysOutStream.reset();
+
   // Verify that we could get the err message "Can not find the appOwner"
   // if we do not specify the appOwner, can not get appReport, and
   // the app does not exist in remote dir.
@@ -1034,7 +1049,7 @@ public class TestLogsCLI {
   System.currentTimeMillis(), 1000);
   String priorityUser = "priority";
   Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
-  + "/logs/" + appTest);
+  + "/bucket_logs/1000/" + appTest);
   if (fs.exists(pathWithoutPerm)) {
 fs.delete(pathWithoutPerm, true);
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
index 3f5151b..deff2aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
@@ -83,6 +83,30 @@ public class LogAggregationUtils {
  suffix), appId.toString());
   }
 
+  public static Path getOlderRemoteAppLogDir(Configuration conf,
+ 

[hadoop] branch trunk updated: YARN-6929. Improved partition algorithm for yarn remote-app-log-dir. Contributed by Prabhu Joseph

2019-04-30 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new accb811  YARN-6929.  Improved partition algorithm for yarn 
remote-app-log-dir. Contributed by Prabhu Joseph
accb811 is described below

commit accb811e5727f2a780a41cd5e50bab47a0cccb68
Author: Eric Yang 
AuthorDate: Tue Apr 30 17:04:59 2019 -0400

YARN-6929.  Improved partition algorithm for yarn remote-app-log-dir.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../AggregatedLogDeletionService.java  |  16 +-
 .../yarn/logaggregation/LogAggregationUtils.java   | 184 +++--
 .../LogAggregationFileController.java  |  49 +++---
 .../LogAggregationFileControllerFactory.java   |  37 +++--
 .../ifile/LogAggregationIndexedFileController.java |   7 +
 .../src/main/resources/yarn-default.xml|   8 +
 .../TestAggregatedLogDeletionService.java  | 163 +++---
 .../logaggregation/TestContainerLogsUtils.java |   5 +-
 .../logaggregation/TestLogAggregationService.java  |  40 -
 10 files changed, 398 insertions(+), 122 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b21d763..273f1a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1434,13 +1434,20 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR = "/tmp/logs";
 
   /**
-   * The remote log dir will be created at
-   * NM_REMOTE_APP_LOG_DIR/${user}/NM_REMOTE_APP_LOG_DIR_SUFFIX/${appId}
+   * The remote log dir will be created at below location.
+   * NM_REMOTE_APP_LOG_DIR/${user}/bucket_{NM_REMOTE_APP_LOG_DIR_SUFFIX}
+   * /${bucketDir}/${appId}
*/
   public static final String NM_REMOTE_APP_LOG_DIR_SUFFIX = 
 NM_PREFIX + "remote-app-log-dir-suffix";
   public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX="logs";
 
+  /** Specifies whether Older Application Log Directory is included. */
+  public static final String NM_REMOTE_APP_LOG_DIR_INCLUDE_OLDER =
+  NM_PREFIX + "remote-app-log-dir-include-older";
+  public static final boolean DEFAULT_NM_REMOTE_APP_LOG_DIR_INCLUDE_OLDER =
+  true;
+
   public static final String YARN_LOG_SERVER_URL =
 YARN_PREFIX + "log.server.url";
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 90395aa..09a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -67,7 +67,7 @@ public class AggregatedLogDeletionService extends 
AbstractService {
 public LogDeletionTask(Configuration conf, long retentionSecs, 
ApplicationClientProtocol rmClient) {
   this.conf = conf;
   this.retentionMillis = retentionSecs * 1000;
-  this.suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
+  this.suffix = LogAggregationUtils.getBucketSuffix();
   this.remoteRootLogDir =
 new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
 YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
@@ -82,8 +82,18 @@ public class AggregatedLogDeletionService extends 
AbstractService {
 FileSystem fs = remoteRootLogDir.getFileSystem(conf);
 for(FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
   if(userDir.isDirectory()) {
-Path userDirPath = new Path(userDir.getPath(), suffix);
-deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
+for (FileStatus suffixDir : fs.listStatus(userDir.getPath())) {
+  Path suffixDirPath = suffixDir.getPath();
+  if (suffixDir.isDirectory() && suffixDirPath.getName().
+  startsWith(suffix)) {
+for (FileStatus bucketDir : fs.listStatus(suffixDirPath)) {
+  if (bucketDir.isDirectory()) {
+deleteOldLogDirsFrom(bucketDir.getP

[hadoop] branch trunk updated: YARN-8551. Project setup for MaWo application. Contributed by Yesha Vora

2019-04-22 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a54c1e3  YARN-8551. Project setup for MaWo application.
Contributed by Yesha Vora
a54c1e3 is described below

commit a54c1e3ace5a1eea2ada55ae3990fcfde0dcc4b1
Author: Eric Yang 
AuthorDate: Mon Apr 22 16:56:26 2019 -0400

YARN-8551. Project setup for MaWo application.
   Contributed by Yesha Vora
---
 hadoop-project/pom.xml |   5 +
 .../dev-support/findbugs-exclude.xml   |  17 +
 .../dev-support/findbugs-exclude.xml   |  17 +
 .../hadoop-yarn-applications-mawo-core/pom.xml | 173 +++
 .../src/assembly/bin.xml   |  41 ++
 .../mawo/server/common/AbstractTask.java   | 246 ++
 .../mawo/server/common/CompositeTask.java  |  42 ++
 .../applications/mawo/server/common/DieTask.java   |  32 ++
 .../mawo/server/common/MawoConfiguration.java  | 502 +
 .../applications/mawo/server/common/NullTask.java  |  33 ++
 .../mawo/server/common/SimpleTask.java |  57 +++
 .../applications/mawo/server/common/Task.java  |  83 
 .../applications/mawo/server/common/TaskId.java| 149 ++
 .../mawo/server/common/TaskStatus.java | 347 ++
 .../applications/mawo/server/common/TaskType.java  |  45 ++
 .../mawo/server/common/TeardownTask.java   |  48 ++
 .../mawo/server/common/WorkAssignmentProtocol.java |  65 +++
 .../mawo/server/common/package-info.java   |  20 +
 .../applications/mawo/server/master/job/JobId.java | 128 ++
 .../mawo/server/master/job/package-info.java   |  20 +
 .../applications/mawo/server/worker/WorkerId.java  | 162 +++
 .../mawo/server/worker/package-info.java   |  20 +
 .../src/main/resources/mawo-default.properties |  41 ++
 .../mawo/server/common/TestMaWoConfiguration.java  |  60 +++
 .../src/test/resources/mawo.properties |  28 ++
 .../hadoop-yarn-applications-mawo/pom.xml  |  37 ++
 .../hadoop-yarn/hadoop-yarn-applications/pom.xml   |   1 +
 .../src/site/markdown/yarn-service/MasterWorker.md |  36 ++
 28 files changed, 2455 insertions(+)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1dc0baa..865e03d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1101,6 +1101,11 @@
 2.23.4
   
   
+org.mockito
+mockito-all
+1.8.5
+  
+  
 org.objenesis
 objenesis
 1.0
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/dev-support/findbugs-exclude.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/dev-support/findbugs-exclude.xml
new file mode 100644
index 000..e4e59d9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/dev-support/findbugs-exclude.xml
@@ -0,0 +1,17 @@
+
+
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/dev-support/findbugs-exclude.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/dev-support/findbugs-exclude.xml
new file mode 100644
index 000..e4e59d9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/dev-support/findbugs-exclude.xml
@@ -0,0 +1,17 @@
+
+
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
new file mode 100644
index 000..02e0fdc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml
@@ -0,0 +1,173 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+
+hadoop-applications-mawo
+org.apache.hadoop.applications.mawo
+3.3.0-SNAPSHOT
+
+  4.0.0
+
+  
+  hadoop-applications-mawo-core
+  jar
+
+  Apache Hadoop YARN Application MaWo Core
+  http://maven.apache.org
+
+  
+UTF-8
+  
+
+  
+
+  junit
+  junit
+  test
+
+
+
+  org.apache.hadoop
+  hadoop-common
+
+
+  
+  org.apache.hadoop
+  hadoop-common
+  test-jar
+  test
+  
+
+
+  com.google.inject

[hadoop] branch branch-3.1 updated: YARN-8587. Added retries for fetching docker exit code. Contributed by Charo Zhang

2019-04-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 8b228a4  YARN-8587. Added retries for fetching docker exit code.   
 Contributed by Charo Zhang
8b228a4 is described below

commit 8b228a42e979fefe8ed7e16a0debe161ae7892a4
Author: Eric Yang 
AuthorDate: Wed Oct 24 17:28:23 2018 -0400

YARN-8587. Added retries for fetching docker exit code.
   Contributed by Charo Zhang

(cherry picked from commit c16c49b8c3b8e2e42c00e79a50e7ae029ebe98e2)
---
 .../container-executor/impl/container-executor.c   | 32 --
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 7765308..7e86e88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1765,20 +1765,22 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   docker_binary, container_id);
   fprintf(LOGFILE, "Obtaining the exit code...\n");
   fprintf(LOGFILE, "Docker inspect command: %s\n", 
docker_inspect_exitcode_command);
-  FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, "r");
-  if(inspect_exitcode_docker == NULL) {
-fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker is 
null\n");
-fflush(ERRORFILE);
-exit_code = -1;
-goto cleanup;
-  }
-  res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
-  if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
-  fprintf (ERRORFILE,
-   "Could not inspect docker to get exitcode:  %s.\n", 
docker_inspect_exitcode_command);
-fflush(ERRORFILE);
-exit_code = -1;
-goto cleanup;
+  int count = 0;
+  int max_retries = get_max_retries();
+  while (count < max_retries) {
+FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, 
"r");
+res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
+if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
+  fprintf (ERRORFILE, "Could not inspect docker to get Exit code %s.\n", 
docker_inspect_exitcode_command);
+  fflush(ERRORFILE);
+  exit_code = -1;
+} else {
+  if (exit_code != 0) {
+break;
+  }
+}
+sleep(3);
+count++;
   }
   fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
 
@@ -2708,4 +2710,4 @@ int remove_docker_container(char**argv, int argc) {
 exit_code = clean_docker_cgroups(yarn_hierarchy, container_id);
   }
   return exit_code;
-}
\ No newline at end of file
+}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-8587. Added retries for fetching docker exit code. Contributed by Charo Zhang

2019-04-19 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new ac85aa8  YARN-8587. Added retries for fetching docker exit code.   
 Contributed by Charo Zhang
ac85aa8 is described below

commit ac85aa80d973d56452bc678637b5868ab43bee4f
Author: Eric Yang 
AuthorDate: Wed Oct 24 17:28:23 2018 -0400

YARN-8587. Added retries for fetching docker exit code.
   Contributed by Charo Zhang

(cherry picked from commit c16c49b8c3b8e2e42c00e79a50e7ae029ebe98e2)
---
 .../container-executor/impl/container-executor.c   | 32 --
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 7765308..7e86e88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1765,20 +1765,22 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   docker_binary, container_id);
   fprintf(LOGFILE, "Obtaining the exit code...\n");
   fprintf(LOGFILE, "Docker inspect command: %s\n", 
docker_inspect_exitcode_command);
-  FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, "r");
-  if(inspect_exitcode_docker == NULL) {
-fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker is 
null\n");
-fflush(ERRORFILE);
-exit_code = -1;
-goto cleanup;
-  }
-  res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
-  if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
-  fprintf (ERRORFILE,
-   "Could not inspect docker to get exitcode:  %s.\n", 
docker_inspect_exitcode_command);
-fflush(ERRORFILE);
-exit_code = -1;
-goto cleanup;
+  int count = 0;
+  int max_retries = get_max_retries();
+  while (count < max_retries) {
+FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, 
"r");
+res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
+if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
+  fprintf (ERRORFILE, "Could not inspect docker to get Exit code %s.\n", 
docker_inspect_exitcode_command);
+  fflush(ERRORFILE);
+  exit_code = -1;
+} else {
+  if (exit_code != 0) {
+break;
+  }
+}
+sleep(3);
+count++;
   }
   fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
 
@@ -2708,4 +2710,4 @@ int remove_docker_container(char**argv, int argc) {
 exit_code = clean_docker_cgroups(yarn_hierarchy, container_id);
   }
   return exit_code;
-}
\ No newline at end of file
+}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-8622. Fixed container-executor compilation on MacOSX. Contributed by Siyao Meng

2019-04-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4a64dab  YARN-8622.  Fixed container-executor compilation on MacOSX.   
  Contributed by Siyao Meng
4a64dab is described below

commit 4a64dab0ddb07c0724f9e805bd73fe3cff6db809
Author: Eric Yang 
AuthorDate: Thu Apr 18 18:57:14 2019 -0400

YARN-8622.  Fixed container-executor compilation on MacOSX.
Contributed by Siyao Meng

(cherry picked from commit ef97a20831677c055aa6bff6ad0649cbb3a56a86)
---
 .../src/main/native/container-executor/impl/utils/docker-util.c   | 8 
 1 file changed, 8 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 1786062..44adfc2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1381,14 +1381,22 @@ static int check_privileges(const char *user) {
 exit(INITIALIZE_USER_FAILED);
   }
 
+#ifdef __linux__
   int rc = getgrouplist(user, pw->pw_gid, groups, &ngroups);
+#else
+  int rc = getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups);
+#endif
   if (rc < 0) {
 groups = (gid_t *) alloc_and_clear_memory(ngroups, sizeof(gid_t));
 if (groups == NULL) {
   fprintf(ERRORFILE, "Failed to allocate buffer for group lookup for user 
%s.\n", user);
   exit(OUT_OF_MEMORY);
 }
+#ifdef __linux__
 if (getgrouplist(user, pw->pw_gid, groups, &ngroups) == -1) {
+#else
+if (getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups) == -1) {
+#endif
   fprintf(ERRORFILE, "Fail to lookup groups for user %s.\n", user);
   ret = 2;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-8622. Fixed container-executor compilation on MacOSX. Contributed by Siyao Meng

2019-04-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ef97a20  YARN-8622.  Fixed container-executor compilation on MacOSX.   
  Contributed by Siyao Meng
ef97a20 is described below

commit ef97a20831677c055aa6bff6ad0649cbb3a56a86
Author: Eric Yang 
AuthorDate: Thu Apr 18 18:57:14 2019 -0400

YARN-8622.  Fixed container-executor compilation on MacOSX.
Contributed by Siyao Meng
---
 .../src/main/native/container-executor/impl/utils/docker-util.c   | 8 
 1 file changed, 8 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 90d96bb..4abee02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1462,14 +1462,22 @@ static int check_privileges(const char *user) {
 exit(INITIALIZE_USER_FAILED);
   }
 
+#ifdef __linux__
   int rc = getgrouplist(user, pw->pw_gid, groups, &ngroups);
+#else
+  int rc = getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups);
+#endif
   if (rc < 0) {
 groups = (gid_t *) alloc_and_clear_memory(ngroups, sizeof(gid_t));
 if (groups == NULL) {
   fprintf(ERRORFILE, "Failed to allocate buffer for group lookup for user 
%s.\n", user);
   exit(OUT_OF_MEMORY);
 }
+#ifdef __linux__
 if (getgrouplist(user, pw->pw_gid, groups, &ngroups) == -1) {
+#else
+if (getgrouplist(user, pw->pw_gid, (int *)groups, &ngroups) == -1) {
+#endif
   fprintf(ERRORFILE, "Fail to lookup groups for user %s.\n", user);
   ret = 2;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-6695. Fixed NPE in publishing appFinished events to ATSv2. Contributed by Prabhu Joseph

2019-04-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 68a98be  YARN-6695. Fixed NPE in publishing appFinished events to 
ATSv2. Contributed by Prabhu Joseph
68a98be is described below

commit 68a98be8a24b2ab875cb1a1f2157dde24cfc9a24
Author: Eric Yang 
AuthorDate: Thu Apr 18 12:29:37 2019 -0400

YARN-6695. Fixed NPE in publishing appFinished events to ATSv2.
   Contributed by Prabhu Joseph

(cherry picked from commit df76cdc8959c51b71704ab5c38335f745a6f35d8)
---
 .../metrics/TimelineServiceV2Publisher.java| 13 --
 .../metrics/TestSystemMetricsPublisherForV2.java   | 47 ++
 .../src/site/markdown/TimelineServiceV2.md |  8 
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 89905e5..11db6b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -454,10 +454,15 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
   }
   TimelineCollector timelineCollector =
   rmTimelineCollectorManager.get(appId);
-  TimelineEntities entities = new TimelineEntities();
-  entities.addEntity(entity);
-  timelineCollector.putEntities(entities,
-  UserGroupInformation.getCurrentUser());
+  if (timelineCollector != null) {
+TimelineEntities entities = new TimelineEntities();
+entities.addEntity(entity);
+timelineCollector.putEntities(entities,
+UserGroupInformation.getCurrentUser());
+  } else {
+LOG.debug("Cannot find active collector while publishing entity "
++ entity);
+  }
 } catch (IOException e) {
   LOG.error("Error when publishing entity " + entity);
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index 3b503e5..ca26f58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,12 +28,17 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -291,6 +296,48 @@ public class TestSystemMetricsPublisherForV2 {
 ContainerMetricsConstants.CREATED_IN_RM_EVENT_TYPE, 0, 0);
   }
 
+  @Test(timeout = 10000)
+  public void testPutEntityWhenNoCollector() throws Exception {
+// Validating the logs as DrainDispatcher won't throw exception
+class TestAppender extends AppenderSkeleton {
+  private final List<LoggingEvent> log = new ArrayList<>();
+
+  @Override
+  public boolean requiresLayout() {
+return false;
+  }
+
+  @Override
+  protected void append(final LoggingEvent loggingEvent) {
+log.add(loggingEvent);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  public List<LoggingEvent> getLog() {
+return new ArrayList<>(log);
+  }
+}
+
+TestAppender appender = new TestAppender();
+final Logger logger = Logger.getRootLogger();

[hadoop] branch branch-3.2 updated: YARN-6695. Fixed NPE in publishing appFinished events to ATSv2. Contributed by Prabhu Joseph

2019-04-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2503409  YARN-6695. Fixed NPE in publishing appFinished events to 
ATSv2. Contributed by Prabhu Joseph
2503409 is described below

commit 2503409977e31276c43252e5ff3c759ca1931dbd
Author: Eric Yang 
AuthorDate: Thu Apr 18 12:29:37 2019 -0400

YARN-6695. Fixed NPE in publishing appFinished events to ATSv2.
   Contributed by Prabhu Joseph

(cherry picked from commit df76cdc8959c51b71704ab5c38335f745a6f35d8)
---
 .../metrics/TimelineServiceV2Publisher.java| 13 --
 .../metrics/TestSystemMetricsPublisherForV2.java   | 47 ++
 .../src/site/markdown/TimelineServiceV2.md |  8 
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 89905e5..11db6b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -454,10 +454,15 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
   }
   TimelineCollector timelineCollector =
   rmTimelineCollectorManager.get(appId);
-  TimelineEntities entities = new TimelineEntities();
-  entities.addEntity(entity);
-  timelineCollector.putEntities(entities,
-  UserGroupInformation.getCurrentUser());
+  if (timelineCollector != null) {
+TimelineEntities entities = new TimelineEntities();
+entities.addEntity(entity);
+timelineCollector.putEntities(entities,
+UserGroupInformation.getCurrentUser());
+  } else {
+LOG.debug("Cannot find active collector while publishing entity "
++ entity);
+  }
 } catch (IOException e) {
   LOG.error("Error when publishing entity " + entity);
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index 3b503e5..ca26f58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,12 +28,17 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -291,6 +296,48 @@ public class TestSystemMetricsPublisherForV2 {
 ContainerMetricsConstants.CREATED_IN_RM_EVENT_TYPE, 0, 0);
   }
 
+  @Test(timeout = 10000)
+  public void testPutEntityWhenNoCollector() throws Exception {
+// Validating the logs as DrainDispatcher won't throw exception
+class TestAppender extends AppenderSkeleton {
+  private final List<LoggingEvent> log = new ArrayList<>();
+
+  @Override
+  public boolean requiresLayout() {
+return false;
+  }
+
+  @Override
+  protected void append(final LoggingEvent loggingEvent) {
+log.add(loggingEvent);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  public List<LoggingEvent> getLog() {
+return new ArrayList<>(log);
+  }
+}
+
+TestAppender appender = new TestAppender();
+final Logger logger = Logger.getRootLogger();

[hadoop] branch trunk updated: YARN-6695. Fixed NPE in publishing appFinished events to ATSv2. Contributed by Prabhu Joseph

2019-04-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new df76cdc  YARN-6695. Fixed NPE in publishing appFinished events to 
ATSv2. Contributed by Prabhu Joseph
df76cdc is described below

commit df76cdc8959c51b71704ab5c38335f745a6f35d8
Author: Eric Yang 
AuthorDate: Thu Apr 18 12:29:37 2019 -0400

YARN-6695. Fixed NPE in publishing appFinished events to ATSv2.
   Contributed by Prabhu Joseph
---
 .../metrics/TimelineServiceV2Publisher.java| 13 --
 .../metrics/TestSystemMetricsPublisherForV2.java   | 47 ++
 .../src/site/markdown/TimelineServiceV2.md |  8 
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 1438e25..d48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -473,10 +473,15 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
   }
   TimelineCollector timelineCollector =
   rmTimelineCollectorManager.get(appId);
-  TimelineEntities entities = new TimelineEntities();
-  entities.addEntity(entity);
-  timelineCollector.putEntities(entities,
-  UserGroupInformation.getCurrentUser());
+  if (timelineCollector != null) {
+TimelineEntities entities = new TimelineEntities();
+entities.addEntity(entity);
+timelineCollector.putEntities(entities,
+UserGroupInformation.getCurrentUser());
+  } else {
+LOG.debug("Cannot find active collector while publishing entity "
++ entity);
+  }
 } catch (IOException e) {
   LOG.error("Error when publishing entity " + entity);
   LOG.debug("Error when publishing entity {}", entity, e);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index 5e1a224..76e8f0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,12 +28,17 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -293,6 +298,48 @@ public class TestSystemMetricsPublisherForV2 {
 TimelineServiceHelper.invertLong(containerId.getContainerId()));
   }
 
+  @Test(timeout = 10000)
+  public void testPutEntityWhenNoCollector() throws Exception {
+// Validating the logs as DrainDispatcher won't throw exception
+class TestAppender extends AppenderSkeleton {
+  private final List<LoggingEvent> log = new ArrayList<>();
+
+  @Override
+  public boolean requiresLayout() {
+return false;
+  }
+
+  @Override
+  protected void append(final LoggingEvent loggingEvent) {
+log.add(loggingEvent);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  public List<LoggingEvent> getLog() {
+return new ArrayList<>(log);
+  }
+}
+
+TestAppender appender = new TestAppender();
+final Logger logger = Logger.getRootLogger();
+logger.addAppender(appender);
+

[hadoop] branch trunk updated: YARN-9349. Improved log level practices for InvalidStateTransitionException. Contributed by Anuhan Torgonshar

2019-04-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9cf7401  YARN-9349.  Improved log level practices for 
InvalidStateTransitionException. Contributed by Anuhan Torgonshar
9cf7401 is described below

commit 9cf7401794def0d420876db5db20fcd76c52193f
Author: Eric Yang 
AuthorDate: Tue Apr 16 19:51:08 2019 -0400

YARN-9349.  Improved log level practices for 
InvalidStateTransitionException.
Contributed by Anuhan Torgonshar

(cherry picked from commit fe2370e039e1ee980d74769ae85d67434e0993cf)
---
 .../nodemanager/containermanager/application/ApplicationImpl.java   | 2 +-
 .../server/nodemanager/containermanager/container/ContainerImpl.java| 2 +-
 .../nodemanager/containermanager/localizer/LocalizedResource.java   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index f3d4e51..1806af6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -646,7 +646,7 @@ public class ApplicationImpl implements Application {
 // queue event requesting init of the same app
 newState = stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state", e);
+LOG.error("Can't handle this event at current state", e);
   }
   if (newState != null && oldState != newState) {
 LOG.info("Application " + applicationID + " transitioned from "
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cfade27..b79c305 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2125,7 +2125,7 @@ public class ContainerImpl implements Container {
 newState =
 stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state: Current: ["
+LOG.error("Can't handle this event at current state: Current: ["
 + oldState + "], eventType: [" + event.getType() + "]," +
 " container: [" + containerID + "]", e);
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index 279efd0..a75a13e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -196,7 +196,7 @@ public class LocalizedResource implements 
EventHandler<ResourceEvent> {
   try {
 newState = this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.warn("Can't handle this event at current state", e);
+LOG.error("Can't handle this event at current state", e

[hadoop] branch trunk updated: YARN-9441. Updated YARN app catalog name for consistency. Contributed by Weiwei Yang

2019-04-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2bf82ae  YARN-9441.  Updated YARN app catalog name for consistency.
 Contributed by Weiwei Yang
2bf82ae is described below

commit 2bf82aee821a1737aa70feb24481454e626abb8c
Author: Eric Yang 
AuthorDate: Thu Apr 4 13:21:30 2019 -0400

YARN-9441.  Updated YARN app catalog name for consistency.
Contributed by Weiwei Yang
---
 .../hadoop-yarn-applications-catalog-docker/pom.xml | 2 +-
 .../hadoop-yarn-applications-catalog-webapp/pom.xml | 2 +-
 .../hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
index 7c0d3ef..c7129f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
@@ -26,7 +26,7 @@
 3.3.0-SNAPSHOT
   
 
-  YARN Application Catalog Docker Image
+  Apache Hadoop YARN Application Catalog Docker Image
   http://maven.apache.org
   
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
index 35beb5c..58646bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
@@ -26,7 +26,7 @@
 3.3.0-SNAPSHOT
   
 
-  YARN Application Catalog Webapp
+  Apache Hadoop YARN Application Catalog Webapp
 
   http://hadoop.apache.org
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
index 770bf24..da395b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
@@ -26,7 +26,7 @@
 hadoop-yarn-applications-catalog
 pom
 
-YARN Application Catalog
+Apache Hadoop YARN Application Catalog
 
 http://hadoop.apache.org
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9396. Fixed duplicated RM Container created event to ATS. Contributed by Prabhu Joseph

2019-04-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8d15006  YARN-9396.  Fixed duplicated RM Container created event to 
ATS. Contributed by Prabhu Joseph
8d15006 is described below

commit 8d150067e2e4d6c15b319d250e2e1b804066b6b6
Author: Eric Yang 
AuthorDate: Thu Apr 4 13:01:56 2019 -0400

YARN-9396.  Fixed duplicated RM Container created event to ATS.
Contributed by Prabhu Joseph
---
 .../yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java| 5 -
 .../server/resourcemanager/rmcontainer/TestRMContainerImpl.java | 6 +++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 1185170..004c170 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -608,7 +608,10 @@ public class RMContainerImpl implements RMContainer {
   container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
   .getApplicationAttemptId().getApplicationId(), container.nodeId));
 
-  publishNonAMContainerEventstoATS(container);
+  // Opportunistic containers move directly from NEW to ACQUIRED
+  if (container.getState() == RMContainerState.NEW) {
+publishNonAMContainerEventstoATS(container);
+  }
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 2dfbf20..256bd94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -150,7 +150,7 @@ public class TestRMContainerImpl {
 RMContainerEventType.LAUNCHED));
 drainDispatcher.await();
 assertEquals(RMContainerState.RUNNING, rmContainer.getState());
-verify(publisher, times(2)).containerCreated(any(RMContainer.class),
+verify(publisher, times(1)).containerCreated(any(RMContainer.class),
 anyLong());
 
assertEquals("http://host:3465/node/containerlogs/container_1_0001_01_01/user;,
 rmContainer.getLogURL());
@@ -253,7 +253,7 @@ public class TestRMContainerImpl {
 RMContainerEventType.ACQUIRED));
 drainDispatcher.await();
 assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
-verify(publisher, times(2)).containerCreated(any(RMContainer.class),
+verify(publisher, times(1)).containerCreated(any(RMContainer.class),
 anyLong());
 
 rmContainer.handle(new RMContainerEvent(containerId,
@@ -345,7 +345,7 @@ public class TestRMContainerImpl {
 // RMContainer should be publishing system metrics for all containers.
 // Since there is 1 AM container and 1 non-AM container, there should be 2
 // container created events and 2 container finished events.
-verify(publisher, times(4)).containerCreated(any(RMContainer.class),
+verify(publisher, times(2)).containerCreated(any(RMContainer.class),
 anyLong());
 verify(publisher, times(2)).containerFinished(any(RMContainer.class), 
anyLong());
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9405. Fixed flaky tests in TestYarnNativeServices. Contributed by Prabhu Joseph

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 710cbc9  YARN-9405.  Fixed flaky tests in TestYarnNativeServices.  
   Contributed by Prabhu Joseph
710cbc9 is described below

commit 710cbc9bd649123cb0f742e4a91a6a216cb1ac76
Author: Eric Yang 
AuthorDate: Mon Mar 25 16:34:04 2019 -0400

YARN-9405.  Fixed flaky tests in TestYarnNativeServices.
Contributed by Prabhu Joseph
---
 .../hadoop/yarn/service/ServiceTestUtils.java  |  6 ++
 .../yarn/service/TestYarnNativeServices.java   | 22 --
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index b3ba58d..a37ec75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -535,6 +535,12 @@ public class ServiceTestUtils {
 waitForServiceToBeInState(client, exampleApp, ServiceState.STARTED);
   }
 
+  protected void waitForServiceToBeExpressUpgrading(ServiceClient client,
+  Service exampleApp) throws TimeoutException, InterruptedException {
+waitForServiceToBeInState(client, exampleApp,
+ServiceState.EXPRESS_UPGRADING);
+  }
+
   protected void waitForServiceToBeInState(ServiceClient client,
   Service exampleApp, ServiceState desiredState) throws TimeoutException,
   InterruptedException {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index a22ada4..6c38511 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -439,6 +439,8 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 component2.getConfiguration().getEnv().put("key2", "val2");
 client.actionUpgradeExpress(service);
 
+waitForServiceToBeExpressUpgrading(client, service);
+
 // wait for upgrade to complete
 waitForServiceToBeStable(client, service);
 Service active = client.getStatus(service.getName());
@@ -859,16 +861,32 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
   private void checkCompInstancesInOrder(ServiceClient client,
   Service exampleApp) throws IOException, YarnException,
   TimeoutException, InterruptedException {
+waitForContainers(client, exampleApp);
 Service service = client.getStatus(exampleApp.getName());
 for (Component comp : service.getComponents()) {
   checkEachCompInstancesInOrder(comp, exampleApp.getName());
 }
   }
 
+  private void waitForContainers(ServiceClient client, Service exampleApp)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils.waitFor(() -> {
+  try {
+Service service = client.getStatus(exampleApp.getName());
+for (Component comp : service.getComponents()) {
+  if (comp.getContainers().size() != comp.getNumberOfContainers()) {
+return false;
+  }
+}
+return true;
+  } catch (Exception e) {
+return false;
+  }
+}, 2000, 20);
+  }
+
   private void checkEachCompInstancesInOrder(Component component, String
   serviceName) throws TimeoutException, InterruptedException {
-long expectedNumInstances = component.getNumberOfContainers();
-Assert.assertEquals(expectedNumInstances, 
component.getContainers().size());
 TreeSet instances = new TreeSet<>();
 for (Container container : component.getContainers()) {
   instances.add(container.getComponentInstanceName());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new dbc02bc  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
dbc02bc is described below

commit dbc02bcda7ab8db8a27a4f94391e5337af59a2be
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan

(cherry picked from commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0)
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 9714731..dbee048 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -403,16 +403,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index ab5d47e..9cfa6a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -468,10 +468,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 10642a6  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
10642a6 is described below

commit 10642a6205f8f2189eef56836a3f9208da4be8cb
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan

(cherry picked from commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0)
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 98cc2a4..3fa7321 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -406,16 +406,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 1f7df56..b240f88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -468,10 +468,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c45762  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
3c45762 is described below

commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 61e4364..55836c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -431,16 +431,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index c1f4268..e048577 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -476,10 +476,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9370. Added logging for recovering assigned GPU devices. Contributed by Yesha Vora

2019-03-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 506502b  YARN-9370.  Added logging for recovering assigned GPU 
devices. Contributed by Yesha Vora
506502b is described below

commit 506502bb8301f3b4a68e5f692a2af0624980468e
Author: Eric Yang 
AuthorDate: Wed Mar 20 19:12:19 2019 -0400

YARN-9370.  Added logging for recovering assigned GPU devices.
Contributed by Yesha Vora
---
 .../containermanager/linux/resources/gpu/GpuResourceAllocator.java| 4 
 1 file changed, 4 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 67936ba..0b95ca7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -119,6 +119,7 @@ public class GpuResourceAllocator {
   + containerId);
 }
 
+LOG.info("Starting recovery of GpuDevice for {}.", containerId);
 for (Serializable gpuDeviceSerializable : c.getResourceMappings()
 .getAssignedResources(GPU_URI)) {
   if (!(gpuDeviceSerializable instanceof GpuDevice)) {
@@ -146,7 +147,10 @@ public class GpuResourceAllocator {
   }
 
   usedDevices.put(gpuDevice, containerId);
+  LOG.info("ContainerId {} is assigned to GpuDevice {} on recovery.",
+  containerId, gpuDevice);
 }
+LOG.info("Finished recovery of GpuDevice for {}.", containerId);
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9398. Fixed javadoc errors for FPGA related java files. Contributed by Peter Bacsko

2019-03-20 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f2b862c  YARN-9398.  Fixed javadoc errors for FPGA related java files. 
Contributed by Peter Bacsko
f2b862c is described below

commit f2b862cac666217ccb77f49776c54191035b13c4
Author: Eric Yang 
AuthorDate: Wed Mar 20 15:45:37 2019 -0400

YARN-9398.  Fixed javadoc errors for FPGA related java files.
Contributed by Peter Bacsko
---
 .../resourceplugin/fpga/AbstractFpgaVendorPlugin.java  | 7 +++
 .../containermanager/resourceplugin/fpga/FpgaDiscoverer.java   | 7 +--
 .../resourceplugin/fpga/IntelFpgaOpenclPlugin.java | 2 +-
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/AbstractFpgaVendorPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/AbstractFpgaVendorPlugin.java
index d238c67..aa50d23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/AbstractFpgaVendorPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/AbstractFpgaVendorPlugin.java
@@ -42,11 +42,16 @@ public interface AbstractFpgaVendorPlugin {
 
   /**
* Check vendor's toolchain and required environment
+   * @param conf Hadoop configuration
+   * @return true if the initialization was successful
* */
   boolean initPlugin(Configuration conf);
 
   /**
* Diagnose the devices using vendor toolchain but no need to parse device 
information
+   *
+   * @param timeout timeout in milliseconds
+   * @return true if the diagnostics was successful
* */
   boolean diagnose(int timeout);
 
@@ -60,6 +65,8 @@ public interface AbstractFpgaVendorPlugin {
   /**
* Since all vendor plugins share a {@link 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga.FpgaResourceAllocator}
* which distinguish FPGA devices by type. Vendor plugin must report this.
+   *
+   * @return the type of FPGA plugin represented as a string
* */
   String getFpgaType();
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
index 32b88b2..ecc2934 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
@@ -114,8 +114,11 @@ public class FpgaDiscoverer {
   }
 
   /**
-   * get avialable devices minor numbers from toolchain or static configuration
-   * */
+   * Get available devices minor numbers from toolchain or static 
configuration.
+   *
+   * @return the list of FPGA devices
+   * @throws ResourceHandlerException if there's any error during discovery
+   **/
   public List discover()
   throws ResourceHandlerException {
 List list;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
index f6a8cff..19826d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
@@ -153,7 +153,7 @@ public class IntelFpgaOpenclPlugin implements 
AbstractFpgaVendorPlugin {
   }
 
   /**
-   *  Helper class

[hadoop] branch trunk updated: YARN-9364. Remove commons-logging dependency from YARN. Contributed by Prabhu Joseph

2019-03-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 09eabda  YARN-9364.  Remove commons-logging dependency from YARN.  
   Contributed by Prabhu Joseph
09eabda is described below

commit 09eabda314fb0e5532e5391bc37fe84b883d3499
Author: Eric Yang 
AuthorDate: Mon Mar 18 19:58:42 2019 -0400

YARN-9364.  Remove commons-logging dependency from YARN.
Contributed by Prabhu Joseph
---
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml | 4 
 .../hadoop-yarn-applications-distributedshell/pom.xml   | 4 
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml  | 4 
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml  | 4 
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml  | 4 
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml | 6 +-
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml| 4 
 .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml   | 4 
 .../hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml   | 4 
 .../hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml | 4 
 .../hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml   | 5 -
 .../hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml | 4 
 12 files changed, 1 insertion(+), 50 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 486d20e..fdd25e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -39,10 +39,6 @@
   guava
 
 
-  commons-logging
-  commons-logging
-
-
   javax.xml.bind
   jaxb-api
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 01edf66..d64ad0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -54,10 +54,6 @@
   guava
 
 
-  commons-logging
-  commons-logging
-
-
   commons-cli
   commons-cli
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
index 478262c..4a12a91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
@@ -38,10 +38,6 @@
   test
 
 
-  commons-logging
-  commons-logging
-
-
   commons-cli
   commons-cli
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index b680b2f..5de2cb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -43,10 +43,6 @@
   guava
 
 
-  commons-logging
-  commons-logging
-
-
   commons-cli
   commons-cli
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 7b5a4b7..14efd2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -90,10 +90,6 @@
   guava
 
 
-  commons-logging
-  commons-logging
-
-
   commons-cli
   commons-cli
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
index 44c2607..9b2be66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
@@ -70,10 +70,6 @@
 ${grpc.version}
 
 
-commons-logging
-commons-logging
-
-
 junit
 junit
 
@@ -200,4 +196,4 @@
 
 
 
-
\ No newline at end of file
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index e3f7a6b..62f3747 100644
--- 
a/hadoop-yarn-project

[hadoop] branch trunk updated: YARN-9363. Replaced debug logging with SLF4J parameterized log message. Contributed by Prabhu Joseph

2019-03-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5f6e225  YARN-9363.  Replaced debug logging with SLF4J parameterized 
log message. Contributed by Prabhu Joseph
5f6e225 is described below

commit 5f6e22516668ff94a76737ad5e2cdcb2ff9f6dfd
Author: Eric Yang 
AuthorDate: Mon Mar 18 13:57:18 2019 -0400

YARN-9363.  Replaced debug logging with SLF4J parameterized log message.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/fs/DelegationTokenRenewer.java   |  4 +-
 .../yarn/service/provider/ProviderUtils.java   |  7 +---
 .../client/api/impl/FileSystemTimelineWriter.java  |  4 +-
 .../hadoop/yarn/csi/client/CsiGrpcClient.java  |  7 ++--
 .../hadoop/yarn/csi/client/FakeCsiDriver.java  |  5 ++-
 .../server/util/timeline/TimelineServerUtils.java  |  7 ++--
 .../WindowsSecureContainerExecutor.java| 32 +--
 .../launcher/RecoverPausedContainerLaunch.java |  6 +--
 .../resources/CGroupElasticMemoryController.java   |  8 ++--
 .../linux/resources/CGroupsHandlerImpl.java|  7 +---
 .../linux/resources/CGroupsResourceCalculator.java | 18 -
 .../resources/CombinedResourceCalculator.java  |  8 ++--
 .../linux/resources/DefaultOOMHandler.java |  8 ++--
 .../resources/NetworkTagMappingManagerFactory.java |  6 +--
 .../resources/fpga/FpgaResourceAllocator.java  |  7 ++--
 .../resources/fpga/FpgaResourceHandlerImpl.java|  7 ++--
 .../linux/resources/gpu/GpuResourceAllocator.java  |  7 ++--
 .../resources/gpu/GpuResourceHandlerImpl.java  |  8 ++--
 .../linux/resources/numa/NumaNodeResource.java |  7 ++--
 .../resources/numa/NumaResourceAllocator.java  |  7 ++--
 .../resources/numa/NumaResourceHandlerImpl.java|  8 ++--
 .../deviceframework/DeviceMappingManager.java  | 24 +--
 .../deviceframework/DevicePluginAdapter.java   |  7 ++--
 .../DeviceResourceDockerRuntimePluginImpl.java | 47 --
 .../deviceframework/DeviceResourceHandlerImpl.java | 20 -
 .../deviceframework/DeviceResourceUpdaterImpl.java |  7 ++--
 .../resourceplugin/fpga/FpgaResourcePlugin.java|  7 ++--
 .../gpu/NvidiaDockerV1CommandPlugin.java   | 21 --
 .../gpu/NvidiaDockerV2CommandPlugin.java   |  7 ++--
 .../server/nodemanager/TestNodeManagerMXBean.java  |  8 ++--
 .../TestCGroupElasticMemoryController.java |  8 ++--
 .../capacity/AbstractAutoCreatedLeafQueue.java |  4 +-
 .../scheduler/capacity/CapacityScheduler.java  |  6 +--
 .../scheduler/capacity/ParentQueue.java| 12 ++
 .../capacity/QueueManagementDynamicEditPolicy.java | 13 ++
 .../GuaranteedOrZeroCapacityOverTimePolicy.java| 12 ++
 .../constraint/PlacementConstraintsUtil.java   | 28 +
 .../resourcemanager/scheduler/fair/FSQueue.java|  6 +--
 .../scheduler/fifo/FifoScheduler.java  | 11 ++---
 .../placement/LocalityAppPlacementAllocator.java   |  6 +--
 .../yarn/server/resourcemanager/Application.java   | 29 +
 .../timelineservice/storage/flow/FlowScanner.java  |  4 +-
 42 files changed, 188 insertions(+), 277 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
index 09c3a8a..2feb937 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
@@ -242,9 +242,7 @@ public class DelegationTokenRenewer
   } catch (InterruptedException ie) {
 LOG.error("Interrupted while canceling token for " + fs.getUri()
 + "filesystem");
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Exception in removeRenewAction: ", ie);
-}
+LOG.debug("Exception in removeRenewAction: {}", ie);
   }
 }
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index ea1fb0c..5fc96a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/servic

[hadoop] branch trunk updated: YARN-9385. Fixed ApiServiceClient to use current UGI. Contributed by Eric Yang

2019-03-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 19b22c4  YARN-9385.  Fixed ApiServiceClient to use current UGI.
 Contributed by Eric Yang
19b22c4 is described below

commit 19b22c4385a8cf0f89a2ad939380cfd3f033ffdc
Author: Eric Yang 
AuthorDate: Mon Mar 18 13:16:34 2019 -0400

YARN-9385.  Fixed ApiServiceClient to use current UGI.
Contributed by Eric Yang
---
 .../apache/hadoop/yarn/service/client/ApiServiceClient.java | 13 -
 .../hadoop/yarn/service/client/TestApiServiceClient.java|  9 +
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index 008f497..94f03c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -151,7 +151,7 @@ public class ApiServiceClient extends AppAdminClient {
* @return URI to API Service
* @throws IOException
*/
-  private String getServicePath(String appName) throws IOException {
+  protected String getServicePath(String appName) throws IOException {
 String url = getRMWebAddress();
 StringBuilder api = new StringBuilder();
 api.append(url)
@@ -203,12 +203,15 @@ public class ApiServiceClient extends AppAdminClient {
 return api.toString();
   }
 
-  private void appendUserNameIfRequired(StringBuilder builder) {
+  private void appendUserNameIfRequired(StringBuilder builder)
+  throws IOException {
 Configuration conf = getConfig();
-if (conf.get("hadoop.http.authentication.type").equalsIgnoreCase(
-"simple")) {
+if (conf.get("hadoop.http.authentication.type")
+.equalsIgnoreCase("simple")) {
+  String username = UserGroupInformation.getCurrentUser()
+.getShortUserName();
   builder.append("?user.name=").append(UrlEncoded
-  .encodeString(System.getProperty("user.name")));
+  .encodeString(username));
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
index 6cf0880..0ffeb45 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
@@ -27,6 +27,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import com.google.common.collect.Lists;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.eclipse.jetty.server.Server;
@@ -310,5 +311,13 @@ public class TestApiServiceClient {
 }
   }
 
+  @Test
+  public void testNoneSecureApiClient() throws IOException {
+String url = asc.getServicePath("/foobar");
+assertTrue("User.name flag is missing in service path.",
+url.contains("user.name"));
+assertTrue("User.name flag is not matching JVM user.",
+url.contains(System.getProperty("user.name")));
+  }
 
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16167. Fixed Hadoop shell script for Ubuntu 18. Contributed by Daniel Templeton

2019-03-18 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5446e3c  HADOOP-16167.  Fixed Hadoop shell script for Ubuntu 18.   
 Contributed by Daniel Templeton
5446e3c is described below

commit 5446e3cb8a4d9b6aa517fc8437ba194a9ae9b193
Author: Eric Yang 
AuthorDate: Mon Mar 18 13:04:49 2019 -0400

HADOOP-16167.  Fixed Hadoop shell script for Ubuntu 18.
   Contributed by Daniel Templeton
---
 .../hadoop-common/src/main/bin/hadoop-functions.sh   | 12 
 1 file changed, 12 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 71ba7ff..e291a9b 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2364,6 +2364,10 @@ function hadoop_verify_user_perm
   declare command=$2
   declare uvar
 
+  if [[ ${command} =~ \. ]]; then
+return 1
+  fi
+
   uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
 
   if [[ -n ${!uvar} ]]; then
@@ -2395,6 +2399,10 @@ function hadoop_need_reexec
 return 1
   fi
 
+  if [[ ${command} =~ \. ]]; then
+return 1
+  fi
+
   # if we have privilege, and the _USER is defined, and _USER is
   # set to someone who isn't us, then yes, we should re-exec.
   # otherwise no, don't re-exec and let the system deal with it.
@@ -2431,6 +2439,10 @@ function hadoop_subcommand_opts
 return 1
   fi
 
+  if [[ ${command} =~ \. ]]; then
+return 1
+  fi
+
   # bash 4 and up have built-in ways to upper and lower
   # case the contents of vars.  This is faster than
   # calling tr.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-4404. Corrected typo in javadoc. Contributed by Yesha Vora

2019-03-15 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 03f3c8a  YARN-4404.  Corrected typo in javadoc. 
Contributed by Yesha Vora
03f3c8a is described below

commit 03f3c8aed27f73a6aacecc14b41beb1250d4f2f0
Author: Eric Yang 
AuthorDate: Fri Mar 15 18:04:04 2019 -0400

YARN-4404.  Corrected typo in javadoc.
Contributed by Yesha Vora
---
 .../apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java| 2 +-
 .../yarn/server/resourcemanager/scheduler/SchedulerUtils.java   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 2c1cbbe..7fb7476 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -240,7 +240,7 @@ public class RMServerUtils {
   }
 
   /**
-   * Utility method to validate a list resource requests, by insuring that the
+   * Utility method to validate a list resource requests, by ensuring that the
* requested memory/vcore is non-negative and not greater than max
*/
   public static void normalizeAndValidateRequests(List ask,
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 88796db..73aa2c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -195,7 +195,7 @@ public class SchedulerUtils {
   }
 
   /**
-   * Utility method to normalize a resource request, by insuring that the
+   * Utility method to normalize a resource request, by ensuring that the
* requested memory is a multiple of minMemory and is not zero.
*/
   @VisibleForTesting
@@ -210,7 +210,7 @@ public class SchedulerUtils {
   }
 
   /**
-   * Utility method to normalize a resource request, by insuring that the
+   * Utility method to normalize a resource request, by ensuring that the
* requested memory is a multiple of increment resource and is not zero.
*
* @return normalized resource
@@ -304,7 +304,7 @@ public class SchedulerUtils {
   }
 
   /**
-   * Utility method to validate a resource request, by insuring that the
+   * Utility method to validate a resource request, by ensuring that the
* requested memory/vcore is non-negative and not greater than max
*
* @throws InvalidResourceRequestException when there is invalid request


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.7 updated: YARN-7266. Fixed deadlock in Timeline Server thread initialization. Contributed by Prabhu Joseph

2019-03-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
 new cec0041  YARN-7266. Fixed deadlock in Timeline Server thread 
initialization.Contributed by Prabhu Joseph
cec0041 is described below

commit cec004182c4fc546ef340b4bb0eddb8141730748
Author: Eric Yang 
AuthorDate: Wed Mar 6 20:03:44 2019 -0500

YARN-7266. Fixed deadlock in Timeline Server thread initialization.
   Contributed by Prabhu Joseph
---
 .../hadoop-yarn/hadoop-yarn-api/pom.xml|  8 +++
 .../yarn/api/records/timeline/jaxb.properties  | 13 +
 .../webapp/ContextFactory.java | 62 ++
 .../webapp/TestAHSWebServices.java | 12 +
 4 files changed, 95 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 74107ab..d36f0ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -65,6 +65,14 @@
   
 
   
+
+  
+src/main/resources
+
+  **/jaxb.properties
+
+  
+
 
   
 org.apache.hadoop
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
new file mode 100644
index 000..8e545b3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
@@ -0,0 +1,13 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+javax.xml.bind.context.factory=org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ContextFactory
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
new file mode 100644
index 000..67668a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Map;
+import java.lang.reflect.Method;
+import javax.xml.bind.JAXBContext;
+
+/**
+ * ContextFactory to reuse JAXBContextImpl for DAO Classes.
+ */
+public final class ContextFactory {
+
+  private static JAXBContext jaxbContext;
+
+  private ContextFactory() {
+  }
+
+  // Called from WebComponent.service
+  public static JAXBContext createContext(Class[] classes,
+  Map properties) throws Exception {
+synchronized (ContextFactory.class) {
+  if (jaxbContext == null) {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class,
+Map.class);
+jaxbContext = (JAXBContext) m.invoke((Object) null, classes,
+

[hadoop] branch branch-2.8 updated: YARN-7266. Fixed deadlock in Timeline Server thread initialization. Contributed by Prabhu Joseph

2019-03-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new 0422590  YARN-7266. Fixed deadlock in Timeline Server thread 
initialization.Contributed by Prabhu Joseph
0422590 is described below

commit 0422590291e94f48f7734408ef5fd753fd69a6c9
Author: Eric Yang 
AuthorDate: Wed Mar 6 20:01:56 2019 -0500

YARN-7266. Fixed deadlock in Timeline Server thread initialization.
   Contributed by Prabhu Joseph
---
 .../hadoop-yarn/hadoop-yarn-api/pom.xml|  6 +++
 .../yarn/api/records/timeline/jaxb.properties  | 13 +
 .../webapp/ContextFactory.java | 62 ++
 .../webapp/TestAHSWebServices.java | 12 +
 4 files changed, 93 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 8aa712f..114905e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -88,6 +88,12 @@
 
 false
   
+  
+src/main/resources
+
+  **/jaxb.properties
+
+  
 
 
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
new file mode 100644
index 000..8e545b3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
@@ -0,0 +1,13 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+javax.xml.bind.context.factory=org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ContextFactory
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
new file mode 100644
index 000..67668a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Map;
+import java.lang.reflect.Method;
+import javax.xml.bind.JAXBContext;
+
+/**
+ * ContextFactory to reuse JAXBContextImpl for DAO Classes.
+ */
+public final class ContextFactory {
+
+  private static JAXBContext jaxbContext;
+
+  private ContextFactory() {
+  }
+
+  // Called from WebComponent.service
+  public static JAXBContext createContext(Class[] classes,
+  Map properties) throws Exception {
+synchronized (ContextFactory.class) {
+  if (jaxbContext == null) {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class,
+Map.class);
+jaxbContext = (JAXBContext) m.invoke((Object) null, classes,
+properties);
+

[hadoop] branch trunk updated: YARN-9348. Application catalog build system bug fixes. Contributed by Eric Yang

2019-03-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 01ada40  YARN-9348.  Application catalog build system bug fixes.   
  Contributed by Eric Yang
01ada40 is described below

commit 01ada40ea47da0ba32fee22d44f185da2a967456
Author: Eric Yang 
AuthorDate: Wed Mar 6 17:55:48 2019 -0500

YARN-9348.  Application catalog build system bug fixes.
Contributed by Eric Yang
---
 .../hadoop-yarn-applications-catalog-docker/pom.xml  |  1 +
 .../hadoop-yarn-applications-catalog-webapp/.gitignore   |  2 ++
 .../hadoop-yarn-applications-catalog-webapp/package.json | 16 +---
 .../hadoop-yarn-applications-catalog-webapp/pom.xml  |  8 +++-
 4 files changed, 11 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
index 6fd56fe..40e3278 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
@@ -56,6 +56,7 @@
 
   /var/run/docker.sock
 
+linux
   
   application-catalog-docker-image
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/.gitignore
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/.gitignore
index a82f945..2d8667e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/.gitignore
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/.gitignore
@@ -1,3 +1,5 @@
 .classpath
 .project
 /target/
+/node_modules/
+yarn.lock
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/package.json
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/package.json
index d9c899e..65a6ace 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/package.json
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/package.json
@@ -14,26 +14,12 @@
 "bootstrap": "~3.3.7"
 },
 "devDependencies": {
-"http-server": "^0.6.1",
 "requirejs": "^2.1.0",
-"karma": "4.0.0",
-"karma-requirejs": "^0.2.2",
-"karma-script-launcher": "^0.1.0",
-"karma-chrome-launcher": "^0.1.4",
-"karma-firefox-launcher": "^0.1.3",
-"karma-jasmine": "^0.1.5",
-"karma-junit-reporter": "^0.2.2",
 "shelljs": "^0.2.6",
 "apidoc": "0.17.7"
 },
 "scripts": {
 "prestart": "npm install & mvn clean package",
-"start": "http-server target/app -a localhost -p 8000",
-"pretest": "npm install",
-"test": "karma start src/test/javascript/karma.conf.js",
-"test-single-run": "karma start src/test/javascript/karma.conf.js  
--single-run",
-"preupdate-webdriver": "npm install",
-"update-webdriver": "webdriver-manager update",
-"update-index-async": "node -e \"require('shelljs/global'); sed('-i', 
/\\/\\/@@NG_LOADER_START@@[\\s\\S]*\\/\\/@@NG_LOADER_END@@/, 
'//@@NG_LOADER_START@@\\n' + 
cat('src/main/webapp/vendor/angular-loader/angular-loader.min.js') + 
'\\n//@@NG_LOADER_END@@', 'src/main/webapp/index.html');\""
+"pretest": "npm install"
 }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/ha

[hadoop] branch trunk updated: HADOOP-16150. Added concat method to ChecksumFS as unsupported operation. Contributed by Steve Loughran

2019-03-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c5b713  HADOOP-16150. Added concat method to ChecksumFS as 
unsupported operation.   Contributed by Steve Loughran
3c5b713 is described below

commit 3c5b7136e25d39a3c29cc2025dc8ae7b347f4db0
Author: Eric Yang 
AuthorDate: Tue Mar 5 13:27:06 2019 -0500

HADOOP-16150. Added concat method to ChecksumFS as unsupported operation.
  Contributed by Steve Loughran

(cherry picked from commit 8b517e7ad670aa8ee0b73ce5a572f36ce63eabee)
---
 .../src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java| 6 ++
 .../fs/contract/localfs/TestLocalFSContractMultipartUploader.java | 8 
 2 files changed, 14 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 59d95cf..99aa5d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -372,6 +372,12 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 + "by ChecksumFileSystem");
   }
 
+  @Override
+  public void concat(final Path f, final Path[] psrcs) throws IOException {
+throw new UnsupportedOperationException("Concat is not supported "
++ "by ChecksumFileSystem");
+  }
+
   /**
* Calculated the length of the checksum file in bytes.
* @param size the length of the data file in bytes
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java
index 6e27964d..f675ddf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.contract.localfs;
 
+import org.junit.Assume;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
@@ -28,6 +30,12 @@ public class TestLocalFSContractMultipartUploader
 extends AbstractContractMultipartUploaderTest {
 
   @Override
+  public void setup() throws Exception {
+Assume.assumeTrue("Skipping until HDFS-13934", false);
+super.setup();
+  }
+
+  @Override
   protected AbstractFSContract createContract(Configuration conf) {
 return new LocalFSContract(conf);
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-2.9 updated: YARN-7266. Fixed deadlock in Timeline Server thread initialization. Contributed by Prabhu Joseph

2019-03-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new fc76e98  YARN-7266.  Fixed deadlock in Timeline Server thread 
initialization. Contributed by Prabhu Joseph
fc76e98 is described below

commit fc76e98d8d4b4235fef1c1b687c880bec1b8dabb
Author: Eric Yang 
AuthorDate: Tue Mar 5 12:17:01 2019 -0500

YARN-7266.  Fixed deadlock in Timeline Server thread initialization.
Contributed by Prabhu Joseph

(cherry picked from commit 7b42e0e32ac7dfb60f25fa656a9bef69c2a62501)
---
 .../hadoop-yarn/hadoop-yarn-api/pom.xml|  6 +++
 .../yarn/api/records/timeline/jaxb.properties  | 13 +
 .../webapp/ContextFactory.java | 62 ++
 .../webapp/TestAHSWebServices.java | 11 
 4 files changed, 92 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index aa8ebe6..7d0e1f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -92,6 +92,12 @@
 
 false
   
+  
+src/main/resources
+
+  **/jaxb.properties
+
+  
 
 
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
new file mode 100644
index 000..8e545b3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
@@ -0,0 +1,13 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+javax.xml.bind.context.factory=org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ContextFactory
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
new file mode 100644
index 000..67668a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Map;
+import java.lang.reflect.Method;
+import javax.xml.bind.JAXBContext;
+
+/**
+ * ContextFactory to reuse JAXBContextImpl for DAO Classes.
+ */
+public final class ContextFactory {
+
+  private static JAXBContext jaxbContext;
+
+  private ContextFactory() {
+  }
+
+  // Called from WebComponent.service
+  public static JAXBContext createContext(Class[] classes,
+  Map properties) throws Exception {
+synchronized (ContextFactory.class) {
+  if (jaxbContext == null) {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class,
+Map.class);
+   

[hadoop] reference refs/remotes/origin/branch-2 created (now d71cfe1)

2019-03-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a change to reference refs/remotes/origin/branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at d71cfe1  HDFS-14314. fullBlockReportLeaseId should be reset after 
registering to NN. Contributed by star.

No new revisions were added by this update.


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-7266. Fixed deadlock in Timeline Server thread initialization. Contributed by Prabhu Joseph

2019-03-05 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7b42e0e  YARN-7266.  Fixed deadlock in Timeline Server thread 
initialization. Contributed by Prabhu Joseph
7b42e0e is described below

commit 7b42e0e32ac7dfb60f25fa656a9bef69c2a62501
Author: Eric Yang 
AuthorDate: Tue Mar 5 12:17:01 2019 -0500

YARN-7266.  Fixed deadlock in Timeline Server thread initialization.
Contributed by Prabhu Joseph
---
 .../hadoop-yarn/hadoop-yarn-api/pom.xml|  6 +++
 .../yarn/api/records/timeline/jaxb.properties  | 13 +
 .../webapp/ContextFactory.java | 62 ++
 .../webapp/TestAHSWebServices.java | 11 
 4 files changed, 92 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 832e98c..486d20e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -91,6 +91,12 @@
 
 false
   
+  
+src/main/resources
+
+  **/jaxb.properties
+
+  
 
 
   
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
new file mode 100644
index 000..8e545b3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/resources/org/apache/hadoop/yarn/api/records/timeline/jaxb.properties
@@ -0,0 +1,13 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+javax.xml.bind.context.factory=org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.ContextFactory
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
new file mode 100644
index 000..67668a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.Map;
+import java.lang.reflect.Method;
+import javax.xml.bind.JAXBContext;
+
+/**
+ * ContextFactory to reuse JAXBContextImpl for DAO Classes.
+ */
+public final class ContextFactory {
+
+  private static JAXBContext jaxbContext;
+
+  private ContextFactory() {
+  }
+
+  // Called from WebComponent.service
+  public static JAXBContext createContext(Class[] classes,
+  Map properties) throws Exception {
+synchronized (ContextFactory.class) {
+  if (jaxbContext == null) {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class,
+Map.class);
+jaxbContext = (JAXBContext) m.invoke((Object) null, classes,
+properties);
+

[hadoop] branch trunk updated: YARN-9334. Allow YARN Service client to send SPNEGO challenge header when authentication type is not simple. Contributed by Billie Rinaldi

2019-02-27 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 04b228e  YARN-9334.  Allow YARN Service client to send SPNEGO 
challenge header when authentication type is not simple. 
Contributed by Billie Rinaldi
04b228e is described below

commit 04b228e43b728d574d7ad97330aa4218cb7f8bf8
Author: Eric Yang 
AuthorDate: Wed Feb 27 18:47:14 2019 -0500

YARN-9334.  Allow YARN Service client to send SPNEGO challenge header when 
authentication type is not simple.
Contributed by Billie Rinaldi
---
 .../java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index a6d1502..008f497 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -226,11 +226,10 @@ public class ApiServiceClient extends AppAdminClient {
   private Builder getApiClient(String requestPath)
   throws IOException {
 Client client = Client.create(getClientConfig());
-Configuration conf = getConfig();
 client.setChunkedEncodingSize(null);
 Builder builder = client
 .resource(requestPath).type(MediaType.APPLICATION_JSON);
-if (conf.get("hadoop.http.authentication.type").equals("kerberos")) {
+if (UserGroupInformation.isSecurityEnabled()) {
   try {
 URI url = new URI(requestPath);
 String challenge = YarnClientUtils.generateToken(url.getHost());


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16107. Update ChecksumFileSystem createFile/openFile API to generate checksum. Contributed by Steve Loughran

2019-02-27 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new feccd28  HADOOP-16107.  Update ChecksumFileSystem createFile/openFile 
API to generate checksum.Contributed by Steve Loughran
feccd28 is described below

commit feccd282febb5fe5d043480ba989b6f045409efa
Author: Eric Yang 
AuthorDate: Wed Feb 27 15:53:41 2019 -0500

HADOOP-16107.  Update ChecksumFileSystem createFile/openFile API to 
generate checksum.
   Contributed by Steve Loughran
---
 .../org/apache/hadoop/fs/ChecksumFileSystem.java   |  86 
 .../main/java/org/apache/hadoop/fs/FileSystem.java |  60 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java   |  12 ++
 .../org/apache/hadoop/fs/TestLocalFileSystem.java  | 227 +
 .../org/apache/hadoop/fs/impl/TestFutureIO.java|  76 +++
 5 files changed, 456 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 3db3173..59d95cf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -24,15 +24,22 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl;
+import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.LambdaUtils;
 import org.apache.hadoop.util.Progressable;
 
 /
@@ -484,6 +491,32 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 blockSize, progress);
   }
 
+  @Override
+  public FSDataOutputStream create(final Path f,
+  final FsPermission permission,
+  final EnumSet flags,
+  final int bufferSize,
+  final short replication,
+  final long blockSize,
+  final Progressable progress,
+  final Options.ChecksumOpt checksumOpt) throws IOException {
+return create(f, permission, flags.contains(CreateFlag.OVERWRITE),
+bufferSize, replication, blockSize, progress);
+  }
+
+  @Override
+  public FSDataOutputStream createNonRecursive(final Path f,
+  final FsPermission permission,
+  final EnumSet flags,
+  final int bufferSize,
+  final short replication,
+  final long blockSize,
+  final Progressable progress) throws IOException {
+return create(f, permission, flags.contains(CreateFlag.OVERWRITE),
+false, bufferSize, replication,
+blockSize, progress);
+  }
+
   abstract class FsOperation {
 boolean run(Path p) throws IOException {
   boolean status = apply(p);
@@ -780,4 +813,57 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
long inPos, FSDataInputStream sums, 
long sumsPos) {
 return false;
   }
+
+  /**
+   * This is overridden to ensure that this class's
+   * {@link #openFileWithOptions}() method is called, and so ultimately
+   * its {@link #open(Path, int)}.
+   *
+   * {@inheritDoc}
+   */
+  @Override
+  public FutureDataInputStreamBuilder openFile(final Path path)
+  throws IOException, UnsupportedOperationException {
+return ((FutureDataInputStreamBuilderImpl)
+createDataInputStreamBuilder(this, path)).getThisBuilder();
+  }
+
+  /**
+   * Open the file as a blocking call to {@link #open(Path, int)}.
+   *
+   * {@inheritDoc}
+   */
+  @Override
+  protected CompletableFuture openFileWithOptions(
+  final Path path,
+  final Set mandatoryKeys,
+  final Configuration options,
+  final int bufferSize) throws IOException {
+AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys,
+Collections.emptySet(),
+"for " + path);
+return LambdaUtils.eval(
+new CompletableFuture<>(), () -> open(path, bufferSize));
+  }
+
+  /**
+   * This is overridden to ensure that this class's create() method is
+   * ultimately called.
+   *
+   * {@inheritDoc}
+   */
+  public FSDataOutputStrea

[hadoop] branch trunk updated: YARN-9245. Added query docker image command ability to node manager. Contributed by Chandni Singh

2019-02-27 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fbc7bb3  YARN-9245.  Added query docker image command ability to node 
manager. Contributed by Chandni Singh
fbc7bb3 is described below

commit fbc7bb315fd0991baaa74a72ef2d98a88ac738ad
Author: Eric Yang 
AuthorDate: Wed Feb 27 14:57:24 2019 -0500

YARN-9245.  Added query docker image command ability to node manager.
Contributed by Chandni Singh
---
 .../linux/runtime/docker/DockerImagesCommand.java  | 38 +
 .../container-executor/impl/utils/docker-util.c| 38 +
 .../container-executor/impl/utils/docker-util.h| 12 +
 .../test/utils/test_docker_util.cc | 24 +
 .../runtime/docker/TestDockerImagesCommand.java| 62 ++
 5 files changed, 174 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java
new file mode 100644
index 000..87dfcd2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Encapsulates the docker images command and its command
+ * line arguments.
+ */
+public class DockerImagesCommand extends DockerCommand {
+  private static final String IMAGES_COMMAND = "images";
+
+  public DockerImagesCommand() {
+super(IMAGES_COMMAND);
+  }
+
+  public DockerImagesCommand getSingleImageStatus(String imageName) {
+Preconditions.checkNotNull(imageName, "imageName");
+super.addCommandArguments("image", imageName);
+return this;
+  }
+}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 6db5b5d..59a39fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -458,6 +458,8 @@ int get_docker_command(const char *command_file, const 
struct configuration *con
 ret = get_docker_start_command(command_file, conf, args);
   } else if (strcmp(DOCKER_EXEC_COMMAND, command) == 0) {
 ret = get_docker_exec_command(command_file, conf, args);
+  } else if (strcmp(DOCKER_IMAGES_COMMAND, command) == 0) {
+  ret = get_docker_images_command(command_file, conf, args);
   } else {
 ret = UNKNOWN_DOCKER_COMMAND;
   }
@@ -1736,3 +1738,39 @@ free_and_exit:
   free_configuration(_config);
   return ret;
 }
+
+int get_docker_images_command(const char *command_file, const struct 
configuration *conf, args *args) {
+  int ret = 0;
+  char *image_name = NULL;
+
+  struct configuration command_config = {0, NULL};
+  ret = read_and_verify_command_file(command_file, DOCKER_IMAGES_COMMAND, 
_config);
+  if (ret != 0) {
+goto free_and_exit;
+  }
+
+  ret = add_to_args(args, DOCKER_IMAGES_COMMAND);
+  if (ret != 0) {
+goto free_and_exit;
+  }
+
+  image_name = get_configuration_value("image", DOCKER_COMMAND_FILE_SECTION, 
_config);
+  if (image_name) {
+if (validate_docker_image_nam

[hadoop] branch trunk updated: YARN-9244. Document docker registry deployment with direct S3 driver. Contributed by Suma Shivaprasad

2019-02-22 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 05bce33  YARN-9244.  Document docker registry deployment with direct 
S3 driver. Contributed by Suma Shivaprasad
05bce33 is described below

commit 05bce33d6e8b46c416a37b8d12fd84eb53474d6d
Author: Eric Yang 
AuthorDate: Fri Feb 22 19:13:52 2019 -0500

YARN-9244.  Document docker registry deployment with direct S3 driver.
Contributed by Suma Shivaprasad
---
 .../src/site/markdown/DockerContainers.md  | 67 ++
 1 file changed, 67 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 135a0fc..4d55877 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -801,6 +801,73 @@ When docker-registry application reaches STABLE state in 
YARN, user can push or
 ### Docker Registry on S3
 
 Docker Registry provides its own S3 driver and YAML configuration.  YARN 
service configuration can generate YAML template, and enable direct Docker 
Registry to S3 storage.  This option is the top choice for deploying Docker 
Trusted Registry on AWS.
+Configuring Docker registry storage driver to S3 requires mounting 
/etc/docker/registry/config.yml file (through 
YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS), which needs to configure an S3 bucket 
with its corresponding accesskey and secretKey.
+
+Sample config.yml
+```
+version: 0.1
+log:
+fields:
+service: registry
+http:
+addr: :5000
+storage:
+cache:
+blobdescriptor: inmemory
+s3:
+accesskey: #AWS_KEY#
+secretkey: #AWS_SECRET#
+region: #AWS_REGION#
+bucket: #AWS_BUCKET#
+encrypt: #ENCRYPT#
+secure:  #SECURE#
+chunksize: 5242880
+multipartcopychunksize: 33554432
+multipartcopymaxconcurrency: 100
+multipartcopythresholdsize: 33554432
+rootdirectory: #STORAGE_PATH#
+```
+
+Docker Registry can be started using YARN service:
+registry.json
+
+```
+{
+  "name": "docker-registry",
+  "version": "1.0",
+  "kerberos_principal" : {
+"principal_name" : "registry/_HOST@EXAMPLE.COM",
+"keytab" : "file:///etc/security/keytabs/registry.service.keytab"
+  },
+  "components" :
+  [
+{
+  "name": "registry",
+  "number_of_containers": 1,
+  "artifact": {
+"id": "registry:latest",
+"type": "DOCKER"
+  },
+  "resource": {
+"cpus": 1,
+"memory": "256"
+  },
+  "run_privileged_container": true,
+  "configuration": {
+"env": {
+  "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE":"true",
+  "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS":":/etc/docker/registry/config.yml",
+},
+"properties": {
+  "docker.network": "host"
+}
+  }
+}
+  ]
+}
+```
+
+For further details and parameters that could be configured in the S3 storage 
driver, please refer https://docs.docker.com/registry/storage-drivers/s3/.
 
 ### Docker Registry with CSI Driver
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-2.9 updated: Revert "HADOOP-13707. Skip authorization for anonymous user to access Hadoop"

2019-02-21 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 59a65c3  Revert "HADOOP-13707. Skip authorization for anonymous user 
to access Hadoop"
59a65c3 is described below

commit 59a65c396c49f8bb20843324e98a724c4edb590b
Author: Eric Yang 
AuthorDate: Thu Feb 21 17:36:59 2019 -0500

Revert "HADOOP-13707. Skip authorization for anonymous user to access 
Hadoop"

This reverts commit 439422fff923ae6aea1f7547fe24d0e23fbd8f7f.

(cherry picked from commit bae607f73435ec1ec29da994a5ce2466ff7e4f4e)

Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

(cherry picked from commit 43541570bc711ad83722ae49b49ef4d14ca46aec)

Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
---
 .../java/org/apache/hadoop/conf/ConfServlet.java   |  8 +-
 .../apache/hadoop/http/AdminAuthorizedServlet.java | 11 +++-
 .../java/org/apache/hadoop/http/HttpServer2.java   | 29 +++---
 .../java/org/apache/hadoop/jmx/JMXJsonServlet.java |  8 +-
 .../main/java/org/apache/hadoop/log/LogLevel.java  | 10 +++-
 .../org/apache/hadoop/http/TestHttpServer.java | 17 +
 6 files changed, 12 insertions(+), 71 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index cfd7b97..cdc9581 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.conf;
 import java.io.IOException;
 import java.io.Writer;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -59,12 +58,7 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
   throws ServletException, IOException {
 
-// If user is a static user and auth Type is null, that means
-// there is a non-security environment and no need authorization,
-// otherwise, do the authorization.
-final ServletContext servletContext = getServletContext();
-if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-!HttpServer2.isInstrumentationAccessAllowed(servletContext,
+if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
request, response)) {
   return;
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
index e591ab4..ef562b4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.http;
 
 import java.io.IOException;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -36,13 +35,9 @@ public class AdminAuthorizedServlet extends DefaultServlet {
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse 
response)
-  throws ServletException, IOException {
-// If user is a static user and auth Type is null, that means
-// there is a non-security environment and no need authorization,
-// otherwise, do the authorization.
-final ServletContext servletContext = getServletContext();
-if (HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) ||
-HttpServer2.hasAdministratorAccess(servletContext, request,
+ throws ServletException, IOException {
+// Do the authorization
+if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
 response)) {
   // Authorization is done. Just call super.
   super.doGet(request, response);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 861fa48..756c51e 100644

  1   2   3   4   5   6   7   >