hadoop git commit: Updating GPG module pom version post rebase.

2018-05-30 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 c5bf22dc1 -> 262ca7f16


Updating GPG module pom version post rebase.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/262ca7f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/262ca7f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/262ca7f1

Branch: refs/heads/YARN-7402
Commit: 262ca7f160df4005a8386d1b797a34d400b452cf
Parents: c5bf22d
Author: Subru Krishnan 
Authored: Wed May 30 12:59:22 2018 -0700
Committer: Subru Krishnan 
Committed: Wed May 30 12:59:22 2018 -0700

--
 .../hadoop-yarn-server-globalpolicygenerator/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/262ca7f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index 9398b0b..c137c9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-globalpolicygenerator</name>
 
   <properties>





hadoop git commit: YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature exit. (Botong Huang via Subru).

2018-04-06 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 f24d3b69b -> a2a50e54f


YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature 
exit. (Botong Huang via Subru).

(cherry picked from commit 00905efab22edd9857e0a3828c201bf70f03cb96)
(cherry picked from commit 05246b758590286e9dec9e8ff40cddc138003e12)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2a50e54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2a50e54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2a50e54

Branch: refs/heads/branch-2.9
Commit: a2a50e54f0e0a593e163b76fd032716286fd92b1
Parents: f24d3b6
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:41:23 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a50e54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
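
The one-line change above widens the per-attempt catch from IOException to Throwable, so a single corrupt or incompatible NMStateStore entry is logged and evicted instead of aborting the whole AMRMProxy recovery pass (and with it NodeManager start-up). A minimal sketch of that recovery pattern follows; the StateStore interface and rebuildPipeline() are hypothetical placeholders, not the real AMRMProxyService internals.

// Sketch only: StateStore and rebuildPipeline() are invented for illustration,
// not the actual NodeManager APIs.
import java.util.Map;

public final class RecoverAllAttempts {

  interface StateStore {
    Map<String, byte[]> loadAppContexts();    // attemptId -> serialized context
    void removeAppContext(String attemptId);  // drop an entry that failed to recover
  }

  static void recover(StateStore store) {
    for (Map.Entry<String, byte[]> entry : store.loadAppContexts().entrySet()) {
      String attemptId = entry.getKey();
      try {
        rebuildPipeline(attemptId, entry.getValue());
      } catch (Throwable t) {
        // Catching Throwable, not just IOException, keeps one bad record from
        // ending the loop prematurely; the broken entry is removed and recovery
        // moves on to the next attempt.
        System.err.println("Failed to recover " + attemptId + ", removing it: " + t);
        store.removeAppContext(attemptId);
      }
    }
  }

  private static void rebuildPipeline(String attemptId, byte[] serializedContext) {
    // placeholder for re-creating the interceptor pipeline for this attempt
  }
}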

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a50e54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 56d48ef..0958191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -109,6 +109,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a50e54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/

hadoop git commit: YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature exit. (Botong Huang via Subru).

2018-04-06 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b19267259 -> 05246b758


YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature 
exit. (Botong Huang via Subru).

(cherry picked from commit 00905efab22edd9857e0a3828c201bf70f03cb96)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05246b75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05246b75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05246b75

Branch: refs/heads/branch-2
Commit: 05246b758590286e9dec9e8ff40cddc138003e12
Parents: b192672
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:38:28 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05246b75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05246b75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 56d48ef..0958191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -109,6 +109,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05246b75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ 
b/hadoop-y

hadoop git commit: YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature exit. (Botong Huang via Subru).

2018-04-06 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b232dcab3 -> b9bad9459


YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature 
exit. (Botong Huang via Subru).

(cherry picked from commit 00905efab22edd9857e0a3828c201bf70f03cb96)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9bad945
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9bad945
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9bad945

Branch: refs/heads/branch-3.1
Commit: b9bad94596096377c72a7fd1f2bc46674daeeb0e
Parents: b232dca
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:35:29 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9bad945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9bad945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 4b1a887..677732d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -112,6 +112,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9bad945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ 
b/had

hadoop git commit: YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature exit. (Botong Huang via Subru).

2018-04-06 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 024d7c087 -> 00905efab


YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature 
exit. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00905efa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00905efa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00905efa

Branch: refs/heads/trunk
Commit: 00905efab22edd9857e0a3828c201bf70f03cb96
Parents: 024d7c0
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:31:16 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 4b1a887..677732d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -112,6 +112,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/s

[2/2] hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-28 Thread subru
YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 0d7e014fde717e8b122773b68664f4594106)
(cherry picked from commit 304ce1871406a5ee4f1e88e294088d1e01b8de3e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3e5147
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3e5147
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3e5147

Branch: refs/heads/branch-2.9
Commit: cb3e5147a1eda4036b793d4fca67ad471ba349f1
Parents: ad7d793
Author: Subru Krishnan 
Authored: Wed Mar 28 11:33:19 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:55:18 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 ++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../TestFederationRMFailoverProxyProvider.java  | 83 +++-
 .../FederationRMFailoverProxyProvider.java  | 11 ++-
 4 files changed, 95 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3e5147/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index edeec9f..001d02e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2792,15 +2792,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSH_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3e5147/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 1d3111c..230d840 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -79,6 +79,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.FEDERATION_FLUSH_CACHE_FOR_RM_ADDR);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3e5147/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
index e3f9155..7e670c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyPr
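
The FederationRMFailoverProxyProvider and test hunks are truncated in this message. For orientation, a rough, hypothetical sketch of the behaviour the new flag controls during failover; the FacadeLookup interface and its flushCache parameter are invented for the sketch and are not the real facade API.

// Hypothetical illustration only, not the real FederationRMFailoverProxyProvider.
public final class FailoverCacheSketch {

  interface FacadeLookup {
    // flushCache == true forces a state-store lookup instead of the cached entry
    String getRmAddress(String subClusterId, boolean flushCache);
  }

  private final FacadeLookup facade;
  private final boolean flushCacheOnFailover; // yarn.federation.flush-cache-for-rm-addr

  FailoverCacheSketch(FacadeLookup facade, boolean flushCacheOnFailover) {
    this.facade = facade;
    this.flushCacheOnFailover = flushCacheOnFailover;
  }

  String addressAfterFailover(String subClusterId) {
    // With the flag off, the cached sub-cluster record is reused until its TTL
    // expires, trading a possibly stale RM address for one less state-store call.
    return facade.getRmAddress(subClusterId, flushCacheOnFailover);
  }
}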

[1/2] hadoop git commit: Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru)."

2018-03-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a998f22f5 -> cb3e5147a


Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade 
cache when failing over. (Botong Huang via Subru)."

This reverts commit 8d51daaaf5b86f0f4cb4c1af47a39fb6308053cd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad7d7935
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad7d7935
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad7d7935

Branch: refs/heads/branch-2.9
Commit: ad7d7935c4b42c23171a476aa62a27d05ee11a0b
Parents: a998f22
Author: Subru Krishnan 
Authored: Wed Mar 28 11:55:04 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:55:04 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 +++--
 .../src/main/resources/yarn-default.xml  |  9 -
 .../failover/FederationRMFailoverProxyProvider.java  | 11 +++
 3 files changed, 6 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad7d7935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6cfb2a9..edeec9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2792,18 +2792,15 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
-  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
-  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
-  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad7d7935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 800e56f..ce8c0f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2816,15 +2816,6 @@
   </property>
 
   <property>
-    <description>
-      Whether to flush FederationStateStoreFacade cache to get subcluster info
-      when FederationRMFailoverProxyProvider is performing failover.
-    </description>
-    <name>yarn.federation.flush-cache-for-rm-addr</name>
-    <value>true</value>
-  </property>
-
-  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad7d7935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index b72b199..c631208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation

[2/2] hadoop git commit: Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru)."

2018-03-28 Thread subru
Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade 
cache when failing over. (Botong Huang via Subru)."

This reverts commit 0ab1d2f25ba39391bf7bc5e486bf75c828a0e856.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b36b4380
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b36b4380
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b36b4380

Branch: refs/heads/branch-2
Commit: b36b438043b9490d330c35e2effdc83cef087f27
Parents: c93ab3e
Author: Subru Krishnan 
Authored: Wed Mar 28 11:39:11 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:51:59 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 +++--
 .../src/main/resources/yarn-default.xml  |  9 -
 .../failover/FederationRMFailoverProxyProvider.java  | 11 +++
 3 files changed, 6 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b36b4380/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ac7e168..7c25be3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2806,18 +2806,15 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
-  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
-  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
-  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b36b4380/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 5225da2..24279f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2824,15 +2824,6 @@
   </property>
 
   <property>
-    <description>
-      Whether to flush FederationStateStoreFacade cache to get subcluster info
-      when FederationRMFailoverProxyProvider is performing failover.
-    </description>
-    <name>yarn.federation.flush-cache-for-rm-addr</name>
-    <value>true</value>
-  </property>
-
-  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b36b4380/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index b72b199..c631208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
@@ -64,8 +64,7 @@ public class FederationR

[1/2] hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c93ab3e62 -> 304ce1871


YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 0d7e014fde717e8b122773b68664f4594106)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/304ce187
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/304ce187
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/304ce187

Branch: refs/heads/branch-2
Commit: 304ce1871406a5ee4f1e88e294088d1e01b8de3e
Parents: b36b438
Author: Subru Krishnan 
Authored: Wed Mar 28 11:33:19 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:51:59 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 ++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../TestFederationRMFailoverProxyProvider.java  | 83 +++-
 .../FederationRMFailoverProxyProvider.java  | 11 ++-
 4 files changed, 95 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/304ce187/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7c25be3..d6e4388 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2806,15 +2806,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSH_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/304ce187/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 1d3111c..230d840 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -79,6 +79,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.FEDERATION_FLUSH_CACHE_FOR_RM_ADDR);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/304ce187/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
index e3f9155..7e670c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRM

[1/2] hadoop git commit: Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru)."

2018-03-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a2b4daab5 -> a0091ec4b


Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade 
cache when failing over. (Botong Huang via Subru)."

This reverts commit 85eebf1bebc7b191dcd692395f77903257cd85c4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed49f59e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed49f59e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed49f59e

Branch: refs/heads/branch-3.1
Commit: ed49f59e1896a81c9ab4e3a744413f9f1bb37da0
Parents: a2b4daa
Author: Subru Krishnan 
Authored: Wed Mar 28 11:35:30 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:35:30 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 +++--
 .../src/main/resources/yarn-default.xml  |  9 -
 .../failover/FederationRMFailoverProxyProvider.java  | 11 +++
 3 files changed, 6 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed49f59e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6390afc..b76f457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3087,18 +3087,15 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
-  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
-  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
-  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed49f59e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2eba8df..114ba4b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2925,15 +2925,6 @@
   </property>
 
   <property>
-    <description>
-      Whether to flush FederationStateStoreFacade cache to get subcluster info
-      when FederationRMFailoverProxyProvider is performing failover.
-    </description>
-    <name>yarn.federation.flush-cache-for-rm-addr</name>
-    <value>true</value>
-  </property>
-
-  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed49f59e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index b72b199..c631208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation

[2/2] hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-28 Thread subru
YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 0d7e014fde717e8b122773b68664f4594106)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0091ec4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0091ec4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0091ec4

Branch: refs/heads/branch-3.1
Commit: a0091ec4b3c28a468f95d8f066a769320937ad6f
Parents: ed49f59
Author: Subru Krishnan 
Authored: Wed Mar 28 11:33:19 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:35:45 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 ++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../TestFederationRMFailoverProxyProvider.java  | 81 +++-
 .../FederationRMFailoverProxyProvider.java  | 11 ++-
 4 files changed, 94 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0091ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b76f457..5a2c1f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3087,15 +3087,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSH_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0091ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 9fe4f88..f4d1ac0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -79,6 +79,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.FEDERATION_FLUSH_CACHE_FOR_RM_ADDR);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0091ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
index e3f9155..0a7ee3f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-

[2/2] hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-28 Thread subru
YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d7e

Branch: refs/heads/trunk
Commit: 0d7e014fde717e8b122773b68664f4594106
Parents: 725b10e
Author: Subru Krishnan 
Authored: Wed Mar 28 11:33:19 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:33:19 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 ++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../TestFederationRMFailoverProxyProvider.java  | 81 +++-
 .../FederationRMFailoverProxyProvider.java  | 11 ++-
 4 files changed, 94 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 04b2898..1f62bbd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3089,15 +3089,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSH_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 9fe4f88..f4d1ac0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -79,6 +79,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.FEDERATION_FLUSH_CACHE_FOR_RM_ADDR);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
index e3f9155..0a7ee3f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMF

[1/2] hadoop git commit: Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru)."

2018-03-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 47f711eeb -> 0d7e0


Revert "YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade 
cache when failing over. (Botong Huang via Subru)."

This reverts commit 2a2ef15caf791f30c471526c1b74e68803f0c405 as 
smart-apply-patch script didn't pick the latest patch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/725b10e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/725b10e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/725b10e3

Branch: refs/heads/trunk
Commit: 725b10e3aee383d049c97f8ed2b0b1ae873d5ae8
Parents: 47f711e
Author: Subru Krishnan 
Authored: Wed Mar 28 11:26:50 2018 -0700
Committer: Subru Krishnan 
Committed: Wed Mar 28 11:26:50 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 +++--
 .../src/main/resources/yarn-default.xml  |  9 -
 .../failover/FederationRMFailoverProxyProvider.java  | 11 +++
 3 files changed, 6 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/725b10e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 658099a..04b2898 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3089,18 +3089,15 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
-  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
-  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
-  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/725b10e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2eba8df..114ba4b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2925,15 +2925,6 @@
   </property>
 
   <property>
-    <description>
-      Whether to flush FederationStateStoreFacade cache to get subcluster info
-      when FederationRMFailoverProxyProvider is performing failover.
-    </description>
-    <name>yarn.federation.flush-cache-for-rm-addr</name>
-    <value>true</value>
-  </property>
-
-  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/725b10e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index b72b199..c631208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-co

hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-27 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 085acec02 -> 8d51daaaf


YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 2a2ef15caf791f30c471526c1b74e68803f0c405)
(cherry picked from commit 0ab1d2f25ba39391bf7bc5e486bf75c828a0e856)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d51daaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d51daaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d51daaa

Branch: refs/heads/branch-2.9
Commit: 8d51daaaf5b86f0f4cb4c1af47a39fb6308053cd
Parents: 085acec
Author: Subru Krishnan 
Authored: Tue Mar 27 17:39:46 2018 -0700
Committer: Subru Krishnan 
Committed: Tue Mar 27 17:50:06 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 ++---
 .../src/main/resources/yarn-default.xml  |  9 +
 .../failover/FederationRMFailoverProxyProvider.java  | 11 ---
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d51daaa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index edeec9f..6cfb2a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2792,15 +2792,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = 
true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d51daaa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ce8c0f1..800e56f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2816,6 +2816,15 @@
   </property>
 
   <property>
+    <description>
+      Whether to flush FederationStateStoreFacade cache to get subcluster info
+      when FederationRMFailoverProxyProvider is performing failover.
+    </description>
+    <name>yarn.federation.flush-cache-for-rm-addr</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d51daaa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index c631208..b72b199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-serv

hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-27 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 827d2015c -> 0ab1d2f25


YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 2a2ef15caf791f30c471526c1b74e68803f0c405)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ab1d2f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ab1d2f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ab1d2f2

Branch: refs/heads/branch-2
Commit: 0ab1d2f25ba39391bf7bc5e486bf75c828a0e856
Parents: 827d201
Author: Subru Krishnan 
Authored: Tue Mar 27 17:39:46 2018 -0700
Committer: Subru Krishnan 
Committed: Tue Mar 27 17:47:37 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 ++---
 .../src/main/resources/yarn-default.xml  |  9 +
 .../failover/FederationRMFailoverProxyProvider.java  | 11 ---
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ab1d2f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7c25be3..ac7e168 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2806,15 +2806,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = 
true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ab1d2f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 24279f5..5225da2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2824,6 +2824,15 @@
   </property>
 
   <property>
+    <description>
+      Whether to flush FederationStateStoreFacade cache to get subcluster info
+      when FederationRMFailoverProxyProvider is performing failover.
+    </description>
+    <name>yarn.federation.flush-cache-for-rm-addr</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ab1d2f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index c631208..b72b199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/

hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-27 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 1176a128d -> 85eebf1be


YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).

(cherry picked from commit 2a2ef15caf791f30c471526c1b74e68803f0c405)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85eebf1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85eebf1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85eebf1b

Branch: refs/heads/branch-3.1
Commit: 85eebf1bebc7b191dcd692395f77903257cd85c4
Parents: 1176a12
Author: Subru Krishnan 
Authored: Tue Mar 27 17:39:46 2018 -0700
Committer: Subru Krishnan 
Committed: Tue Mar 27 17:44:00 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 ++---
 .../src/main/resources/yarn-default.xml  |  9 +
 .../failover/FederationRMFailoverProxyProvider.java  | 11 ---
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85eebf1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b76f457..6390afc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3087,15 +3087,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = 
true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85eebf1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 114ba4b..2eba8df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2925,6 +2925,15 @@
   </property>
 
   <property>
+    <description>
+      Whether to flush FederationStateStoreFacade cache to get subcluster info
+      when FederationRMFailoverProxyProvider is performing failover.
+    </description>
+    <name>yarn.federation.flush-cache-for-rm-addr</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85eebf1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index c631208..b72b199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/

hadoop git commit: YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache when failing over. (Botong Huang via Subru).

2018-03-27 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fe41c65d -> 2a2ef15ca


YARN-8010. Add config in FederationRMFailoverProxy to not bypass facade cache 
when failing over. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a2ef15c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a2ef15c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a2ef15c

Branch: refs/heads/trunk
Commit: 2a2ef15caf791f30c471526c1b74e68803f0c405
Parents: 3fe41c6
Author: Subru Krishnan 
Authored: Tue Mar 27 17:39:46 2018 -0700
Committer: Subru Krishnan 
Committed: Tue Mar 27 17:39:46 2018 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  9 ++---
 .../src/main/resources/yarn-default.xml  |  9 +
 .../failover/FederationRMFailoverProxyProvider.java  | 11 ---
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a2ef15c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 04b2898..658099a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3089,15 +3089,18 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
+  public static final String FEDERATION_FLUSh_CACHE_FOR_RM_ADDR =
+  FEDERATION_PREFIX + "flush-cache-for-rm-addr";
+  public static final boolean DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR = 
true;
 
   public static final String FEDERATION_REGISTRY_BASE_KEY =
   FEDERATION_PREFIX + "registry.base-dir";
   public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
   "yarnfederation/";
 
-  // 5 minutes
-  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
-
   public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
   FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a2ef15c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 114ba4b..2eba8df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2925,6 +2925,15 @@
   </property>
 
   <property>
+    <description>
+      Whether to flush FederationStateStoreFacade cache to get subcluster info
+      when FederationRMFailoverProxyProvider is performing failover.
+    </description>
+    <name>yarn.federation.flush-cache-for-rm-addr</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>
     <value>yarnfederation/</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a2ef15c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index c631208..b72b199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
@@ -64,7 +64,8 @@ publ

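For context on the knob this series adds: the pair of constants in the YarnConfiguration hunk above back the yarn.federation.flush-cache-for-rm-addr property shown in yarn-default.xml. A minimal sketch of reading it follows; this is not the FederationRMFailoverProxyProvider code (that hunk is truncated above), just an illustration of the switch, and it deliberately keeps the FLUSh casing exactly as the patch defines it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    // Sketch only (hypothetical helper, not Hadoop source): should a failover
    // bypass the FederationStateStoreFacade cache when resolving the RM address?
    final class FlushCacheFlag {
      static boolean shouldFlushCacheForRmAddr(Configuration conf) {
        return conf.getBoolean(
            YarnConfiguration.FEDERATION_FLUSh_CACHE_FOR_RM_ADDR,          // yarn.federation.flush-cache-for-rm-addr
            YarnConfiguration.DEFAULT_FEDERATION_FLUSh_CACHE_FOR_RM_ADDR); // defaults to true
      }
    }
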
hadoop git commit: YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong Huang.

2017-12-14 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 43b9fbc2b -> 2b77729fd


YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong 
Huang.

(cherry picked from commit 17ba74be29193ac15474f73baaaf4e647a95078b)
(cherry picked from commit 8d3d7fa1c5fda4ec394b2f64baf1008e7b0d9524)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b77729f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b77729f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b77729f

Branch: refs/heads/branch-2.9
Commit: 2b77729fdf4075eab645fdc458047c8f186c32bb
Parents: 43b9fbc
Author: Subru Krishnan 
Authored: Thu Dec 14 14:03:55 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Dec 14 14:11:07 2017 -0800

--
 .../org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b77729f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
index e1f08e3..5a380c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
@@ -163,8 +163,12 @@ public final class AMRMClientUtils {
   LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
   rmClusterId, protocol.getSimpleName(), user);
   if (token != null) {
-token.setService(ClientRMProxy.getAMRMTokenService(configuration));
+// preserve the token service sent by the RM when adding the token
+// to ensure we replace the previous token setup by the RM.
+// Afterwards we can update the service address for the RPC layer.
+// Same as YarnServerSecurityUtils.updateAMRMToken()
 user.addToken(token);
+token.setService(ClientRMProxy.getAMRMTokenService(configuration));
 setAuthModeInConf(configuration);
   }
   final T proxyConnection = user.doAs(new PrivilegedExceptionAction<T>() {





hadoop git commit: YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong Huang.

2017-12-14 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 08a50da95 -> 8d3d7fa1c


YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong 
Huang.

(cherry picked from commit 17ba74be29193ac15474f73baaaf4e647a95078b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d3d7fa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d3d7fa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d3d7fa1

Branch: refs/heads/branch-2
Commit: 8d3d7fa1c5fda4ec394b2f64baf1008e7b0d9524
Parents: 08a50da
Author: Subru Krishnan 
Authored: Thu Dec 14 14:03:55 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Dec 14 14:07:44 2017 -0800

--
 .../org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3d7fa1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
index e1f08e3..5a380c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
@@ -163,8 +163,12 @@ public final class AMRMClientUtils {
   LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
   rmClusterId, protocol.getSimpleName(), user);
   if (token != null) {
-token.setService(ClientRMProxy.getAMRMTokenService(configuration));
+// preserve the token service sent by the RM when adding the token
+// to ensure we replace the previous token setup by the RM.
+// Afterwards we can update the service address for the RPC layer.
+// Same as YarnServerSecurityUtils.updateAMRMToken()
 user.addToken(token);
+token.setService(ClientRMProxy.getAMRMTokenService(configuration));
 setAuthModeInConf(configuration);
   }
   final T proxyConnection = user.doAs(new PrivilegedExceptionAction<T>() {





hadoop git commit: YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong Huang.

2017-12-14 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk f5a72424c -> 17ba74be2


YARN-7630. Fix AMRMToken rollover handling in AMRMProxy. Contributed by Botong 
Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17ba74be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17ba74be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17ba74be

Branch: refs/heads/trunk
Commit: 17ba74be29193ac15474f73baaaf4e647a95078b
Parents: f5a7242
Author: Subru Krishnan 
Authored: Thu Dec 14 14:03:55 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Dec 14 14:03:55 2017 -0800

--
 .../org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17ba74be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
index 3cecdca..37e2b5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
@@ -163,8 +163,12 @@ public final class AMRMClientUtils {
   LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
   rmClusterId, protocol.getSimpleName(), user);
   if (token != null) {
-token.setService(ClientRMProxy.getAMRMTokenService(configuration));
+// preserve the token service sent by the RM when adding the token
+// to ensure we replace the previous token setup by the RM.
+// Afterwards we can update the service address for the RPC layer.
+// Same as YarnServerSecurityUtils.updateAMRMToken()
 user.addToken(token);
+token.setService(ClientRMProxy.getAMRMTokenService(configuration));
 setAuthModeInConf(configuration);
   }
   final T proxyConnection = user.doAs(new PrivilegedExceptionAction<T>() {



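The four comment lines added in the hunk above are the whole fix: UserGroupInformation keys tokens by their service field, so the rolled-over AMRMToken must be added while it still carries the service the RM stamped on it, and only afterwards be repointed at the local RPC address. A minimal sketch of that ordering, as a hypothetical helper rather than the actual AMRMClientUtils method:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    // Sketch only: add-then-rewrite so the new AMRMToken replaces the previous
    // one in the UGI instead of sitting beside it under a different service key.
    final class AmrmTokenInstaller {
      static <T extends TokenIdentifier> void install(
          UserGroupInformation user, Token<T> token, Text rpcService) {
        user.addToken(token);          // keyed by the service the RM set on the token
        token.setService(rpcService);  // now retarget it for the RPC layer
      }
    }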


hadoop git commit: YARN-6704. Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).

2017-12-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 4a064dd64 -> 39d0fdf1b


YARN-6704. Add support for work preserving NM restart when 
FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).

(cherry picked from commit 670e8d4ec7e71fc3b054cd3b2826f869b649a788)
(cherry picked from commit 850bd0ed7cebdddf982c8a586ee4d3ad833cfc57)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39d0fdf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39d0fdf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39d0fdf1

Branch: refs/heads/branch-2.9
Commit: 39d0fdf1b2a08851cee6f9dfc180f201e7231b9d
Parents: 4a064dd
Author: Subru Krishnan 
Authored: Fri Dec 8 15:39:18 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Dec 8 15:46:13 2017 -0800

--
 .../yarn/server/MockResourceManagerFacade.java  |  16 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |   5 +-
 .../amrmproxy/FederationInterceptor.java| 271 +--
 .../amrmproxy/BaseAMRMProxyTest.java|  15 +
 .../amrmproxy/TestFederationInterceptor.java| 104 +++
 .../TestableFederationInterceptor.java  |   8 +-
 6 files changed, 387 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d0fdf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index c509994..1e37769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -616,7 +617,20 @@ public class MockResourceManagerFacade implements 
ApplicationClientProtocol,
 
 validateRunning();
 
-return GetContainersResponse.newInstance(null);
+    ApplicationAttemptId attemptId = request.getApplicationAttemptId();
+    List<ContainerReport> containers = new ArrayList<>();
+    synchronized (applicationContainerIdMap) {
+      // Return the list of running containers that were being tracked for this
+      // application
+      Assert.assertTrue("The application id is NOT registered: " + attemptId,
+          applicationContainerIdMap.containsKey(attemptId));
+      List<ContainerId> ids = applicationContainerIdMap.get(attemptId);
+      for (ContainerId c : ids) {
+        containers.add(ContainerReport.newInstance(c, null, null, null, 0, 0,
+            null, null, 0, null, null));
+      }
+    }
+    return GetContainersResponse.newInstance(containers);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d0fdf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index ebd85bf..815e39b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -128,11 +128,8 @@ public class AMRMProxyService extends CompositeService 
implements
 new AMRMProxyTokenSecretManager(this.nmCont

hadoop git commit: YARN-6704. Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).

2017-12-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 94390fcd5 -> 850bd0ed7


YARN-6704. Add support for work preserving NM restart when 
FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).

(cherry picked from commit 670e8d4ec7e71fc3b054cd3b2826f869b649a788)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/850bd0ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/850bd0ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/850bd0ed

Branch: refs/heads/branch-2
Commit: 850bd0ed7cebdddf982c8a586ee4d3ad833cfc57
Parents: 94390fc
Author: Subru Krishnan 
Authored: Fri Dec 8 15:39:18 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Dec 8 15:44:45 2017 -0800

--
 .../yarn/server/MockResourceManagerFacade.java  |  16 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |   5 +-
 .../amrmproxy/FederationInterceptor.java| 271 +--
 .../amrmproxy/BaseAMRMProxyTest.java|  15 +
 .../amrmproxy/TestFederationInterceptor.java| 104 +++
 .../TestableFederationInterceptor.java  |   8 +-
 6 files changed, 387 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/850bd0ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index c509994..1e37769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -616,7 +617,20 @@ public class MockResourceManagerFacade implements 
ApplicationClientProtocol,
 
 validateRunning();
 
-return GetContainersResponse.newInstance(null);
+    ApplicationAttemptId attemptId = request.getApplicationAttemptId();
+    List<ContainerReport> containers = new ArrayList<>();
+    synchronized (applicationContainerIdMap) {
+      // Return the list of running containers that were being tracked for this
+      // application
+      Assert.assertTrue("The application id is NOT registered: " + attemptId,
+          applicationContainerIdMap.containsKey(attemptId));
+      List<ContainerId> ids = applicationContainerIdMap.get(attemptId);
+      for (ContainerId c : ids) {
+        containers.add(ContainerReport.newInstance(c, null, null, null, 0, 0,
+            null, null, 0, null, null));
+      }
+    }
+    return GetContainersResponse.newInstance(containers);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/850bd0ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index ebd85bf..815e39b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -128,11 +128,8 @@ public class AMRMProxyService extends CompositeService 
implements
 new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
 this.secretManager.init(conf);
 
-// Both second app attempt an

hadoop git commit: YARN-6704. Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).

2017-12-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 04b84da24 -> 670e8d4ec


YARN-6704. Add support for work preserving NM restart when 
FederationInterceptor is enabled in AMRMProxyService. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/670e8d4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/670e8d4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/670e8d4e

Branch: refs/heads/trunk
Commit: 670e8d4ec7e71fc3b054cd3b2826f869b649a788
Parents: 04b84da
Author: Subru Krishnan 
Authored: Fri Dec 8 15:39:18 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Dec 8 15:39:18 2017 -0800

--
 .../yarn/server/MockResourceManagerFacade.java  |  16 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |   5 +-
 .../amrmproxy/FederationInterceptor.java| 271 +--
 .../amrmproxy/BaseAMRMProxyTest.java|  15 +
 .../amrmproxy/TestFederationInterceptor.java| 104 +++
 .../TestableFederationInterceptor.java  |   8 +-
 6 files changed, 387 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/670e8d4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index b5727aa..15e1cea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -622,7 +623,20 @@ public class MockResourceManagerFacade implements 
ApplicationClientProtocol,
 
 validateRunning();
 
-return GetContainersResponse.newInstance(null);
+    ApplicationAttemptId attemptId = request.getApplicationAttemptId();
+    List<ContainerReport> containers = new ArrayList<>();
+    synchronized (applicationContainerIdMap) {
+      // Return the list of running containers that were being tracked for this
+      // application
+      Assert.assertTrue("The application id is NOT registered: " + attemptId,
+          applicationContainerIdMap.containsKey(attemptId));
+      List<ContainerId> ids = applicationContainerIdMap.get(attemptId);
+      for (ContainerId c : ids) {
+        containers.add(ContainerReport.newInstance(c, null, null, null, 0, 0,
+            null, null, 0, null, null));
+      }
+    }
+    return GetContainersResponse.newInstance(containers);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/670e8d4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index ebd85bf..815e39b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -128,11 +128,8 @@ public class AMRMProxyService extends CompositeService 
implements
 new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
 this.secretManager.init(conf);
 
-// Both second app attempt and NM restart within Federation need registry
 

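The MockResourceManagerFacade change above is what makes the new recovery tests possible: with work-preserving NM restart, a recovering interceptor asks the RM which containers an attempt still owns rather than assuming none. A minimal sketch of that getContainers round trip, assumed client-side usage rather than the FederationInterceptor recovery path itself:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerReport;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    // Sketch only: re-learn the containers an attempt still owns after a restart.
    final class RunningContainerLookup {
      static List<ContainerReport> runningContainers(
          ApplicationClientProtocol rmClient, ApplicationAttemptId attemptId)
          throws YarnException, IOException {
        GetContainersRequest request = GetContainersRequest.newInstance(attemptId);
        return rmClient.getContainers(request).getContainerList();
      }
    }
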
[2/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-20 Thread subru
YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

(cherry picked from commit ed3109136100a21d971484f242d80f2a7e7d337d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6db6c98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6db6c98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6db6c98

Branch: refs/heads/branch-2.9
Commit: a6db6c9855e2e5dfb36165209f497ad2bf743bb3
Parents: 49ba091
Author: Subru Krishnan 
Authored: Mon Nov 20 14:21:58 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Nov 20 14:22:59 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../src/main/resources/yarn-default.xml |  21 ++
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../utils/FederationRegistryClient.java | 338 +++
 .../yarn/server/uam/UnmanagedAMPoolManager.java | 140 ++--
 .../server/uam/UnmanagedApplicationManager.java | 212 +++-
 .../yarn/server/utils/AMRMClientUtils.java  |  30 +-
 .../yarn/server/MockResourceManagerFacade.java  | 100 +++---
 .../utils/TestFederationRegistryClient.java |  90 +
 .../uam/TestUnmanagedApplicationManager.java| 100 +-
 .../amrmproxy/AMRMProxyApplicationContext.java  |  16 +
 .../AMRMProxyApplicationContextImpl.java|  35 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |  83 -
 .../amrmproxy/FederationInterceptor.java| 221 +++-
 .../containermanager/ContainerManagerImpl.java  |   9 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  12 +-
 .../amrmproxy/TestAMRMProxyService.java |  21 +-
 .../amrmproxy/TestFederationInterceptor.java| 126 ++-
 .../TestableFederationInterceptor.java  |  29 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   6 +-
 .../src/site/markdown/Federation.md |  11 +-
 21 files changed, 1341 insertions(+), 277 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6db6c98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a97dc57..edeec9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1948,6 +1948,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE =
   "org.apache.hadoop.yarn.server.nodemanager.amrmproxy."
   + "DefaultRequestInterceptor";
+  public static final String AMRM_PROXY_HA_ENABLED = NM_PREFIX
+  + "amrmproxy.ha.enable";
+  public static final boolean DEFAULT_AMRM_PROXY_HA_ENABLED = false;
 
   /**
* Default platform-agnostic CLASSPATH for YARN applications. A
@@ -2790,6 +2793,11 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
 
+  public static final String FEDERATION_REGISTRY_BASE_KEY =
+  FEDERATION_PREFIX + "registry.base-dir";
+  public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
+  "yarnfederation/";
+
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
@@ -2947,6 +2955,11 @@ public class YarnConfiguration extends Configuration {
   // Other Configs
   
 
+  public static final String YARN_REGISTRY_CLASS =
+  YARN_PREFIX + "registry.class";
+  public static final String DEFAULT_YARN_REGISTRY_CLASS =
+  "org.apache.hadoop.registry.client.impl.FSRegistryOperationsService";
+
   /**
* Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead.
* The interval of the yarn client's querying application state after

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6db6c98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 46fb7c7..71dd72a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-def

[1/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-20 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 49ba091f7 -> a6db6c985


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6db6c98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index d63b2cf..ebd85bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -34,12 +34,13 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
@@ -60,15 +61,19 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
+import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import 
org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,7 +87,7 @@ import com.google.common.base.Preconditions;
  * pipeline is a chain of interceptor instances that can inspect and modify the
  * request/response as needed.
  */
-public class AMRMProxyService extends AbstractService implements
+public class AMRMProxyService extends CompositeService implements
 ApplicationMasterProtocol {
   private static final Logger LOG = LoggerFactory
   .getLogger(AMRMProxyService.class);
@@ -96,6 +101,7 @@ public class AMRMProxyService extends AbstractService 
implements
   private InetSocketAddress listenerEndpoint;
   private AMRMProxyTokenSecretManager secretManager;
   private Map applPipelineMap;
+  private RegistryOperations registry;
 
   /**
* Creates an instance of the service.
@@ -118,10 +124,23 @@ public class AMRMProxyService extends AbstractService 
implements
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-super.serviceInit(conf);
 this.secretManager =
 new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
 this.secretManager.init(conf);
+
+// Both second app attempt and NM restart within Federation need registry
+if (conf.getBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED,
+YarnConfiguration.DEFAULT_AMRM_PROXY_HA_ENABLED)
+|| conf.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,
+YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED)) {
+  this.registry = FederationStateStoreFacade.createInstance(conf,
+  YarnConfiguration.YARN_REGISTRY_CLASS,
+  YarnConfiguration.DEFAULT_YARN_REGISTRY_CLASS,
+  RegistryOperations.class);
+  addService(this.registry);
+}
+
+super.serviceInit(conf);
   }
 
   @Override
@@ -203,6 +222,8 @@ publi

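Per the serviceInit hunk above, AMRMProxyService only creates and adds the RegistryOperations child service when AMRMProxy HA or NM recovery is enabled. A minimal sketch of flipping both switches programmatically; it assumes NM_PREFIX expands to yarn.nodemanager. and that NM_RECOVERY_ENABLED is the standard yarn.nodemanager.recovery.enabled key, neither of which is shown in this excerpt:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    // Sketch only: the two switches that make AMRMProxyService wire in the
    // registry service during serviceInit in the diff above.
    final class AmrmProxyHaSetup {
      static Configuration enableHaAndRecovery() {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED, true); // yarn.nodemanager.amrmproxy.ha.enable (assumed expansion)
        conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);   // work-preserving NM restart
        return conf;
      }
    }
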
[1/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-20 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ea8a12142 -> ed3109136


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed310913/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index d63b2cf..ebd85bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -34,12 +34,13 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
@@ -60,15 +61,19 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
+import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import 
org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,7 +87,7 @@ import com.google.common.base.Preconditions;
  * pipeline is a chain of interceptor instances that can inspect and modify the
  * request/response as needed.
  */
-public class AMRMProxyService extends AbstractService implements
+public class AMRMProxyService extends CompositeService implements
 ApplicationMasterProtocol {
   private static final Logger LOG = LoggerFactory
   .getLogger(AMRMProxyService.class);
@@ -96,6 +101,7 @@ public class AMRMProxyService extends AbstractService 
implements
   private InetSocketAddress listenerEndpoint;
   private AMRMProxyTokenSecretManager secretManager;
   private Map applPipelineMap;
+  private RegistryOperations registry;
 
   /**
* Creates an instance of the service.
@@ -118,10 +124,23 @@ public class AMRMProxyService extends AbstractService 
implements
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-super.serviceInit(conf);
 this.secretManager =
 new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
 this.secretManager.init(conf);
+
+// Both second app attempt and NM restart within Federation need registry
+if (conf.getBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED,
+YarnConfiguration.DEFAULT_AMRM_PROXY_HA_ENABLED)
+|| conf.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,
+YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED)) {
+  this.registry = FederationStateStoreFacade.createInstance(conf,
+  YarnConfiguration.YARN_REGISTRY_CLASS,
+  YarnConfiguration.DEFAULT_YARN_REGISTRY_CLASS,
+  RegistryOperations.class);
+  addService(this.registry);
+}
+
+super.serviceInit(conf);
   }
 
   @Override
@@ -203,6 +222,8 @@ public 

[2/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-20 Thread subru
YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed310913
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed310913
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed310913

Branch: refs/heads/branch-2
Commit: ed3109136100a21d971484f242d80f2a7e7d337d
Parents: ea8a121
Author: Subru Krishnan 
Authored: Mon Nov 20 14:21:58 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Nov 20 14:21:58 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../src/main/resources/yarn-default.xml |  21 ++
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../utils/FederationRegistryClient.java | 338 +++
 .../yarn/server/uam/UnmanagedAMPoolManager.java | 140 ++--
 .../server/uam/UnmanagedApplicationManager.java | 212 +++-
 .../yarn/server/utils/AMRMClientUtils.java  |  30 +-
 .../yarn/server/MockResourceManagerFacade.java  | 100 +++---
 .../utils/TestFederationRegistryClient.java |  90 +
 .../uam/TestUnmanagedApplicationManager.java| 100 +-
 .../amrmproxy/AMRMProxyApplicationContext.java  |  16 +
 .../AMRMProxyApplicationContextImpl.java|  35 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |  83 -
 .../amrmproxy/FederationInterceptor.java| 221 +++-
 .../containermanager/ContainerManagerImpl.java  |   9 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  12 +-
 .../amrmproxy/TestAMRMProxyService.java |  21 +-
 .../amrmproxy/TestFederationInterceptor.java| 126 ++-
 .../TestableFederationInterceptor.java  |  29 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   6 +-
 .../src/site/markdown/Federation.md |  11 +-
 21 files changed, 1341 insertions(+), 277 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed310913/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a97dc57..edeec9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1948,6 +1948,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE =
   "org.apache.hadoop.yarn.server.nodemanager.amrmproxy."
   + "DefaultRequestInterceptor";
+  public static final String AMRM_PROXY_HA_ENABLED = NM_PREFIX
+  + "amrmproxy.ha.enable";
+  public static final boolean DEFAULT_AMRM_PROXY_HA_ENABLED = false;
 
   /**
* Default platform-agnostic CLASSPATH for YARN applications. A
@@ -2790,6 +2793,11 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
 
+  public static final String FEDERATION_REGISTRY_BASE_KEY =
+  FEDERATION_PREFIX + "registry.base-dir";
+  public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
+  "yarnfederation/";
+
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
@@ -2947,6 +2955,11 @@ public class YarnConfiguration extends Configuration {
   // Other Configs
   
 
+  public static final String YARN_REGISTRY_CLASS =
+  YARN_PREFIX + "registry.class";
+  public static final String DEFAULT_YARN_REGISTRY_CLASS =
+  "org.apache.hadoop.registry.client.impl.FSRegistryOperationsService";
+
   /**
* Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead.
* The interval of the yarn client's querying application state after
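For quick reference, the defaults behind the three new keys resolve as sketched below; this is
only an illustration of reading them back (the literal key strings in the comments are
assumptions derived from the NM_PREFIX/FEDERATION_PREFIX/YARN_PREFIX constants, not text from
this patch):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class FederationRegistryKeysSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // assumed key: yarn.nodemanager.amrmproxy.ha.enable (default false)
        boolean haEnabled = conf.getBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED,
            YarnConfiguration.DEFAULT_AMRM_PROXY_HA_ENABLED);
        // assumed key: yarn.federation.registry.base-dir (default "yarnfederation/")
        String baseDir = conf.get(YarnConfiguration.FEDERATION_REGISTRY_BASE_KEY,
            YarnConfiguration.DEFAULT_FEDERATION_REGISTRY_BASE_KEY);
        // assumed key: yarn.registry.class (default FSRegistryOperationsService)
        String registryImpl = conf.get(YarnConfiguration.YARN_REGISTRY_CLASS,
            YarnConfiguration.DEFAULT_YARN_REGISTRY_CLASS);
        System.out.println(haEnabled + " " + baseDir + " " + registryImpl);
      }
    }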

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed310913/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 46fb7c7..71dd72a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/

[1/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0940e4f69 -> d5f66888b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index d63b2cf..ebd85bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -34,12 +34,13 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
@@ -60,15 +61,19 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
+import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import 
org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,7 +87,7 @@ import com.google.common.base.Preconditions;
  * pipeline is a chain of interceptor instances that can inspect and modify the
  * request/response as needed.
  */
-public class AMRMProxyService extends AbstractService implements
+public class AMRMProxyService extends CompositeService implements
 ApplicationMasterProtocol {
   private static final Logger LOG = LoggerFactory
   .getLogger(AMRMProxyService.class);
@@ -96,6 +101,7 @@ public class AMRMProxyService extends AbstractService 
implements
   private InetSocketAddress listenerEndpoint;
   private AMRMProxyTokenSecretManager secretManager;
  private Map<ApplicationId, RequestInterceptorChainWrapper> applPipelineMap;
+  private RegistryOperations registry;
 
   /**
* Creates an instance of the service.
@@ -118,10 +124,23 @@ public class AMRMProxyService extends AbstractService 
implements
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-super.serviceInit(conf);
 this.secretManager =
 new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
 this.secretManager.init(conf);
+
+// Both second app attempt and NM restart within Federation need registry
+if (conf.getBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED,
+YarnConfiguration.DEFAULT_AMRM_PROXY_HA_ENABLED)
+|| conf.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,
+YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED)) {
+  this.registry = FederationStateStoreFacade.createInstance(conf,
+  YarnConfiguration.YARN_REGISTRY_CLASS,
+  YarnConfiguration.DEFAULT_YARN_REGISTRY_CLASS,
+  RegistryOperations.class);
+  addService(this.registry);
+}
+
+super.serviceInit(conf);
   }
 
   @Override
@@ -203,6 +222,8 @@ public cla

[2/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).

2017-11-17 Thread subru
YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5f66888
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5f66888
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5f66888

Branch: refs/heads/trunk
Commit: d5f66888b8d767ee6706fab9950c194a1bf26d32
Parents: 0940e4f
Author: Subru Krishnan 
Authored: Fri Nov 17 17:39:06 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Nov 17 17:39:06 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../src/main/resources/yarn-default.xml |  21 ++
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../utils/FederationRegistryClient.java | 338 +++
 .../yarn/server/uam/UnmanagedAMPoolManager.java | 141 ++--
 .../server/uam/UnmanagedApplicationManager.java | 212 +++-
 .../yarn/server/utils/AMRMClientUtils.java  |  30 +-
 .../yarn/server/MockResourceManagerFacade.java  | 103 +++---
 .../utils/TestFederationRegistryClient.java |  90 +
 .../uam/TestUnmanagedApplicationManager.java| 100 +-
 .../amrmproxy/AMRMProxyApplicationContext.java  |  16 +
 .../AMRMProxyApplicationContextImpl.java|  35 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java |  83 -
 .../amrmproxy/FederationInterceptor.java| 221 +++-
 .../containermanager/ContainerManagerImpl.java  |   9 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  14 +-
 .../amrmproxy/TestAMRMProxyService.java |  21 +-
 .../amrmproxy/TestFederationInterceptor.java| 126 ++-
 .../TestableFederationInterceptor.java  |  29 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   6 +-
 .../src/site/markdown/Federation.md |  11 +-
 21 files changed, 1345 insertions(+), 279 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 34257ed..ead9977 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2096,6 +2096,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE =
   "org.apache.hadoop.yarn.server.nodemanager.amrmproxy."
   + "DefaultRequestInterceptor";
+  public static final String AMRM_PROXY_HA_ENABLED = NM_PREFIX
+  + "amrmproxy.ha.enable";
+  public static final boolean DEFAULT_AMRM_PROXY_HA_ENABLED = false;
 
   /**
* Default platform-agnostic CLASSPATH for YARN applications. A
@@ -2930,6 +2933,11 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
   FEDERATION_PREFIX + "cache-ttl.secs";
 
+  public static final String FEDERATION_REGISTRY_BASE_KEY =
+  FEDERATION_PREFIX + "registry.base-dir";
+  public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY =
+  "yarnfederation/";
+
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
@@ -3087,6 +3095,11 @@ public class YarnConfiguration extends Configuration {
   // Other Configs
   
 
+  public static final String YARN_REGISTRY_CLASS =
+  YARN_PREFIX + "registry.class";
+  public static final String DEFAULT_YARN_REGISTRY_CLASS =
+  "org.apache.hadoop.registry.client.impl.FSRegistryOperationsService";
+
   /**
* Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead.
* The interval of the yarn client's querying application state after

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e90d0f2..12cb902 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/

hadoop git commit: YARN-7412. Fix unit test for docker mount check on ubuntu. (Contributed by Eric Badger)

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 d6f4b9159 -> 23b3efe31


YARN-7412. Fix unit test for docker mount check on ubuntu.  (Contributed by 
Eric Badger)

(cherry picked from commit 7a49ddfdde2e2a7b407f4a62a42d97bfe456075a)
(cherry picked from commit 90ac8cd5a1c056d535eee92b148132c1674c5523)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b3efe3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b3efe3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b3efe3

Branch: refs/heads/branch-2.9.0
Commit: 23b3efe3190335feb5fbfc29e6230ee9fe085348
Parents: d6f4b91
Author: Eric Yang 
Authored: Wed Nov 1 18:39:56 2017 -0400
Committer: Subru Krishnan 
Committed: Sun Nov 12 10:14:13 2017 -0800

--
 .../test/utils/test_docker_util.cc  | 62 ++--
 1 file changed, 31 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b3efe3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index c42cd78..5233000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -429,12 +429,12 @@ namespace ContainerExecutor {
   }
 
   TEST_F(TestDockerUtil, test_check_mount_permitted) {
-const char *permitted_mounts[] = {"/etc", "/usr/bin/touch", "/tmp/", NULL};
+const char *permitted_mounts[] = {"/etc", "/usr/bin/cut", "/tmp/", NULL};
 std::vector<std::pair<std::string, int> > test_data;
 test_data.push_back(std::make_pair("/etc", 1));
 test_data.push_back(std::make_pair("/etc/", 1));
 test_data.push_back(std::make_pair("/etc/passwd", 1));
-test_data.push_back(std::make_pair("/usr/bin/touch", 1));
+test_data.push_back(std::make_pair("/usr/bin/cut", 1));
 test_data.push_back(std::make_pair("//usr/", 0));
 test_data.push_back(std::make_pair("/etc/random-file", 
-1));
 
@@ -447,8 +447,8 @@ namespace ContainerExecutor {
 
   TEST_F(TestDockerUtil, test_normalize_mounts) {
 const int entries = 4;
-const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/touch", NULL};
-const char *expected[] = {"/home/", "/etc/", "/usr/bin/touch", NULL};
+const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/cut", NULL};
+const char *expected[] = {"/home/", "/etc/", "/usr/bin/cut", NULL};
 char **ptr = static_cast<char **>(malloc(entries * sizeof(char *)));
 for (int i = 0; i < entries; ++i) {
   if (permitted_mounts[i] != NULL) {
@@ -660,7 +660,7 @@ namespace ContainerExecutor {
 const int buff_len = 1024;
 char buff[buff_len];
 int ret = 0;
-std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/touch,..\n  "
+std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut,..\n  "
   
"docker.allowed.ro-mounts=/etc/passwd";
 std::vector<std::pair<std::string, std::string> > file_cmd_vec;
 file_cmd_vec.push_back(std::make_pair(
@@ -668,8 +668,8 @@ namespace ContainerExecutor {
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/var/:/var/", "-v '/var/:/var/' "));
 file_cmd_vec.push_back(std::make_pair(
-"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/touch:/usr/bin/touch",
- "-v '/usr/bin/touch:/usr/bin/touch' "));
+"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/cut:/usr/bin/cut",
+ "-v '/usr/bin/cut:/usr/bin/cut' "));
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
  

hadoop git commit: YARN-7412. Fix unit test for docker mount check on ubuntu. (Contributed by Eric Badger)

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 8bcd1d62c -> 45c5b7f71


YARN-7412. Fix unit test for docker mount check on ubuntu.  (Contributed by 
Eric Badger)

(cherry picked from commit 7a49ddfdde2e2a7b407f4a62a42d97bfe456075a)
(cherry picked from commit 90ac8cd5a1c056d535eee92b148132c1674c5523)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45c5b7f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45c5b7f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45c5b7f7

Branch: refs/heads/branch-2.9
Commit: 45c5b7f71d9f08e15f36e04b18023b27119c55fd
Parents: 8bcd1d6
Author: Eric Yang 
Authored: Wed Nov 1 18:39:56 2017 -0400
Committer: Subru Krishnan 
Committed: Sun Nov 12 10:13:49 2017 -0800

--
 .../test/utils/test_docker_util.cc  | 62 ++--
 1 file changed, 31 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c5b7f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index c42cd78..5233000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -429,12 +429,12 @@ namespace ContainerExecutor {
   }
 
   TEST_F(TestDockerUtil, test_check_mount_permitted) {
-const char *permitted_mounts[] = {"/etc", "/usr/bin/touch", "/tmp/", NULL};
+const char *permitted_mounts[] = {"/etc", "/usr/bin/cut", "/tmp/", NULL};
 std::vector<std::pair<std::string, int> > test_data;
 test_data.push_back(std::make_pair("/etc", 1));
 test_data.push_back(std::make_pair("/etc/", 1));
 test_data.push_back(std::make_pair("/etc/passwd", 1));
-test_data.push_back(std::make_pair("/usr/bin/touch", 1));
+test_data.push_back(std::make_pair("/usr/bin/cut", 1));
 test_data.push_back(std::make_pair("//usr/", 0));
 test_data.push_back(std::make_pair("/etc/random-file", 
-1));
 
@@ -447,8 +447,8 @@ namespace ContainerExecutor {
 
   TEST_F(TestDockerUtil, test_normalize_mounts) {
 const int entries = 4;
-const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/touch", NULL};
-const char *expected[] = {"/home/", "/etc/", "/usr/bin/touch", NULL};
+const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/cut", NULL};
+const char *expected[] = {"/home/", "/etc/", "/usr/bin/cut", NULL};
 char **ptr = static_cast<char **>(malloc(entries * sizeof(char *)));
 for (int i = 0; i < entries; ++i) {
   if (permitted_mounts[i] != NULL) {
@@ -660,7 +660,7 @@ namespace ContainerExecutor {
 const int buff_len = 1024;
 char buff[buff_len];
 int ret = 0;
-std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/touch,..\n  "
+std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut,..\n  "
   
"docker.allowed.ro-mounts=/etc/passwd";
 std::vector<std::pair<std::string, std::string> > file_cmd_vec;
 file_cmd_vec.push_back(std::make_pair(
@@ -668,8 +668,8 @@ namespace ContainerExecutor {
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/var/:/var/", "-v '/var/:/var/' "));
 file_cmd_vec.push_back(std::make_pair(
-"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/touch:/usr/bin/touch",
- "-v '/usr/bin/touch:/usr/bin/touch' "));
+"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/cut:/usr/bin/cut",
+ "-v '/usr/bin/cut:/usr/bin/cut' "));
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
  

hadoop git commit: YARN-7412. Fix unit test for docker mount check on ubuntu. (Contributed by Eric Badger)

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0282efab1 -> 90ac8cd5a


YARN-7412. Fix unit test for docker mount check on ubuntu.  (Contributed by 
Eric Badger)

(cherry picked from commit 7a49ddfdde2e2a7b407f4a62a42d97bfe456075a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90ac8cd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90ac8cd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90ac8cd5

Branch: refs/heads/branch-2
Commit: 90ac8cd5a1c056d535eee92b148132c1674c5523
Parents: 0282efa
Author: Eric Yang 
Authored: Wed Nov 1 18:39:56 2017 -0400
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:59:39 2017 -0800

--
 .../test/utils/test_docker_util.cc  | 62 ++--
 1 file changed, 31 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90ac8cd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index c42cd78..5233000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -429,12 +429,12 @@ namespace ContainerExecutor {
   }
 
   TEST_F(TestDockerUtil, test_check_mount_permitted) {
-const char *permitted_mounts[] = {"/etc", "/usr/bin/touch", "/tmp/", NULL};
+const char *permitted_mounts[] = {"/etc", "/usr/bin/cut", "/tmp/", NULL};
 std::vector<std::pair<std::string, int> > test_data;
 test_data.push_back(std::make_pair("/etc", 1));
 test_data.push_back(std::make_pair("/etc/", 1));
 test_data.push_back(std::make_pair("/etc/passwd", 1));
-test_data.push_back(std::make_pair("/usr/bin/touch", 1));
+test_data.push_back(std::make_pair("/usr/bin/cut", 1));
 test_data.push_back(std::make_pair("//usr/", 0));
 test_data.push_back(std::make_pair("/etc/random-file", 
-1));
 
@@ -447,8 +447,8 @@ namespace ContainerExecutor {
 
   TEST_F(TestDockerUtil, test_normalize_mounts) {
 const int entries = 4;
-const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/touch", NULL};
-const char *expected[] = {"/home/", "/etc/", "/usr/bin/touch", NULL};
+const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/cut", NULL};
+const char *expected[] = {"/home/", "/etc/", "/usr/bin/cut", NULL};
 char **ptr = static_cast<char **>(malloc(entries * sizeof(char *)));
 for (int i = 0; i < entries; ++i) {
   if (permitted_mounts[i] != NULL) {
@@ -660,7 +660,7 @@ namespace ContainerExecutor {
 const int buff_len = 1024;
 char buff[buff_len];
 int ret = 0;
-std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/touch,..\n  "
+std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut,..\n  "
   
"docker.allowed.ro-mounts=/etc/passwd";
 std::vector<std::pair<std::string, std::string> > file_cmd_vec;
 file_cmd_vec.push_back(std::make_pair(
@@ -668,8 +668,8 @@ namespace ContainerExecutor {
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/var/:/var/", "-v '/var/:/var/' "));
 file_cmd_vec.push_back(std::make_pair(
-"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/touch:/usr/bin/touch",
- "-v '/usr/bin/touch:/usr/bin/touch' "));
+"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/cut:/usr/bin/cut",
+ "-v '/usr/bin/cut:/usr/bin/cut' "));
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
 "-v '/opt:/mydisk1' -v '/var/log/:/mydisk2' "));
@@ -767,7 +767,7

hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 bfbf94197 -> 37ae81866


YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

(cherry picked from commit 3c5b46c2edd69bb238d635ae61ff91656dec23df)
(cherry picked from commit 3e26077848ed1d7461576116a9ae841d38aa3ef1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37ae8186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37ae8186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37ae8186

Branch: refs/heads/branch-3.0
Commit: 37ae818660e42dbf22a978ae6e4860f46faa46c8
Parents: bfbf941
Author: Subru Krishnan 
Authored: Sun Nov 12 09:18:08 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:54:55 2017 -0800

--
 .../src/main/webapp/app/adapters/yarn-container-log.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37ae8186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 8d1b12b..df46127 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({
 var nodeHttpAddr = splits[0];
 var containerId = splits[1];
 var filename = splits[2];
-this.host = this.get('host') + nodeHttpAddr;
 var url = this._buildURL();
-url = url + "/containerlogs/" + containerId + "/" + filename;
+url = url.replace("{nodeAddress}", nodeHttpAddr)  + "/containerlogs/"
+   + containerId + "/" + filename;
 return url;
   },
 





hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk ff9f7fcf7 -> 3e2607784


YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

(cherry picked from commit 3c5b46c2edd69bb238d635ae61ff91656dec23df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e260778
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e260778
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e260778

Branch: refs/heads/trunk
Commit: 3e26077848ed1d7461576116a9ae841d38aa3ef1
Parents: ff9f7fc
Author: Subru Krishnan 
Authored: Sun Nov 12 09:18:08 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:53:39 2017 -0800

--
 .../src/main/webapp/app/adapters/yarn-container-log.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e260778/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 8d1b12b..df46127 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({
 var nodeHttpAddr = splits[0];
 var containerId = splits[1];
 var filename = splits[2];
-this.host = this.get('host') + nodeHttpAddr;
 var url = this._buildURL();
-url = url + "/containerlogs/" + containerId + "/" + filename;
+url = url.replace("{nodeAddress}", nodeHttpAddr)  + "/containerlogs/"
+   + containerId + "/" + filename;
 return url;
   },
 





[1/2] hadoop git commit: YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun Saxena via Subru).

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 d15be3c93 -> d6f4b9159


YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun 
Saxena via Subru).

(cherry picked from commit 0282efab147203e7323450fa18704af46bc524ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6f4b915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6f4b915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6f4b915

Branch: refs/heads/branch-2.9.0
Commit: d6f4b91594d8e082beae5417e1b28fb6da71fa4c
Parents: fc67563
Author: Subru Krishnan 
Authored: Sun Nov 12 09:19:30 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:25:57 2017 -0800

--
 .../src/main/proto/yarn_server_common_service_protos.proto | 2 +-
 .../server/nodemanager/timelineservice/NMTimelinePublisher.java| 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV1Publisher.java | 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV2Publisher.java | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f4b915/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 3072867..8e59f14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -118,9 +118,9 @@ message NodeHeartbeatResponseProto {
   repeated SignalContainerRequestProto containers_to_signal = 13;
   optional ResourceProto resource = 14;
   optional ContainerQueuingLimitProto container_queuing_limit = 15;
+  repeated AppCollectorDataProto app_collectors = 16;
   // to be used in place of containers_to_decrease
   repeated ContainerProto containers_to_update = 17;
-  repeated AppCollectorDataProto app_collectors = 18;
 }
 
 message ContainerQueuingLimitProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f4b915/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 515ff8d..2124c1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -187,7 +187,7 @@ public class NMTimelinePublisher extends CompositeService {
 
 Map<String, Object> entityInfo = new HashMap<String, Object>();
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO,
-resource.getMemory());
+resource.getMemorySize());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_INFO,
 resource.getVirtualCores());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_INFO,
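The one-line switch above moves the publisher off the deprecated int-valued
Resource#getMemory() accessor onto the long-valued getMemorySize(). A minimal sketch of the
two accessors, assuming only the public Resource API:

    import org.apache.hadoop.yarn.api.records.Resource;

    public class ResourceMemorySketch {
      public static void main(String[] args) {
        Resource r = Resource.newInstance(8192, 4);
        long memoryMb = r.getMemorySize();   // preferred: long-valued accessor
        // int legacyMb = r.getMemory();     // deprecated int-valued accessor
        System.out.println("allocated memory (MB): " + memoryMb);
      }
    }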

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f4b915/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 4c371a7..e1fe512 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/ser

[2/2] hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

2017-11-12 Thread subru
YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

(cherry picked from commit 3c5b46c2edd69bb238d635ae61ff91656dec23df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc67563c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc67563c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc67563c

Branch: refs/heads/branch-2.9.0
Commit: fc67563c6e827fc6437c0be50c31dcf88a96f661
Parents: d15be3c
Author: Subru Krishnan 
Authored: Sun Nov 12 09:18:08 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:25:57 2017 -0800

--
 .../src/main/webapp/app/adapters/yarn-container-log.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc67563c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 8d1b12b..df46127 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({
 var nodeHttpAddr = splits[0];
 var containerId = splits[1];
 var filename = splits[2];
-this.host = this.get('host') + nodeHttpAddr;
 var url = this._buildURL();
-url = url + "/containerlogs/" + containerId + "/" + filename;
+url = url.replace("{nodeAddress}", nodeHttpAddr)  + "/containerlogs/"
+   + containerId + "/" + filename;
 return url;
   },
 





[1/2] hadoop git commit: YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun Saxena via Subru).

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 45aabb3c8 -> 8bcd1d62c


YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun 
Saxena via Subru).

(cherry picked from commit 0282efab147203e7323450fa18704af46bc524ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bcd1d62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bcd1d62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bcd1d62

Branch: refs/heads/branch-2.9
Commit: 8bcd1d62c86bc9680c11a30f63e2be807edbdf3e
Parents: 8251601
Author: Subru Krishnan 
Authored: Sun Nov 12 09:19:30 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:25:12 2017 -0800

--
 .../src/main/proto/yarn_server_common_service_protos.proto | 2 +-
 .../server/nodemanager/timelineservice/NMTimelinePublisher.java| 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV1Publisher.java | 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV2Publisher.java | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bcd1d62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 3072867..8e59f14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -118,9 +118,9 @@ message NodeHeartbeatResponseProto {
   repeated SignalContainerRequestProto containers_to_signal = 13;
   optional ResourceProto resource = 14;
   optional ContainerQueuingLimitProto container_queuing_limit = 15;
+  repeated AppCollectorDataProto app_collectors = 16;
   // to be used in place of containers_to_decrease
   repeated ContainerProto containers_to_update = 17;
-  repeated AppCollectorDataProto app_collectors = 18;
 }
 
 message ContainerQueuingLimitProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bcd1d62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 515ff8d..2124c1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -187,7 +187,7 @@ public class NMTimelinePublisher extends CompositeService {
 
 Map<String, Object> entityInfo = new HashMap<String, Object>();
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO,
-resource.getMemory());
+resource.getMemorySize());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_INFO,
 resource.getVirtualCores());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_INFO,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bcd1d62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 4c371a7..e1fe512 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemana

[2/2] hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

2017-11-12 Thread subru
YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

(cherry picked from commit 3c5b46c2edd69bb238d635ae61ff91656dec23df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82516010
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82516010
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82516010

Branch: refs/heads/branch-2.9
Commit: 82516010036fa0f66d99d1713a4b5cc6ed9aea0f
Parents: 45aabb3
Author: Subru Krishnan 
Authored: Sun Nov 12 09:18:08 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:25:12 2017 -0800

--
 .../src/main/webapp/app/adapters/yarn-container-log.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82516010/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 8d1b12b..df46127 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({
 var nodeHttpAddr = splits[0];
 var containerId = splits[1];
 var filename = splits[2];
-this.host = this.get('host') + nodeHttpAddr;
 var url = this._buildURL();
-url = url + "/containerlogs/" + containerId + "/" + filename;
+url = url.replace("{nodeAddress}", nodeHttpAddr)  + "/containerlogs/"
+   + containerId + "/" + filename;
 return url;
   },
 





[1/2] hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).

2017-11-12 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5cd92ed55 -> 0282efab1


YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c5b46c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c5b46c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c5b46c2

Branch: refs/heads/branch-2
Commit: 3c5b46c2edd69bb238d635ae61ff91656dec23df
Parents: 5cd92ed
Author: Subru Krishnan 
Authored: Sun Nov 12 09:18:08 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:18:08 2017 -0800

--
 .../src/main/webapp/app/adapters/yarn-container-log.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5b46c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 8d1b12b..df46127 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({
 var nodeHttpAddr = splits[0];
 var containerId = splits[1];
 var filename = splits[2];
-this.host = this.get('host') + nodeHttpAddr;
 var url = this._buildURL();
-url = url + "/containerlogs/" + containerId + "/" + filename;
+url = url.replace("{nodeAddress}", nodeHttpAddr)  + "/containerlogs/"
+   + containerId + "/" + filename;
 return url;
   },
 





[2/2] hadoop git commit: YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun Saxena via Subru).

2017-11-12 Thread subru
YARN-7476. Fix miscellaneous issues in ATSv2 after merge to branch-2. (Varun 
Saxena via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0282efab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0282efab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0282efab

Branch: refs/heads/branch-2
Commit: 0282efab147203e7323450fa18704af46bc524ad
Parents: 3c5b46c
Author: Subru Krishnan 
Authored: Sun Nov 12 09:19:30 2017 -0800
Committer: Subru Krishnan 
Committed: Sun Nov 12 09:19:30 2017 -0800

--
 .../src/main/proto/yarn_server_common_service_protos.proto | 2 +-
 .../server/nodemanager/timelineservice/NMTimelinePublisher.java| 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV1Publisher.java | 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV2Publisher.java | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0282efab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 3072867..8e59f14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -118,9 +118,9 @@ message NodeHeartbeatResponseProto {
   repeated SignalContainerRequestProto containers_to_signal = 13;
   optional ResourceProto resource = 14;
   optional ContainerQueuingLimitProto container_queuing_limit = 15;
+  repeated AppCollectorDataProto app_collectors = 16;
   // to be used in place of containers_to_decrease
   repeated ContainerProto containers_to_update = 17;
-  repeated AppCollectorDataProto app_collectors = 18;
 }
 
 message ContainerQueuingLimitProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0282efab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 515ff8d..2124c1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -187,7 +187,7 @@ public class NMTimelinePublisher extends CompositeService {
 
 Map<String, Object> entityInfo = new HashMap<String, Object>();
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO,
-resource.getMemory());
+resource.getMemorySize());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_INFO,
 resource.getVirtualCores());
 entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_INFO,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0282efab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 4c371a7..e1fe512 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache

hadoop git commit: Updating the hadoop-cloud-storage-project pom for 2.9.0 release.

2017-11-10 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 1741998d7 -> 1aaf81963


Updating the hadoop-cloud-storage-project pom for 2.9.0 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1aaf8196
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1aaf8196
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1aaf8196

Branch: refs/heads/branch-2.9.0
Commit: 1aaf8196315f8482ac3f5b4d9cea3d29e48ca27f
Parents: 1741998
Author: Subru Krishnan 
Authored: Fri Nov 10 14:25:24 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Nov 10 14:25:24 2017 -0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 4 ++--
 hadoop-cloud-storage-project/pom.xml  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaf8196/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index afd88cf..e347e1a 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,12 +18,12 @@
   
 org.apache.hadoop
 hadoop-project
-2.10.0-SNAPSHOT
+2.9.0
 ../../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage
-  2.10.0-SNAPSHOT
+  2.9.0
   jar
 
   Apache Hadoop Cloud Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaf8196/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
index 594b0b3..1666b19 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,12 +20,12 @@
   
 org.apache.hadoop
 hadoop-project
-2.10.0-SNAPSHOT
+2.9.0
 ../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage-project
-  2.10.0-SNAPSHOT
+  2.9.0
   Apache Hadoop Cloud Storage Project
   Apache Hadoop Cloud Storage Project
   pom





hadoop git commit: HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main hadoop pom modules.

2017-11-10 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 29f6c5e85 -> 1741998d7


HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main 
hadoop pom modules.

(cherry picked from commit 132c2e7a2a2dcb8050770a85aca8e66a7c33934b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1741998d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1741998d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1741998d

Branch: refs/heads/branch-2.9.0
Commit: 1741998d75a43bc9e011c508a5775c1f209e864d
Parents: 29f6c5e
Author: Subru Krishnan 
Authored: Fri Nov 10 13:48:46 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Nov 10 13:57:05 2017 -0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 4 ++--
 hadoop-cloud-storage-project/pom.xml  | 4 ++--
 pom.xml   | 1 +
 3 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1741998d/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 7993b83..afd88cf 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,12 +18,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.10.0-SNAPSHOT
 ../../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage
-  3.0.0-alpha2-SNAPSHOT
+  2.10.0-SNAPSHOT
   jar
 
   Apache Hadoop Cloud Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1741998d/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
index 94d4c02..594b0b3 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,12 +20,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.10.0-SNAPSHOT
 ../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage-project
-  3.0.0-alpha2-SNAPSHOT
+  2.10.0-SNAPSHOT
   Apache Hadoop Cloud Storage Project
   Apache Hadoop Cloud Storage Project
   pom

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1741998d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index bd85a2e..a5c5fa8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -125,6 +125,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 hadoop-client
 hadoop-minicluster
 hadoop-build-tools
+hadoop-cloud-storage-project
   
 
   





hadoop git commit: HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main hadoop pom modules.

2017-11-10 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 dc65df2a1 -> b35c5ba47


HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main 
hadoop pom modules.

(cherry picked from commit 132c2e7a2a2dcb8050770a85aca8e66a7c33934b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b35c5ba4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b35c5ba4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b35c5ba4

Branch: refs/heads/branch-2.9
Commit: b35c5ba47216aa8ab82ad86d7f5f780ada33bfc8
Parents: dc65df2
Author: Subru Krishnan 
Authored: Fri Nov 10 13:48:46 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Nov 10 13:52:39 2017 -0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 4 ++--
 hadoop-cloud-storage-project/pom.xml  | 4 ++--
 pom.xml   | 1 +
 3 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b35c5ba4/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 7993b83..4c06df2 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,12 +18,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.9.1-SNAPSHOT
 ../../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage
-  3.0.0-alpha2-SNAPSHOT
+  2.9.1-SNAPSHOT
   jar
 
   Apache Hadoop Cloud Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b35c5ba4/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
index 94d4c02..d9bf47e 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,12 +20,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.9.1-SNAPSHOT
 ../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage-project
-  3.0.0-alpha2-SNAPSHOT
+  2.9.1-SNAPSHOT
   Apache Hadoop Cloud Storage Project
   Apache Hadoop Cloud Storage Project
   pom

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b35c5ba4/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 8da45ec..9b309bc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -125,6 +125,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 hadoop-client
 hadoop-minicluster
 hadoop-build-tools
+hadoop-cloud-storage-project
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main hadoop pom modules.

2017-11-10 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0411c710d -> 132c2e7a2


HADOOP-15030. [branch-2] Include hadoop-cloud-storage-project in the main 
hadoop pom modules.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132c2e7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132c2e7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132c2e7a

Branch: refs/heads/branch-2
Commit: 132c2e7a2a2dcb8050770a85aca8e66a7c33934b
Parents: 0411c71
Author: Subru Krishnan 
Authored: Fri Nov 10 13:48:46 2017 -0800
Committer: Subru Krishnan 
Committed: Fri Nov 10 13:48:46 2017 -0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 4 ++--
 hadoop-cloud-storage-project/pom.xml  | 4 ++--
 pom.xml   | 1 +
 3 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132c2e7a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 7993b83..afd88cf 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,12 +18,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.10.0-SNAPSHOT
 ../../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage
-  3.0.0-alpha2-SNAPSHOT
+  2.10.0-SNAPSHOT
   jar
 
   Apache Hadoop Cloud Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132c2e7a/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
index 94d4c02..594b0b3 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,12 +20,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha2-SNAPSHOT
+2.10.0-SNAPSHOT
 ../hadoop-project
   
   org.apache.hadoop
   hadoop-cloud-storage-project
-  3.0.0-alpha2-SNAPSHOT
+  2.10.0-SNAPSHOT
   Apache Hadoop Cloud Storage Project
   Apache Hadoop Cloud Storage Project
   pom

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132c2e7a/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1f073a0..bb229dc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -125,6 +125,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 hadoop-client
 hadoop-minicluster
 hadoop-build-tools
+hadoop-cloud-storage-project
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li via Subru).

2017-11-09 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 d45672a14 -> 26110ec33


HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li 
via Subru).

(cherry picked from commit 5991c218a0cd72fc6a2b10ef77729ee6181c9443)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26110ec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26110ec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26110ec3

Branch: refs/heads/branch-2.9
Commit: 26110ec335c6f45de7e553086d20cb51cbf09de7
Parents: d45672a
Author: Subru Krishnan 
Authored: Thu Nov 9 00:32:34 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Nov 9 00:33:23 2017 -0800

--
 .../src/main/bin/estimator-daemon.sh| 79 
 .../src/main/bin/estimator.sh   | 52 +
 .../src/main/bin/start-estimator.sh | 30 +---
 .../src/main/bin/stop-estimator.sh  | 29 +--
 4 files changed, 104 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26110ec3/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
new file mode 100755
index 000..34a98c0
--- /dev/null
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+pid=/tmp/hadoop-dogtail-estimator.pid
+
+LOG_DIR=$bin/../../../../../logs
+if [ ! -w "$LOG_DIR" ] ; then
+  mkdir -p "$LOG_DIR"
+fi
+
+log=$LOG_DIR/hadoop-resourceestimator.out
+
+case $startStop in
+
+  (start)
+
+if [ -f $pid ]; then
+  # shellcheck disable=SC2046
+  if kill -0 $(cat $pid) > /dev/null 2>&1; then
+echo "$command running as process $(cat $pid).  Stop it first."
+exit 1
+  fi
+fi
+
+echo "starting $command, logging to $log"
+bin/estimator.sh "$@" > "$log" 2>&1 < /dev/null &
+echo $! > $pid
+sleep 1
+head "$log"
+;;
+
+  (stop)
+
+if [ -f $pid ]; then
+  TARGET_PID=$(cat $pid)
+  # shellcheck disable=SC2086
+  if kill -0 $TARGET_PID > /dev/null 2>&1; then
+kill "$TARGET_PID"
+sleep 5
+# shellcheck disable=SC2086
+if kill -0 $TARGET_PID > /dev/null 2>&1; then
+  echo "$command did not stop gracefully after 5 seconds: killing with 
kill -9"
+  "kill -9 $TARGET_PID"
+fi
+  else
+echo "no $command to stop"
+  fi
+  rm -f $pid
+else
+  echo "no $command to stop"
+fi
+;;
+
+  (*)
+exit 1
+;;
+
+esac
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26110ec3/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
--
diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
index 3e1ec27..e48238c 100644
--- a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
@@ -13,32 +13,21 @@
 #  limitations under the License. See accompanying LICENSE file.
 #
 
-## @audience public
-## @stabilitystable
-function hadoop_usage()
-{
-  echo "Usage: estimator.sh"
- #hadoop-daemon.sh. need both start and stop, status (query the status). run 
as background process.
-}
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
 
-## @audience public
-## @stabilitystable
-function calculate_classpath
-{
-  hadoop_add_client_opts
-  hadoop_add_to_classpath_tools hadoop-resourceestimator
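
Taken together, estimator-daemon.sh expects an action (start or stop) followed by the command name to run, records the PID in /tmp/hadoop-dogtail-estimator.pid, and sends output to hadoop-resourceestimator.out under a logs directory resolved relative to the script location. A minimal usage sketch based only on how the script parses its arguments (the command name "estimator" is illustrative):

  # Launch the estimator in the background via estimator.sh; stdout/stderr
  # go to the resolved logs directory and the PID file is written.
  ./estimator-daemon.sh start estimator

  # Stop it again; if the process is still alive after ~5 seconds the
  # script falls back to a forced kill.
  ./estimator-daemon.sh stop estimator

The start-estimator.sh and stop-estimator.sh wrappers touched by the same patch presumably delegate to these two invocations, but their bodies are not shown in this excerpt.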

hadoop git commit: HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li via Subru).

2017-11-09 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 29875e9a2 -> 21d93207f


HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li 
via Subru).

(cherry picked from commit 5991c218a0cd72fc6a2b10ef77729ee6181c9443)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21d93207
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21d93207
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21d93207

Branch: refs/heads/branch-2.9.0
Commit: 21d93207fa90368ea10a6f8ebd221d2d94ea28f0
Parents: 29875e9
Author: Subru Krishnan 
Authored: Thu Nov 9 00:32:34 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Nov 9 00:33:49 2017 -0800

--
 .../src/main/bin/estimator-daemon.sh| 79 
 .../src/main/bin/estimator.sh   | 52 +
 .../src/main/bin/start-estimator.sh | 30 +---
 .../src/main/bin/stop-estimator.sh  | 29 +--
 4 files changed, 104 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21d93207/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
new file mode 100755
index 000..34a98c0
--- /dev/null
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+pid=/tmp/hadoop-dogtail-estimator.pid
+
+LOG_DIR=$bin/../../../../../logs
+if [ ! -w "$LOG_DIR" ] ; then
+  mkdir -p "$LOG_DIR"
+fi
+
+log=$LOG_DIR/hadoop-resourceestimator.out
+
+case $startStop in
+
+  (start)
+
+if [ -f $pid ]; then
+  # shellcheck disable=SC2046
+  if kill -0 $(cat $pid) > /dev/null 2>&1; then
+echo "$command running as process $(cat $pid).  Stop it first."
+exit 1
+  fi
+fi
+
+echo "starting $command, logging to $log"
+bin/estimator.sh "$@" > "$log" 2>&1 < /dev/null &
+echo $! > $pid
+sleep 1
+head "$log"
+;;
+
+  (stop)
+
+if [ -f $pid ]; then
+  TARGET_PID=$(cat $pid)
+  # shellcheck disable=SC2086
+  if kill -0 $TARGET_PID > /dev/null 2>&1; then
+kill "$TARGET_PID"
+sleep 5
+# shellcheck disable=SC2086
+if kill -0 $TARGET_PID > /dev/null 2>&1; then
+  echo "$command did not stop gracefully after 5 seconds: killing with 
kill -9"
+  "kill -9 $TARGET_PID"
+fi
+  else
+echo "no $command to stop"
+  fi
+  rm -f $pid
+else
+  echo "no $command to stop"
+fi
+;;
+
+  (*)
+exit 1
+;;
+
+esac
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21d93207/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
--
diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
index 3e1ec27..e48238c 100644
--- a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
@@ -13,32 +13,21 @@
 #  limitations under the License. See accompanying LICENSE file.
 #
 
-## @audience public
-## @stabilitystable
-function hadoop_usage()
-{
-  echo "Usage: estimator.sh"
- #hadoop-daemon.sh. need both start and stop, status (query the status). run 
as background process.
-}
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
 
-## @audience public
-## @stabilitystable
-function calculate_classpath
-{
-  hadoop_add_client_opts
-  hadoop_add_to_classpath_tools hadoop-resourceestimator

hadoop git commit: HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li via Subru).

2017-11-09 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 46a740a82 -> 5991c218a


HADOOP-15026. Rebase ResourceEstimator start/stop scripts for branch-2. (Rui Li 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5991c218
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5991c218
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5991c218

Branch: refs/heads/branch-2
Commit: 5991c218a0cd72fc6a2b10ef77729ee6181c9443
Parents: 46a740a
Author: Subru Krishnan 
Authored: Thu Nov 9 00:32:34 2017 -0800
Committer: Subru Krishnan 
Committed: Thu Nov 9 00:32:34 2017 -0800

--
 .../src/main/bin/estimator-daemon.sh| 79 
 .../src/main/bin/estimator.sh   | 52 +
 .../src/main/bin/start-estimator.sh | 30 +---
 .../src/main/bin/stop-estimator.sh  | 29 +--
 4 files changed, 104 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5991c218/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
new file mode 100755
index 000..34a98c0
--- /dev/null
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator-daemon.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+pid=/tmp/hadoop-dogtail-estimator.pid
+
+LOG_DIR=$bin/../../../../../logs
+if [ ! -w "$LOG_DIR" ] ; then
+  mkdir -p "$LOG_DIR"
+fi
+
+log=$LOG_DIR/hadoop-resourceestimator.out
+
+case $startStop in
+
+  (start)
+
+if [ -f $pid ]; then
+  # shellcheck disable=SC2046
+  if kill -0 $(cat $pid) > /dev/null 2>&1; then
+echo "$command running as process $(cat $pid).  Stop it first."
+exit 1
+  fi
+fi
+
+echo "starting $command, logging to $log"
+bin/estimator.sh "$@" > "$log" 2>&1 < /dev/null &
+echo $! > $pid
+sleep 1
+head "$log"
+;;
+
+  (stop)
+
+if [ -f $pid ]; then
+  TARGET_PID=$(cat $pid)
+  # shellcheck disable=SC2086
+  if kill -0 $TARGET_PID > /dev/null 2>&1; then
+kill "$TARGET_PID"
+sleep 5
+# shellcheck disable=SC2086
+if kill -0 $TARGET_PID > /dev/null 2>&1; then
+  echo "$command did not stop gracefully after 5 seconds: killing with 
kill -9"
+  "kill -9 $TARGET_PID"
+fi
+  else
+echo "no $command to stop"
+  fi
+  rm -f $pid
+else
+  echo "no $command to stop"
+fi
+;;
+
+  (*)
+exit 1
+;;
+
+esac
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5991c218/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
--
diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh 
b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
index 3e1ec27..e48238c 100644
--- a/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/bin/estimator.sh
@@ -13,32 +13,21 @@
 #  limitations under the License. See accompanying LICENSE file.
 #
 
-## @audience public
-## @stabilitystable
-function hadoop_usage()
-{
-  echo "Usage: estimator.sh"
- #hadoop-daemon.sh. need both start and stop, status (query the status). run 
as background process.
-}
+bin=$(dirname "${BASH_SOURCE-$0}")
+bin=$(cd "$bin" || exit; pwd)
 
-## @audience public
-## @stabilitystable
-function calculate_classpath
-{
-  hadoop_add_client_opts
-  hadoop_add_to_classpath_tools hadoop-resourceestimator
-}
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; t

hadoop git commit: HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

2017-11-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 e1852f63a -> 29875e9a2


HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

(cherry picked from commit f2df6b8983aace73ad27934bd9f7f4d766e0b25f)
(cherry picked from commit 46a740a82ebb5143038296bbedc9d455f68bae63)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29875e9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29875e9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29875e9a

Branch: refs/heads/branch-2.9.0
Commit: 29875e9a2021901fd168e356653ab6221ce5a5b2
Parents: e1852f6
Author: Subru Krishnan 
Authored: Wed Nov 8 18:07:12 2017 -0800
Committer: Subru Krishnan 
Committed: Wed Nov 8 18:11:56 2017 -0800

--
 .../service/ResourceEstimatorService.java   |  5 ++--
 .../service/TestResourceEstimatorService.java   | 25 +---
 2 files changed, 4 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29875e9a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 0e0e094..5d3aea4 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -34,6 +34,7 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 
+import com.sun.jersey.spi.resource.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
 import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
@@ -56,13 +57,13 @@ import org.slf4j.LoggerFactory;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Singleton;
 
 /**
  * Resource Estimator Service which provides a set of REST APIs for users to
  * use the estimation service.
  */
-@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+@Singleton
+@Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
   private final SkylineStore skylineStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29875e9a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
index 91a486e..785641c 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
@@ -37,18 +37,12 @@ import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test ResourceEstimatorService.
@@ -70,29 +64,12 @@ public class TestResourceEstimatorService extends 
JerseyTest {
   private long containerMemAlloc;
   private int containerCPUAlloc;
 
-  private static class WebServletModule extends ServletModule {
-@Override protected void configureServlets() {
-  bind(ResourceEstimatorService.class);
-  serve("/*").with(GuiceContainer.class);
-}
-  }
-
-  static {
-GuiceServletConfig
-.setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
   pu

hadoop git commit: HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

2017-11-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 1ddd0a795 -> d45672a14


HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

(cherry picked from commit f2df6b8983aace73ad27934bd9f7f4d766e0b25f)
(cherry picked from commit 46a740a82ebb5143038296bbedc9d455f68bae63)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d45672a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d45672a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d45672a1

Branch: refs/heads/branch-2.9
Commit: d45672a144845ef6620df5273325178173cd0adc
Parents: 1ddd0a7
Author: Subru Krishnan 
Authored: Wed Nov 8 18:07:12 2017 -0800
Committer: Subru Krishnan 
Committed: Wed Nov 8 18:11:32 2017 -0800

--
 .../service/ResourceEstimatorService.java   |  5 ++--
 .../service/TestResourceEstimatorService.java   | 25 +---
 2 files changed, 4 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45672a1/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 0e0e094..5d3aea4 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -34,6 +34,7 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 
+import com.sun.jersey.spi.resource.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
 import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
@@ -56,13 +57,13 @@ import org.slf4j.LoggerFactory;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Singleton;
 
 /**
  * Resource Estimator Service which provides a set of REST APIs for users to
  * use the estimation service.
  */
-@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+@Singleton
+@Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
   private final SkylineStore skylineStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45672a1/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
index 91a486e..785641c 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
@@ -37,18 +37,12 @@ import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test ResourceEstimatorService.
@@ -70,29 +64,12 @@ public class TestResourceEstimatorService extends 
JerseyTest {
   private long containerMemAlloc;
   private int containerCPUAlloc;
 
-  private static class WebServletModule extends ServletModule {
-@Override protected void configureServlets() {
-  bind(ResourceEstimatorService.class);
-  serve("/*").with(GuiceContainer.class);
-}
-  }
-
-  static {
-GuiceServletConfig
-.setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
   pu

hadoop git commit: HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

2017-11-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 548654207 -> 46a740a82


HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

(cherry picked from commit f2df6b8983aace73ad27934bd9f7f4d766e0b25f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46a740a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46a740a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46a740a8

Branch: refs/heads/branch-2
Commit: 46a740a82ebb5143038296bbedc9d455f68bae63
Parents: 5486542
Author: Subru Krishnan 
Authored: Wed Nov 8 18:07:12 2017 -0800
Committer: Subru Krishnan 
Committed: Wed Nov 8 18:09:02 2017 -0800

--
 .../service/ResourceEstimatorService.java   |  5 ++--
 .../service/TestResourceEstimatorService.java   | 25 +---
 2 files changed, 4 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a740a8/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 0e0e094..5d3aea4 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -34,6 +34,7 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 
+import com.sun.jersey.spi.resource.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
 import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
@@ -56,13 +57,13 @@ import org.slf4j.LoggerFactory;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Singleton;
 
 /**
  * Resource Estimator Service which provides a set of REST APIs for users to
  * use the estimation service.
  */
-@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+@Singleton
+@Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
   private final SkylineStore skylineStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a740a8/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
index 91a486e..785641c 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
@@ -37,18 +37,12 @@ import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test ResourceEstimatorService.
@@ -70,29 +64,12 @@ public class TestResourceEstimatorService extends 
JerseyTest {
   private long containerMemAlloc;
   private int containerCPUAlloc;
 
-  private static class WebServletModule extends ServletModule {
-@Override protected void configureServlets() {
-  bind(ResourceEstimatorService.class);
-  serve("/*").with(GuiceContainer.class);
-}
-  }
-
-  static {
-GuiceServletConfig
-.setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
   public TestResourceEstimatorService() {
-s

hadoop git commit: HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

2017-11-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 9bcc9e3f4 -> 7441d14f1


HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

(cherry picked from commit f2df6b8983aace73ad27934bd9f7f4d766e0b25f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7441d14f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7441d14f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7441d14f

Branch: refs/heads/branch-3.0
Commit: 7441d14f1c4b972d786935af9520ce23bf64ef20
Parents: 9bcc9e3
Author: Subru Krishnan 
Authored: Wed Nov 8 18:07:12 2017 -0800
Committer: Subru Krishnan 
Committed: Wed Nov 8 18:08:09 2017 -0800

--
 .../service/ResourceEstimatorService.java   |  5 ++--
 .../service/TestResourceEstimatorService.java   | 25 +---
 2 files changed, 4 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7441d14f/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 0e0e094..5d3aea4 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -34,6 +34,7 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 
+import com.sun.jersey.spi.resource.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
 import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
@@ -56,13 +57,13 @@ import org.slf4j.LoggerFactory;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Singleton;
 
 /**
  * Resource Estimator Service which provides a set of REST APIs for users to
  * use the estimation service.
  */
-@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+@Singleton
+@Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
   private final SkylineStore skylineStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7441d14f/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
index 91a486e..785641c 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
@@ -37,18 +37,12 @@ import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test ResourceEstimatorService.
@@ -70,29 +64,12 @@ public class TestResourceEstimatorService extends 
JerseyTest {
   private long containerMemAlloc;
   private int containerCPUAlloc;
 
-  private static class WebServletModule extends ServletModule {
-@Override protected void configureServlets() {
-  bind(ResourceEstimatorService.class);
-  serve("/*").with(GuiceContainer.class);
-}
-  }
-
-  static {
-GuiceServletConfig
-.setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
   public TestResourceEstimatorService() {
-s

hadoop git commit: HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).

2017-11-08 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 49b4c0b33 -> f2df6b898


HADOOP-15025. Ensure singleton for ResourceEstimatorService. (Rui Li via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2df6b89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2df6b89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2df6b89

Branch: refs/heads/trunk
Commit: f2df6b8983aace73ad27934bd9f7f4d766e0b25f
Parents: 49b4c0b
Author: Subru Krishnan 
Authored: Wed Nov 8 18:07:12 2017 -0800
Committer: Subru Krishnan 
Committed: Wed Nov 8 18:07:12 2017 -0800

--
 .../service/ResourceEstimatorService.java   |  5 ++--
 .../service/TestResourceEstimatorService.java   | 25 +---
 2 files changed, 4 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2df6b89/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 0e0e094..5d3aea4 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -34,6 +34,7 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 
+import com.sun.jersey.spi.resource.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
 import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
@@ -56,13 +57,13 @@ import org.slf4j.LoggerFactory;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Singleton;
 
 /**
  * Resource Estimator Service which provides a set of REST APIs for users to
  * use the estimation service.
  */
-@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+@Singleton
+@Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
   private final SkylineStore skylineStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2df6b89/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
index 91a486e..785641c 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/service/TestResourceEstimatorService.java
@@ -37,18 +37,12 @@ import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test ResourceEstimatorService.
@@ -70,29 +64,12 @@ public class TestResourceEstimatorService extends 
JerseyTest {
   private long containerMemAlloc;
   private int containerCPUAlloc;
 
-  private static class WebServletModule extends ServletModule {
-@Override protected void configureServlets() {
-  bind(ResourceEstimatorService.class);
-  serve("/*").with(GuiceContainer.class);
-}
-  }
-
-  static {
-GuiceServletConfig
-.setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
   public TestResourceEstimatorService() {
-super(new WebAppDescriptor.Builder(
-"org.apache.hadoop.resourceestimator.service")
-.co

hadoop git commit: Updating index.md.vm prior to RC.

2017-11-03 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 35b25e79c -> 6697f0c18


Updating index.md.vm prior to RC.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6697f0c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6697f0c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6697f0c1

Branch: refs/heads/branch-2.9.0
Commit: 6697f0c18b12f1bdb99cbdf81394091f4fef1f0a
Parents: 35b25e7
Author: Subru Krishnan 
Authored: Fri Nov 3 11:48:14 2017 -0700
Committer: Subru Krishnan 
Committed: Fri Nov 3 11:48:14 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 22 +++---
 1 file changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6697f0c1/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index c21770e..4fea404 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -20,6 +20,12 @@ line, building upon the previous stable release 2.8.2.
 
 Here is a short overview of the major features and improvements.
 
+*   Common
+
+*   HADOOP Resource Estimator. See the
+[user documentation](./hadoop-resourceestimator/ResourceEstimator.html)
+for more details.
+
 *   HDFS
 
 *   HDFS Router based federation. See the
@@ -44,14 +50,16 @@ Here is a short overview of the major features and 
improvements.
 [user documentation](./hadoop-yarn/hadoop-yarn-site/YarnUI2.html)
 for more details. 
 
-*   Capacity Scheduler - Changing queue configuration via API. See the
-[user 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html)
-under the **Changing Queue Configuration** section for more details. 
+*   Capacity Scheduler 
+
+*   Changing queue configuration via API. See the
+[user 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html)
+under the **Changing Queue Configuration** section for more 
details. 
 
-*   Capacity Scheduler - Update Resources and Execution Type of an 
-allocated/running container. See the
-[user 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html)
-under the **Updating a Container** section for more details.
+*   Update Resources and Execution Type of an 
+allocated/running container. See the
+[user 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html)
+under the **Updating a Container** section for more details.
 
 
 Getting Started


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
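
Of the features called out above, the Capacity Scheduler "Changing Queue Configuration via API" item is the easiest to show in a couple of lines. The sketch below assumes the configuration-mutation endpoint lives at /ws/v1/cluster/scheduler-conf on the ResourceManager web services port and that the payload matches the linked CapacityScheduler documentation; both are assumptions to verify against that page rather than facts taken from this commit:

  # Read back the current scheduler state (read-only, long-standing endpoint).
  curl "http://<rm-host>:8088/ws/v1/cluster/scheduler"

  # Update a queue property through the configuration-mutation API.
  # The XML element names below are an approximation of the documented
  # sched-conf format; check the CapacityScheduler page for the exact schema.
  curl -X PUT -H "Content-Type: application/xml" \
       -d '<sched-conf>
             <update-queue>
               <queue-name>root.default</queue-name>
               <params>
                 <entry><key>maximum-capacity</key><value>80</value></entry>
               </params>
             </update-queue>
           </sched-conf>' \
       "http://<rm-host>:8088/ws/v1/cluster/scheduler-conf"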



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 e44ff3f60 -> a60bb3b36


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a60bb3b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a60bb3b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a60bb3b3

Branch: refs/heads/branch-2.9
Commit: a60bb3b36f42a44f0e86cadacdf8e5651c324002
Parents: e44ff3f
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:42:38 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a60bb3b3/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..b3e04fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -174,6 +174,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0200fbac5 -> 931987f47


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/931987f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/931987f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/931987f4

Branch: refs/heads/branch-2
Commit: 931987f4702f1d5233503d19d0040c95cabb2259
Parents: 0200fba
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:40:26 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/931987f4/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..b3e04fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -174,6 +174,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cbd81f305 -> 1b6de3fac


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b6de3fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b6de3fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b6de3fa

Branch: refs/heads/branch-3.0
Commit: 1b6de3fac5122edd634d34cc5525d8152e842697
Parents: cbd81f3
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:40:03 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6de3fa/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2aa9a5c..45aa868 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -178,6 +178,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53c0fb7ef -> ad0fff2b4


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad0fff2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad0fff2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad0fff2b

Branch: refs/heads/trunk
Commit: ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f
Parents: 53c0fb7
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:39:23 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad0fff2b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2aa9a5c..45aa868 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -178,6 +178,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12744. More logs when short-circuit read is failed and disabled. Contributed by Weiwei Yang.

2017-11-01 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 d1accde1c -> 355539717


HDFS-12744. More logs when short-circuit read is failed and disabled. 
Contributed by Weiwei Yang.

(cherry picked from commit 56b88b06705441f6f171eec7fb2fa77946ca204b)
(cherry picked from commit 0f20434e18cfedcc36c969cc2df25123153e0b99)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35553971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35553971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35553971

Branch: refs/heads/branch-2.9
Commit: 3555397179bd17925f7d604da5012ae04b61bf1a
Parents: d1accde
Author: Weiwei Yang 
Authored: Wed Nov 1 16:41:45 2017 +0800
Committer: Subru Krishnan 
Committed: Wed Nov 1 18:13:28 2017 -0700

--
 .../org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java| 3 ++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java  | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35553971/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index f4b62d9..8fd990d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -646,7 +646,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 default:
   LOG.warn(this + ": unknown response code " + resp.getStatus() +
   " while attempting to set up short-circuit access. " +
-  resp.getMessage());
+  resp.getMessage() + ". Disabling short-circuit read for DataNode "
+  + datanode + " temporarily.");
   clientContext.getDomainSocketFactory()
   .disableShortCircuitForPath(pathInfo.getPath());
   return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35553971/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 7e0c2bc..1d8db52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -384,6 +384,8 @@ class DataXceiver extends Receiver implements Runnable {
   } catch (IOException e) {
 bld.setStatus(ERROR);
 bld.setMessage(e.getMessage());
+LOG.error("Request short-circuit read file descriptor" +
+" failed with unknown error.", e);
   }
   bld.build().writeDelimitedTo(socketOut);
   if (fis != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
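
Both new messages are aimed at operators trying to work out why short-circuit reads quietly fall back to remote reads, and they can be searched for directly. The log locations below are illustrative and depend on the local log4j configuration:

  # Client side: the DFS client disabled short-circuit reads for a DataNode
  # after an unknown response code to the file-descriptor request.
  grep "Disabling short-circuit read for DataNode" /path/to/client-application.log

  # DataNode side: the short-circuit file-descriptor request itself failed.
  grep "Request short-circuit read file descriptor" /var/log/hadoop-hdfs/*datanode*.log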



hadoop git commit: YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

2017-10-31 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 15b839c44 -> ac4110742


YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

(cherry picked from commit ed24da3dd73c137b44235e525112056ace6d3843)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac411074
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac411074
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac411074

Branch: refs/heads/branch-2
Commit: ac41107423890dc622bc87409f5e9f198574fc21
Parents: 15b839c
Author: Subru Krishnan 
Authored: Tue Oct 31 12:05:43 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 31 12:11:40 2017 -0700

--
 .../impl/FSRegistryOperationsService.java   | 249 
 .../registry/client/types/ServiceRecord.java|  64 
 .../impl/TestFSRegistryOperationsService.java   | 298 +++
 3 files changed, 611 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac411074/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
new file mode 100644
index 000..cfff1bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Filesystem-based implementation of RegistryOperations. This class relies
+ * entirely on the configured FS for security and does no extra checks.
+ */
+public class FSRegistryOperationsService extends CompositeService
+implements RegistryOperations {
+
+  private FileSystem fs;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FSRegistryOperationsService.class);
+  private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal =
+  new RegistryUtils.ServiceRecordMarshal();
+
+  public FSRegistryOperationsService() {
+super(FSRegistryOperationsService.class.getName());
+  }
+
+  @VisibleForTesting
+  

hadoop git commit: YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

2017-10-31 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ae08fe518 -> 0d9503d6b


YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

(cherry picked from commit ed24da3dd73c137b44235e525112056ace6d3843)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d9503d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d9503d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d9503d6

Branch: refs/heads/branch-3.0
Commit: 0d9503d6b52a5e3de2b2e1695515021cc281f6d0
Parents: ae08fe5
Author: Subru Krishnan 
Authored: Tue Oct 31 12:05:43 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 31 12:08:32 2017 -0700

--
 .../impl/FSRegistryOperationsService.java   | 249 
 .../registry/client/types/ServiceRecord.java|  64 
 .../impl/TestFSRegistryOperationsService.java   | 298 +++
 3 files changed, 611 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9503d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
new file mode 100644
index 000..cfff1bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Filesystem-based implementation of RegistryOperations. This class relies
+ * entirely on the configured FS for security and does no extra checks.
+ */
+public class FSRegistryOperationsService extends CompositeService
+implements RegistryOperations {
+
+  private FileSystem fs;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FSRegistryOperationsService.class);
+  private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal =
+  new RegistryUtils.ServiceRecordMarshal();
+
+  public FSRegistryOperationsService() {
+super(FSRegistryOperationsService.class.getName());
+  }
+
+  @VisibleForTesting
+  

hadoop git commit: YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

2017-10-31 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5f681fa82 -> ed24da3dd


YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed24da3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed24da3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed24da3d

Branch: refs/heads/trunk
Commit: ed24da3dd73c137b44235e525112056ace6d3843
Parents: 5f681fa
Author: Subru Krishnan 
Authored: Tue Oct 31 12:05:43 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 31 12:05:43 2017 -0700

--
 .../impl/FSRegistryOperationsService.java   | 249 
 .../registry/client/types/ServiceRecord.java|  64 
 .../impl/TestFSRegistryOperationsService.java   | 298 +++
 3 files changed, 611 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed24da3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
new file mode 100644
index 000..cfff1bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Filesystem-based implementation of RegistryOperations. This class relies
+ * entirely on the configured FS for security and does no extra checks.
+ */
+public class FSRegistryOperationsService extends CompositeService
+implements RegistryOperations {
+
+  private FileSystem fs;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FSRegistryOperationsService.class);
+  private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal =
+  new RegistryUtils.ServiceRecordMarshal();
+
+  public FSRegistryOperationsService() {
+super(FSRegistryOperationsService.class.getName());
+  }
+
+  @VisibleForTesting
+  public FileSystem getFs() {
+return this.fs;
+  }
+
+  @Override
+  pro
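[editor's note] The new FSRegistryOperationsService class is cut off by the archive above. Purely as an illustration, the sketch below shows how a client might drive a filesystem-backed registry. It is a hedged sketch, not part of this patch: the method names (mknode, bind, resolve) and BindFlags come from the pre-existing RegistryOperations interface rather than from this diff, and the registry path is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.impl.FSRegistryOperationsService;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public final class FsRegistrySketch {
  public static void main(String[] args) throws Exception {
    // The service uses whatever FileSystem the Configuration resolves to
    // (the local FS by default) and does no extra security checks of its own.
    FSRegistryOperationsService registry = new FSRegistryOperationsService();
    registry.init(new Configuration());
    registry.start();

    // Hypothetical registry path for one service instance.
    String path = "/users/example/services/demo/instance-0";
    registry.mknode(path, true);                      // create parent directories as needed

    ServiceRecord record = new ServiceRecord();
    record.description = "demo service record";       // ServiceRecord exposes a public description field
    registry.bind(path, record, BindFlags.OVERWRITE); // write (or overwrite) the record at the path

    ServiceRecord resolved = registry.resolve(path);  // read the record back
    System.out.println(resolved.description);

    registry.stop();
  }
}

Since the class delegates all access control to the configured FileSystem, whatever permissions that FS enforces are the only protection for the stored records, which is exactly the caveat the class javadoc above calls out.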

[3/3] hadoop git commit: YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via Subru).

2017-10-26 Thread subru
YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via 
Subru).

(cherry picked from commit 25932da6d1ee56299c8f9911576a42792c435407)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0dceec5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0dceec5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0dceec5

Branch: refs/heads/branch-2
Commit: a0dceec586ed0145acd0cccde65a9a526ced1c0d
Parents: f6fde85
Author: Subru Krishnan 
Authored: Thu Oct 26 12:10:14 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 20:16:01 2017 -0700

--
 .../yarn/api/records/ReservationDefinition.java |  12 +-
 .../impl/pb/ReservationDefinitionPBImpl.java|   5 +
 .../reservation/InMemoryPlan.java   |  48 +++-
 .../reservation/PlanContext.java|  11 +
 .../reservation/ReservationInputValidator.java  |   8 +
 .../resourcemanager/webapp/RMWebServices.java   |  14 +-
 .../webapp/dao/ReservationDefinitionInfo.java   |  12 +
 .../reservation/TestInMemoryPlan.java   | 261 +++
 .../TestReservationInputValidator.java  |  23 ++
 .../webapp/TestRMWebServicesReservation.java|  68 +++--
 .../src/test/resources/submit-reservation.json  |   1 +
 .../src/site/markdown/ResourceManagerRest.md|   1 +
 12 files changed, 429 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0dceec5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index bb9bca2..1fa7cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -155,7 +155,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @return recurrence of this reservation
*/
@@ -173,7 +177,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @param recurrenceExpression recurrence interval of this reservation
*/
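[editor's note] To make the constraint added to this javadoc concrete, here is a small hedged illustration in plain Java; the millisecond values are hypothetical and not taken from the patch.

public final class RecurrenceCheck {
  public static void main(String[] args) {
    // Hypothetical reservation window: arrival at t=0, deadline one hour later (ms).
    long arrival = 0L;
    long deadline = 3_600_000L;
    long duration = deadline - arrival;            // 3,600,000 ms

    // Recurrence expressed as a long (ms): once a day. It must exceed the duration.
    long recurrence = 86_400_000L;
    System.out.println("recurrence > duration: " + (recurrence > duration));              // true

    // Assumed configured maximum period: one week. It must be divisible by the recurrence.
    long maxPeriod = 604_800_000L;
    System.out.println("maxPeriod % recurrence == 0: " + (maxPeriod % recurrence == 0));  // true
  }
}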

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0dceec5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
index 49aef11..450f400 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
@@ -226,6 +226,11 @@ public class ReservationDefinitionPBImpl extends 
ReservationDefinition {
 
   @Override
   public void setRecurrenceExpression(String recurrenceExpression

[2/3] hadoop git commit: YARN-4823. Refactor the nested reservation id field in listReservation to simple string field. (subru via asuresh)

2017-10-26 Thread subru
YARN-4823. Refactor the nested reservation id field in listReservation to 
simple string field. (subru via asuresh)

(cherry picked from commit 00bebb7e58ba6899904e1619d151aa1b2f5b6acd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6fde85c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6fde85c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6fde85c

Branch: refs/heads/branch-2
Commit: f6fde85c5c7dd8cacd5c12011697f0884ad290a1
Parents: 098a6b8
Author: Arun Suresh 
Authored: Fri Mar 25 15:54:38 2016 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 20:04:33 2017 -0700

--
 .../webapp/dao/ReservationIdInfo.java   | 64 
 .../webapp/dao/ReservationInfo.java |  7 +--
 .../webapp/TestRMWebServicesReservation.java| 15 ++---
 3 files changed, 9 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6fde85c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationIdInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationIdInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationIdInfo.java
deleted file mode 100644
index 3a2596a..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationIdInfo.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
-
-import org.apache.hadoop.yarn.api.records.ReservationId;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Simple class that represent a reservation ID.
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class ReservationIdInfo {
-  @XmlElement(name = "cluster-timestamp")
-  private long clusterTimestamp;
-
-  @XmlElement(name = "reservation-id")
-  private long reservationId;
-
-  public ReservationIdInfo() {
-this.clusterTimestamp = 0;
-this.reservationId = 0;
-  }
-
-  public ReservationIdInfo(ReservationId reservationId) {
-this.clusterTimestamp = reservationId.getClusterTimestamp();
-this.reservationId = reservationId.getId();
-  }
-
-  public long getClusterTimestamp() {
-return this.clusterTimestamp;
-  }
-
-  public void setClusterTimestamp(long newClusterTimestamp) {
-this.clusterTimestamp = newClusterTimestamp;
-  }
-
-  public long getReservationId() {
-return this.reservationId;
-  }
-
-  public void setReservationId(long newReservationId) {
-this.reservationId = newReservationId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6fde85c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationInfo.java
index 1a31a8b..8b532ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src

[1/3] hadoop git commit: YARN-4825. Remove redundant code in ClientRMService::listReservations. (subru via asuresh)

2017-10-26 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9897538a9 -> a0dceec58


YARN-4825. Remove redundant code in ClientRMService::listReservations. (subru 
via asuresh)

(cherry picked from commit d82e797b652f248e238bdf1818e6b4a5b91cea7a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/098a6b8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/098a6b8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/098a6b8f

Branch: refs/heads/branch-2
Commit: 098a6b8f193fd58400161eab1aaa7b05b432eb3c
Parents: 9897538
Author: Arun Suresh 
Authored: Thu Mar 24 09:59:55 2016 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 20:04:19 2017 -0700

--
 .../hadoop/yarn/server/resourcemanager/ClientRMService.java | 9 +
 1 file changed, 1 insertion(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/098a6b8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 1fe2237..5b4123d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1381,20 +1381,13 @@ public class ClientRMService extends AbstractService 
implements
 checkReservationACLs(requestInfo.getQueue(),
 AuditConstants.LIST_RESERVATION_REQUEST, reservationId);
 
-ReservationId requestedId = null;
-if (requestInfo.getReservationId() != null
-&& !requestInfo.getReservationId().isEmpty()) {
-  requestedId = ReservationId.parseReservationId(requestInfo
-.getReservationId());
-}
-
 long startTime = Math.max(requestInfo.getStartTime(), 0);
 long endTime = requestInfo.getEndTime() <= -1? Long.MAX_VALUE : requestInfo
 .getEndTime();
 
 Set reservations;
 
-reservations = plan.getReservations(requestedId, new ReservationInterval(
+reservations = plan.getReservations(reservationId, new ReservationInterval(
 startTime, endTime));
 
 List info =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-26 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9897538a/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
new file mode 100644
index 000..12f8dd5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
@@ -0,0 +1,181 @@
+
+
+Resource Estimator Service
+==
+
+* [Resource Estimator Service](#Resource_Estimator_Service)
+* [Overview](#Overview)
+* [Motivation](#Motivation)
+* [Goals](#Goals)
+* [Architecture](#Architecture)
+* [Usage](#Usage)
+* [Example](#Example)
+* [Advanced Configuration](#AdvancedConfig)
+* [Future work](#Future)
+
+Overview
+
+
+### Motivation
+Estimating job resource requirements remains an important and challenging 
problem for enterprise clusters. This is amplified by the ever-increasing 
complexity of workloads, i.e., from traditional batch jobs to interactive 
queries to streaming and, more recently, machine learning jobs. This results in 
jobs relying on multiple computation frameworks such as Tez, MapReduce, Spark, 
etc., and the problem is further compounded by the shared nature of the 
clusters. The current state-of-the-art solution relies on user expertise to 
estimate resource requirements for the jobs (e.g., the number of reducers or 
the container memory size), which is both tedious and inefficient.
+
+Based on the analysis of our cluster workloads, we observe that a large 
portion of jobs (more than 60%) are recurring jobs, which gives us the 
opportunity to estimate job resource requirements automatically based on a 
job's history runs. It is worth noting that jobs usually come from different 
computation frameworks, and their versions may change across runs as well. 
Therefore, we want to come up with a framework-agnostic, black-box solution 
that automatically estimates resource requirements for recurring jobs.
+
+### Goals
+
+*   For a periodic job, analyze its history logs and predict its resource 
requirement for the new run.
+*   Support various types of job logs.
+*   Scale to terabytes of job logs.
+
+### Architecture
+
+The following figure illustrates the implementation architecture of the 
resource estimator.
+
+![The architecture of the resource 
estimator](images/resourceestimator_arch.png)
+
+Hadoop-resourceestimator mainly consists of three modules: Translator, 
SkylineStore and Estimator.
+
+1. `ResourceSkyline` is used to characterize a job's resource utilization 
during its lifespan. More specifically, it uses `RLESparseResourceAllocation` 
() to record the container allocation information. `RecurrenceId` is used to 
identify a specific run of a recurring pipeline. A pipeline could consist of 
multiple jobs, each of which has a `ResourceSkyline` to characterize its 
resource utilization.
+2. `Translator` parses the job logs, extracts their `ResourceSkylines` and 
stores them in the SkylineStore. `SingleLineParser` parses one line in the log 
stream and extracts the `ResourceSkyline`. `LogParser` recursively parses each 
line in the log stream using `SingleLineParser`. Note that logs could have 
different storage formats, so `LogParser` takes a stream of strings as input, 
instead of a File or other formats. Since job logs may have various formats and 
thus require different `SingleLineParser` implementations, `LogParser` 
instantiates the `SingleLineParser` based on user configuration. Currently, 
Hadoop-resourceestimator provides two implementations of `SingleLineParser`: 
`NativeSingleLineParser`, which supports an optimized native format, and 
`RMSingleLineParser`, which parses the YARN ResourceManager logs generated by 
Hadoop systems, since RM logs are widely available in production deployments.
+3. `SkylineStore` serves as the storage layer for Hadoop-resourceestimator and 
consists of two parts. `HistorySkylineStore` stores the `ResourceSkylines` 
extracted by the `Translator`. It supports four actions: addHistory, 
deleteHistory, updateHistory and getHistory. addHistory appends new 
`ResourceSkylines` to a recurring pipeline, while updateHistory deletes all 
the `ResourceSkylines` of a specific recurring pipeline and re-inserts new 
`ResourceSkylines`. `PredictionSkylineStore` stores the predicted 
`RLESparseResourceAllocation` generated by the Estimator. It supports two 
actions: addEstimation and getEstimation.
+
+Currently, Hadoop-resourceestimator provides an in-memory implementation of 
the SkylineStore; a brief usage sketch follows after this list.
+4. `Estimator` predicts re
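[editor's note] The architecture write-up is truncated by the archive at this point. As a minimal, hedged sketch of the flow described above, the following Java mirrors the TestJobMetaData usage that appears later in this thread: it builds a ResourceSkyline for one run of a recurring pipeline and hands it to the in-memory history store. The addHistory parameter shape (a RecurrenceId plus the skylines for that run) is an assumption based on the API names in the text, not something shown in this diff.

import java.util.Collections;

import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
import org.apache.hadoop.resourceestimator.skylinestore.impl.InMemoryStore;
import org.apache.hadoop.resourceestimator.translator.api.JobMetaData;
import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
import org.apache.hadoop.yarn.api.records.Resource;

public final class SkylineStoreSketch {
  public static void main(String[] args) throws Exception {
    LogParserUtil util = new LogParserUtil();

    // One run of the recurring "Fraud Detection" pipeline; the values mirror TestJobMetaData.
    RecurrenceId runId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
    JobMetaData job = new JobMetaData(util.stringToUnixTimestamp("17/07/16 16:27:25"));
    job.setRecurrenceId(runId);
    job.setContainerStart("C1", util.stringToUnixTimestamp("17/07/16 16:27:30"));
    job.setContainerEnd("C1", util.stringToUnixTimestamp("17/07/16 16:37:30"));
    job.setJobFinishTime(util.stringToUnixTimestamp("17/07/16 16:37:45"));
    job.getResourceSkyline().setContainerSpec(Resource.newInstance(1, 1)); // 1 MB, 1 vcore per container
    job.getResourceSkyline().setJobInputDataSize(1024.5);
    job.createSkyline();
    ResourceSkyline skyline = job.getResourceSkyline();

    // Assumed parameter shape: addHistory(pipeline run id, skylines for that run).
    InMemoryStore store = new InMemoryStore();
    store.addHistory(runId, Collections.singletonList(skyline));
  }
}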

[3/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-26 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9897538a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
new file mode 100644
index 000..c944d20
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
@@ -0,0 +1,340 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.solver.impl;
+
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import 
org.apache.hadoop.resourceestimator.skylinestore.api.PredictionSkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import 
org.apache.hadoop.resourceestimator.solver.preprocess.SolverPreprocessor;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.ojalgo.optimisation.Expression;
+import org.ojalgo.optimisation.ExpressionsBasedModel;
+import org.ojalgo.optimisation.Optimisation.Result;
+import org.ojalgo.optimisation.Variable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An LP (Linear Programming) solution to predict a recurring pipeline's
+ * {@link Resource} requirements and to generate Hadoop {@code RDL} requests
+ * which will be used to make recurring resource reservations.
+ */
+public class LpSolver extends BaseSolver implements Solver {
+  private static final Logger LOGGER = LoggerFactory.getLogger(LpSolver.class);
+  private final SolverPreprocessor preprocessor = new SolverPreprocessor();
+  /**
+   * Controls the balance between over-allocation and under-allocation.
+   */
+  private double alpha;
+  /**
+   * Controls the generalization of the solver.
+   */
+  private double beta;
+  /**
+   * The minimum number of job runs required to run the solver.
+   */
+  private int minJobRuns;
+  /**
+   * The time interval which is used to discretize job execution.
+   */
+  private int timeInterval;
+  /**
+   * The PredictionSkylineStore to store the predicted ResourceSkyline for new
+   * run.
+   */
+  private PredictionSkylineStore predictionSkylineStore;
+
+  @Override public final void init(final Configuration config,
+  PredictionSkylineStore skylineStore) {
+this.alpha =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
+this.beta =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
+this.minJobRuns =
+config.getInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 
1);
+this.timeInterval =
+config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
+this.predictionSkylineStore = skylineStore;
+  }
+
+  /**
+   * Generate over-allocation constraints.
+   *
+   * @param lpModelthe LP model.
+   * @param cJobITimeK actual container allocation for job i in time
+   *   interval k.
+   * @param oa container over-allocation.
+   * @param x  predicted container allocation.
+   * @param indexJobITimeK index for job i at time interval k.
+   * @param timeK  index for time interval k.
+ 

[5/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-26 Thread subru
HADOOP-14840. Tool to estimate resource requirements of an application pipeline 
based on prior executions. (Rui Li via Subru).

(cherry picked from commit 625039ef20e6011ab360131d70582a6e4bf2ec1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9897538a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9897538a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9897538a

Branch: refs/heads/branch-2
Commit: 9897538a968cbde58aba997a37143951648564d9
Parents: 7c26ae5
Author: Subru Krishnan 
Authored: Wed Oct 25 15:51:27 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 19:14:24 2017 -0700

--
 LICENSE.txt |   1 +
 .../assemblies/hadoop-resourceestimator.xml |  42 ++
 .../main/resources/assemblies/hadoop-tools.xml  |  11 +
 hadoop-project/pom.xml  |   5 +
 hadoop-project/src/site/site.xml|   1 +
 hadoop-tools/hadoop-resourceestimator/README.md |  19 +
 hadoop-tools/hadoop-resourceestimator/pom.xml   | 175 +++
 .../src/config/checkstyle.xml   |  50 ++
 .../src/main/bin/estimator.cmd  |  52 +++
 .../src/main/bin/estimator.sh   |  71 +++
 .../src/main/bin/start-estimator.cmd|  37 ++
 .../src/main/bin/start-estimator.sh |  42 ++
 .../src/main/bin/stop-estimator.cmd |  37 ++
 .../src/main/bin/stop-estimator.sh  |  42 ++
 .../src/main/conf/resourceestimator-config.xml  |  85 
 .../src/main/data/resourceEstimatorService.txt  |   2 +
 .../common/api/RecurrenceId.java|  95 
 .../common/api/ResourceSkyline.java | 211 +
 .../common/api/package-info.java|  23 +
 .../config/ResourceEstimatorConfiguration.java  | 125 +
 .../common/config/ResourceEstimatorUtil.java|  81 
 .../common/config/package-info.java |  23 +
 .../exception/ResourceEstimatorException.java   |  35 ++
 .../common/exception/package-info.java  |  23 +
 .../RLESparseResourceAllocationSerDe.java   |  77 +++
 .../common/serialization/ResourceSerDe.java |  61 +++
 .../common/serialization/package-info.java  |  24 +
 .../service/ResourceEstimatorServer.java| 146 ++
 .../service/ResourceEstimatorService.java   | 238 ++
 .../resourceestimator/service/ShutdownHook.java |  45 ++
 .../resourceestimator/service/package-info.java |  23 +
 .../skylinestore/api/HistorySkylineStore.java   |  99 
 .../api/PredictionSkylineStore.java |  60 +++
 .../skylinestore/api/SkylineStore.java  |  30 ++
 .../skylinestore/api/package-info.java  |  23 +
 .../DuplicateRecurrenceIdException.java |  33 ++
 .../EmptyResourceSkylineException.java  |  33 ++
 .../exceptions/NullPipelineIdException.java |  32 ++
 ...ullRLESparseResourceAllocationException.java |  33 ++
 .../exceptions/NullRecurrenceIdException.java   |  32 ++
 .../NullResourceSkylineException.java   |  32 ++
 .../RecurrenceIdNotFoundException.java  |  33 ++
 .../exceptions/SkylineStoreException.java   |  33 ++
 .../skylinestore/exceptions/package-info.java   |  24 +
 .../skylinestore/impl/InMemoryStore.java| 256 ++
 .../skylinestore/impl/package-info.java |  23 +
 .../validator/SkylineStoreValidator.java| 118 +
 .../skylinestore/validator/package-info.java|  23 +
 .../resourceestimator/solver/api/Solver.java|  76 +++
 .../solver/api/package-info.java|  23 +
 .../exceptions/InvalidInputException.java   |  34 ++
 .../exceptions/InvalidSolverException.java  |  34 ++
 .../solver/exceptions/SolverException.java  |  34 ++
 .../solver/exceptions/package-info.java |  24 +
 .../solver/impl/BaseSolver.java |  94 
 .../resourceestimator/solver/impl/LpSolver.java | 340 ++
 .../solver/impl/package-info.java   |  23 +
 .../solver/preprocess/SolverPreprocessor.java   | 222 +
 .../solver/preprocess/package-info.java |  23 +
 .../translator/api/JobMetaData.java | 163 +++
 .../translator/api/LogParser.java   |  65 +++
 .../translator/api/SingleLineParser.java|  52 +++
 .../translator/api/package-info.java|  23 +
 .../exceptions/DataFieldNotFoundException.java  |  32 ++
 .../translator/exceptions/package-info.java |  23 +
 .../translator/impl/BaseLogParser.java  | 125 +
 .../translator/impl/LogParserUtil.java  |  97 
 .../translator/impl/NativeSingleLineParser.java | 120 +
 .../translator/impl/RmSingleLineParser.java | 203 
 .../translator/impl/package-info.java   |  23 +
 .../translator/validator/ParserValidator.java   |  41 ++
 .../translator/validator/package-info.java

[1/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-26 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7c26ae506 -> 9897538a9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9897538a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
new file mode 100644
index 000..69ba480
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
@@ -0,0 +1,163 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.translator.api;
+
+import java.text.ParseException;
+
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test JobMetaData.
+ */
+public class TestJobMetaData {
+  /**
+   * TODO: parametrize this test.
+   */
+  private LogParserUtil logParserUtil = new LogParserUtil();
+
+  private JobMetaData jobMetaData;
+  private RecurrenceId recurrenceId;
+
+  @Before public final void setup() throws ParseException {
+recurrenceId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+jobMetaData = new JobMetaData(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25"));
+jobMetaData.setRecurrenceId(recurrenceId);
+jobMetaData.setContainerStart("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:30"));
+jobMetaData.setContainerEnd("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:30"));
+jobMetaData.setContainerStart("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:40"));
+jobMetaData.setContainerEnd("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:40"));
+jobMetaData.setJobFinishTime(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:45"));
+final Resource containerAlloc = Resource.newInstance(1, 1);
+jobMetaData.getResourceSkyline().setContainerSpec(containerAlloc);
+jobMetaData.getResourceSkyline().setJobInputDataSize(1024.5);
+jobMetaData.createSkyline();
+  }
+
+  @Test public final void testGetContainerSpec() {
+final Resource containerAlloc =
+jobMetaData.getResourceSkyline().getContainerSpec();
+final Resource containerAlloc2 = Resource.newInstance(1, 1);
+Assert.assertEquals(containerAlloc.getMemorySize(),
+containerAlloc2.getMemorySize());
+Assert.assertEquals(containerAlloc.getVirtualCores(),
+containerAlloc2.getVirtualCores());
+  }
+
+  @Test public final void testGetJobSize() {
+Assert.assertEquals(jobMetaData.getResourceSkyline().getJobInputDataSize(),
+1024.5, 0);
+  }
+
+  @Test public final void testGetRecurrenceeId() {
+final RecurrenceId recurrenceIdTest =
+new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+Assert.assertEquals(recurrenceIdTest, jobMetaData.getRecurrenceId());
+  }
+
+  @Test public final void testStringToUnixTimestamp() throws ParseException {
+final long submissionTime =
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25");
+
Assert.assertEquals(jobMetaData.getResourceSkyline().getJobSubmissionTime(),
+submissionTime);
+  }
+
+  @Test public final void testResourceSkyline() {
+final RLESparseResourceAllocation skylineList =
+jobMetaData.getResourceSkyline().getSkylineList();
+final int containerCPU =
+jobMetaData.getResourceSkyline().getContainerSpec().getVirtualCores();
+int k;
+for (k = 0; k < 5; k++) {
+  Assert.assertEquals(0,
+  skylineList.getCapacityAtTime(k).getVirtualCores() / co

[4/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-26 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9897538a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
new file mode 100644
index 000..92e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -0,0 +1,238 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.service;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorUtil;
+import 
org.apache.hadoop.resourceestimator.common.exception.ResourceEstimatorException;
+import 
org.apache.hadoop.resourceestimator.common.serialization.RLESparseResourceAllocationSerDe;
+import org.apache.hadoop.resourceestimator.common.serialization.ResourceSerDe;
+import org.apache.hadoop.resourceestimator.skylinestore.api.SkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import org.apache.hadoop.resourceestimator.translator.api.LogParser;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.Singleton;
+
+/**
+ * Resource Estimator Service which provides a set of REST APIs for users to
+ * use the estimation service.
+ */
+@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(ResourceEstimatorService.class);
+  private static SkylineStore skylineStore;
+  private static Solver solver;
+  private static LogParser logParser;
+  private static LogParserUtil logParserUtil = new LogParserUtil();
+  private static Configuration config;
+  private static Gson gson;
+  private static Type rleType;
+  private static Type skylineStoreType;
+
+  public ResourceEstimatorService() throws ResourceEstimatorException {
+if (skylineStore == null) {
+  try {
+config = new Configuration();
+config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+SkylineStore.class);
+logParser = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
+LogParser.class);
+logParser.init(config, skylineStore);
+logParserUtil.setLogParser(logParser);
+solver = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SOLVER_PROVIDER,
+

hadoop git commit: YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via Subru).

2017-10-26 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d92dddaf7 -> 9665971a6


YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via 
Subru).

(cherry picked from commit 25932da6d1ee56299c8f9911576a42792c435407)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9665971a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9665971a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9665971a

Branch: refs/heads/branch-3.0
Commit: 9665971a6550cf0d0fee3edbc30f246dbfde8299
Parents: d92ddda
Author: Subru Krishnan 
Authored: Thu Oct 26 12:10:14 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 12:11:08 2017 -0700

--
 .../yarn/api/records/ReservationDefinition.java |  12 +-
 .../impl/pb/ReservationDefinitionPBImpl.java|   5 +
 .../reservation/InMemoryPlan.java   |  48 +++-
 .../reservation/PlanContext.java|  11 +
 .../reservation/ReservationInputValidator.java  |   8 +
 .../resourcemanager/webapp/RMWebServices.java   |  14 +-
 .../webapp/dao/ReservationDefinitionInfo.java   |  12 +
 .../reservation/TestInMemoryPlan.java   | 261 +++
 .../TestReservationInputValidator.java  |  23 ++
 .../webapp/TestRMWebServicesReservation.java|  68 +++--
 .../src/test/resources/submit-reservation.json  |   1 +
 .../src/site/markdown/ResourceManagerRest.md|   1 +
 12 files changed, 429 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9665971a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index bb9bca2..1fa7cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -155,7 +155,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @return recurrence of this reservation
*/
@@ -173,7 +177,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @param recurrenceExpression recurrence interval of this reservation
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9665971a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
index 49aef11..450f400 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
@@ -226,6 +226,11 @@ public class ReservationDefinitionPBImpl extends 
ReservationDefinit

hadoop git commit: YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via Subru).

2017-10-26 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2da654e34 -> 25932da6d


YARN-5516. Add REST API for supporting recurring reservations. (Sean Po via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25932da6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25932da6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25932da6

Branch: refs/heads/trunk
Commit: 25932da6d1ee56299c8f9911576a42792c435407
Parents: 2da654e
Author: Subru Krishnan 
Authored: Thu Oct 26 12:10:14 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 26 12:10:14 2017 -0700

--
 .../yarn/api/records/ReservationDefinition.java |  12 +-
 .../impl/pb/ReservationDefinitionPBImpl.java|   5 +
 .../reservation/InMemoryPlan.java   |  48 +++-
 .../reservation/PlanContext.java|  11 +
 .../reservation/ReservationInputValidator.java  |   8 +
 .../resourcemanager/webapp/RMWebServices.java   |  14 +-
 .../webapp/dao/ReservationDefinitionInfo.java   |  12 +
 .../reservation/TestInMemoryPlan.java   | 261 +++
 .../TestReservationInputValidator.java  |  23 ++
 .../webapp/TestRMWebServicesReservation.java|  68 +++--
 .../src/test/resources/submit-reservation.json  |   1 +
 .../src/site/markdown/ResourceManagerRest.md|   1 +
 12 files changed, 429 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25932da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index bb9bca2..1fa7cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -155,7 +155,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @return recurrence of this reservation
*/
@@ -173,7 +177,11 @@ public abstract class ReservationDefinition {
* are explicitly cancelled and have higher priority than non-periodic jobs
* (during initial placement and replanning). Periodic job allocations are
* consistent across runs (flexibility in allocation is leveraged only during
-   * initial placement, allocations remain consistent thereafter).
+   * initial placement, allocations remain consistent thereafter). Note that
+   * as a long, the recurrence expression must be greater than the duration of
+   * the reservation (deadline - arrival). Also note that the configured max
+   * period must be divisible by the recurrence expression if expressed as a
+   * long.
*
* @param recurrenceExpression recurrence interval of this reservation
*/
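
The arithmetic the new javadoc spells out can be illustrated with a small,
self-contained snippet. This is only a sketch of the stated rule, not the YARN
validator code: the hypothetical helper below checks that a long recurrence
period exceeds the reservation duration (deadline - arrival) and that the
configured maximum period is divisible by it.

// Illustrative check of the documented rule only; not ReservationInputValidator.
public final class RecurrenceRuleSketch {
  static boolean isValidRecurrence(long arrival, long deadline,
      long recurrenceMs, long maxPeriodMs) {
    long duration = deadline - arrival;
    return recurrenceMs > duration && maxPeriodMs % recurrenceMs == 0;
  }

  public static void main(String[] args) {
    long arrival = 0L;
    long deadline = 3_600_000L;                          // one-hour reservation
    System.out.println(isValidRecurrence(arrival, deadline,
        86_400_000L, 7 * 86_400_000L));                  // daily period, weekly max -> true
    System.out.println(isValidRecurrence(arrival, deadline,
        1_800_000L, 86_400_000L));                       // period shorter than duration -> false
  }
}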

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25932da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
index 49aef11..450f400 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
@@ -226,6 +226,11 @@ public class ReservationDefinitionPBImpl extends 
ReservationDefinition {
 
   @Override
   public void setRecurrenceExpression(String recurrenceExpress

[4/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da7f989d/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
new file mode 100644
index 000..92e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -0,0 +1,238 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.service;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorUtil;
+import 
org.apache.hadoop.resourceestimator.common.exception.ResourceEstimatorException;
+import 
org.apache.hadoop.resourceestimator.common.serialization.RLESparseResourceAllocationSerDe;
+import org.apache.hadoop.resourceestimator.common.serialization.ResourceSerDe;
+import org.apache.hadoop.resourceestimator.skylinestore.api.SkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import org.apache.hadoop.resourceestimator.translator.api.LogParser;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.Singleton;
+
+/**
+ * Resource Estimator Service which provides a set of REST APIs for users to
+ * use the estimation service.
+ */
+@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(ResourceEstimatorService.class);
+  private static SkylineStore skylineStore;
+  private static Solver solver;
+  private static LogParser logParser;
+  private static LogParserUtil logParserUtil = new LogParserUtil();
+  private static Configuration config;
+  private static Gson gson;
+  private static Type rleType;
+  private static Type skylineStoreType;
+
+  public ResourceEstimatorService() throws ResourceEstimatorException {
+if (skylineStore == null) {
+  try {
+config = new Configuration();
+config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+SkylineStore.class);
+logParser = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
+LogParser.class);
+logParser.init(config, skylineStore);
+logParserUtil.setLogParser(logParser);
+solver = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SOLVER_PROVIDER,
+
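
The constructor above (cut off here) wires the SkylineStore, LogParser and
Solver behind a JAX-RS resource rooted at /resourceestimator. As a rough sketch
of the shape of one such endpoint, using only the javax.ws.rs annotations
imported above: the sub-path, method name and JSON body below are illustrative
assumptions, not the signatures from this patch.

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

// Hypothetical endpoint sketch: fetch stored history for one pipeline as JSON.
// In the real service such a method would delegate to the static skylineStore
// and serialize the result with the Gson instance; the path and name here are
// assumptions for illustration.
@Path("/resourceestimator")
public class EstimatorEndpointSketch {
  @GET
  @Path("/skylinestore/{pipelineId}")
  @Produces(MediaType.APPLICATION_JSON)
  public String getHistory(@PathParam("pipelineId") String pipelineId) {
    return "{\"pipelineId\":\"" + pipelineId + "\"}";  // placeholder payload
  }
}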

[3/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da7f989d/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
new file mode 100644
index 000..c944d20
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
@@ -0,0 +1,340 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.solver.impl;
+
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import 
org.apache.hadoop.resourceestimator.skylinestore.api.PredictionSkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import 
org.apache.hadoop.resourceestimator.solver.preprocess.SolverPreprocessor;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.ojalgo.optimisation.Expression;
+import org.ojalgo.optimisation.ExpressionsBasedModel;
+import org.ojalgo.optimisation.Optimisation.Result;
+import org.ojalgo.optimisation.Variable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A LP(Linear Programming) solution to predict recurring pipeline's
+ * {@link Resource} requirements, and generate Hadoop {@code RDL} requests 
which
+ * will be used to make recurring resource reservation.
+ */
+public class LpSolver extends BaseSolver implements Solver {
+  private static final Logger LOGGER = LoggerFactory.getLogger(LpSolver.class);
+  private final SolverPreprocessor preprocessor = new SolverPreprocessor();
+  /**
+   * Controls the balance between over-allocation and under-allocation.
+   */
+  private double alpha;
+  /**
+   * Controls the generalization of the solver.
+   */
+  private double beta;
+  /**
+   * The minimum number of job runs required to run the solver.
+   */
+  private int minJobRuns;
+  /**
+   * The time interval which is used to discretize job execution.
+   */
+  private int timeInterval;
+  /**
+   * The PredictionSkylineStore to store the predicted ResourceSkyline for new
+   * run.
+   */
+  private PredictionSkylineStore predictionSkylineStore;
+
+  @Override public final void init(final Configuration config,
+  PredictionSkylineStore skylineStore) {
+this.alpha =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
+this.beta =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
+this.minJobRuns =
+config.getInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 
1);
+this.timeInterval =
+config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
+this.predictionSkylineStore = skylineStore;
+  }
+
+  /**
+   * Generate over-allocation constraints.
+   *
+   * @param lpModelthe LP model.
+   * @param cJobITimeK actual container allocation for job i in time
+   *   interval k.
+   * @param oa container over-allocation.
+   * @param x  predicted container allocation.
+   * @param indexJobITimeK index for job i at time interval k.
+   * @param timeK  index for time interval k.
+ 
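
The javadoc above (truncated) describes per-interval over-allocation
constraints in the LP. As a schematic reading only, not the exact constraint
set from this patch: for each job i and interval k, over-allocation is the
amount by which the predicted allocation x exceeds the actual allocation c, and
alpha (read from SOLVER_ALPHA_KEY in init() above, default 0.1) trades over-
against under-allocation in the objective.

// Schematic reading of the trade-off only; the real LpSolver builds these
// constraints with ojAlgo Expressions rather than computing costs directly.
public final class LpTradeoffSketch {
  static double intervalCost(double actual, double predicted, double alpha) {
    double over = Math.max(0.0, predicted - actual);   // containers reserved but unused
    double under = Math.max(0.0, actual - predicted);  // containers needed but not reserved
    return alpha * over + (1.0 - alpha) * under;
  }

  public static void main(String[] args) {
    double alpha = 0.1;                                 // default of SOLVER_ALPHA_KEY
    System.out.println(intervalCost(10, 12, alpha));    // over-allocated by 2 -> 0.2
    System.out.println(intervalCost(10, 7, alpha));     // under-allocated by 3 -> 2.7
  }
}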

[1/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 04dfb4898 -> da7f989d8


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da7f989d/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
new file mode 100644
index 000..69ba480
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
@@ -0,0 +1,163 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.translator.api;
+
+import java.text.ParseException;
+
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test JobMetaData.
+ */
+public class TestJobMetaData {
+  /**
+   * TODO: parametrize this test.
+   */
+  private LogParserUtil logParserUtil = new LogParserUtil();
+
+  private JobMetaData jobMetaData;
+  private RecurrenceId recurrenceId;
+
+  @Before public final void setup() throws ParseException {
+recurrenceId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+jobMetaData = new JobMetaData(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25"));
+jobMetaData.setRecurrenceId(recurrenceId);
+jobMetaData.setContainerStart("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:30"));
+jobMetaData.setContainerEnd("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:30"));
+jobMetaData.setContainerStart("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:40"));
+jobMetaData.setContainerEnd("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:40"));
+jobMetaData.setJobFinishTime(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:45"));
+final Resource containerAlloc = Resource.newInstance(1, 1);
+jobMetaData.getResourceSkyline().setContainerSpec(containerAlloc);
+jobMetaData.getResourceSkyline().setJobInputDataSize(1024.5);
+jobMetaData.createSkyline();
+  }
+
+  @Test public final void testGetContainerSpec() {
+final Resource containerAlloc =
+jobMetaData.getResourceSkyline().getContainerSpec();
+final Resource containerAlloc2 = Resource.newInstance(1, 1);
+Assert.assertEquals(containerAlloc.getMemorySize(),
+containerAlloc2.getMemorySize());
+Assert.assertEquals(containerAlloc.getVirtualCores(),
+containerAlloc2.getVirtualCores());
+  }
+
+  @Test public final void testGetJobSize() {
+Assert.assertEquals(jobMetaData.getResourceSkyline().getJobInputDataSize(),
+1024.5, 0);
+  }
+
+  @Test public final void testGetRecurrenceeId() {
+final RecurrenceId recurrenceIdTest =
+new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+Assert.assertEquals(recurrenceIdTest, jobMetaData.getRecurrenceId());
+  }
+
+  @Test public final void testStringToUnixTimestamp() throws ParseException {
+final long submissionTime =
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25");
+
Assert.assertEquals(jobMetaData.getResourceSkyline().getJobSubmissionTime(),
+submissionTime);
+  }
+
+  @Test public final void testResourceSkyline() {
+final RLESparseResourceAllocation skylineList =
+jobMetaData.getResourceSkyline().getSkylineList();
+final int containerCPU =
+jobMetaData.getResourceSkyline().getContainerSpec().getVirtualCores();
+int k;
+for (k = 0; k < 5; k++) {
+  Assert.assertEquals(0,
+  skylineList.getCapacityAtTime(k).getVirtualCores() / co

[5/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
HADOOP-14840. Tool to estimate resource requirements of an application pipeline 
based on prior executions. (Rui Li via Subru).

(cherry picked from commit 625039ef20e6011ab360131d70582a6e4bf2ec1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da7f989d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da7f989d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da7f989d

Branch: refs/heads/branch-2
Commit: da7f989d86d28ee6f4a7ac410fc8a4a163e11012
Parents: 04dfb48
Author: Subru Krishnan 
Authored: Wed Oct 25 15:51:27 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 17:05:35 2017 -0700

--
 LICENSE.txt |   1 +
 .../assemblies/hadoop-resourceestimator.xml |  42 ++
 .../main/resources/assemblies/hadoop-tools.xml  |  11 +
 hadoop-project/pom.xml  |   5 +
 hadoop-project/src/site/site.xml|   1 +
 hadoop-tools/hadoop-resourceestimator/README.md |  19 +
 hadoop-tools/hadoop-resourceestimator/pom.xml   | 175 +++
 .../src/config/checkstyle.xml   |  50 ++
 .../src/main/bin/estimator.cmd  |  52 +++
 .../src/main/bin/estimator.sh   |  71 +++
 .../src/main/bin/start-estimator.cmd|  37 ++
 .../src/main/bin/start-estimator.sh |  42 ++
 .../src/main/bin/stop-estimator.cmd |  37 ++
 .../src/main/bin/stop-estimator.sh  |  42 ++
 .../src/main/conf/resourceestimator-config.xml  |  85 
 .../src/main/data/resourceEstimatorService.txt  |   2 +
 .../common/api/RecurrenceId.java|  95 
 .../common/api/ResourceSkyline.java | 211 +
 .../common/api/package-info.java|  23 +
 .../config/ResourceEstimatorConfiguration.java  | 125 +
 .../common/config/ResourceEstimatorUtil.java|  81 
 .../common/config/package-info.java |  23 +
 .../exception/ResourceEstimatorException.java   |  35 ++
 .../common/exception/package-info.java  |  23 +
 .../RLESparseResourceAllocationSerDe.java   |  77 +++
 .../common/serialization/ResourceSerDe.java |  61 +++
 .../common/serialization/package-info.java  |  24 +
 .../service/ResourceEstimatorServer.java| 146 ++
 .../service/ResourceEstimatorService.java   | 238 ++
 .../resourceestimator/service/ShutdownHook.java |  45 ++
 .../resourceestimator/service/package-info.java |  23 +
 .../skylinestore/api/HistorySkylineStore.java   |  99 
 .../api/PredictionSkylineStore.java |  60 +++
 .../skylinestore/api/SkylineStore.java  |  30 ++
 .../skylinestore/api/package-info.java  |  23 +
 .../DuplicateRecurrenceIdException.java |  33 ++
 .../EmptyResourceSkylineException.java  |  33 ++
 .../exceptions/NullPipelineIdException.java |  32 ++
 ...ullRLESparseResourceAllocationException.java |  33 ++
 .../exceptions/NullRecurrenceIdException.java   |  32 ++
 .../NullResourceSkylineException.java   |  32 ++
 .../RecurrenceIdNotFoundException.java  |  33 ++
 .../exceptions/SkylineStoreException.java   |  33 ++
 .../skylinestore/exceptions/package-info.java   |  24 +
 .../skylinestore/impl/InMemoryStore.java| 256 ++
 .../skylinestore/impl/package-info.java |  23 +
 .../validator/SkylineStoreValidator.java| 118 +
 .../skylinestore/validator/package-info.java|  23 +
 .../resourceestimator/solver/api/Solver.java|  76 +++
 .../solver/api/package-info.java|  23 +
 .../exceptions/InvalidInputException.java   |  34 ++
 .../exceptions/InvalidSolverException.java  |  34 ++
 .../solver/exceptions/SolverException.java  |  34 ++
 .../solver/exceptions/package-info.java |  24 +
 .../solver/impl/BaseSolver.java |  94 
 .../resourceestimator/solver/impl/LpSolver.java | 340 ++
 .../solver/impl/package-info.java   |  23 +
 .../solver/preprocess/SolverPreprocessor.java   | 219 +
 .../solver/preprocess/package-info.java |  23 +
 .../translator/api/JobMetaData.java | 163 +++
 .../translator/api/LogParser.java   |  65 +++
 .../translator/api/SingleLineParser.java|  52 +++
 .../translator/api/package-info.java|  23 +
 .../exceptions/DataFieldNotFoundException.java  |  32 ++
 .../translator/exceptions/package-info.java |  23 +
 .../translator/impl/BaseLogParser.java  | 125 +
 .../translator/impl/LogParserUtil.java  |  97 
 .../translator/impl/NativeSingleLineParser.java | 120 +
 .../translator/impl/RmSingleLineParser.java | 203 
 .../translator/impl/package-info.java   |  23 +
 .../translator/validator/ParserValidator.java   |  41 ++
 .../translator/validator/package-info.java

[2/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da7f989d/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
new file mode 100644
index 000..12f8dd5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
@@ -0,0 +1,181 @@
+
+
+Resource Estimator Service
+==
+
+* [Resource Estimator Service](#Resource_Estimator_Service)
+* [Overview](#Overview)
+* [Motivation](#Motivation)
+* [Goals](#Goals)
+* [Architecture](#Architecture)
+* [Usage](#Usage)
+* [Example](#Example)
+* [Advanced Configuration](#AdvancedConfig)
+* [Future work](#Future)
+
+Overview
+
+
+### Motivation
+Estimating job resource requirements remains an important and challenging 
problem for enterprise clusters. This is amplified by the ever-increasing 
complexity of workloads, i.e. from traditional batch jobs to interactive 
queries to streaming and, recently, machine learning jobs. This results in jobs 
relying on multiple computation frameworks such as Tez, MapReduce, Spark, etc., 
and the problem is further compounded by the shared nature of the clusters. 
The current state-of-the-art solution relies on user expertise to estimate 
resource requirements for the jobs (e.g. number of reducers or container 
memory size), which is both tedious and inefficient.
+
+Based on the analysis of our cluster workloads, we observe that a large 
portion of jobs (more than 60%) are recurring jobs, giving us the opportunity 
to automatically estimate job resource requirements based on a job's history 
runs. It is worth noting that jobs usually come from different computation 
frameworks, and the version may change across runs as well. Therefore, we want 
to come up with a framework-agnostic black-box solution that automatically 
estimates resource requirements for the recurring jobs.
+
+### Goals
+
+*   For a periodic job, analyze its history logs and predict its resource 
requirement for the new run.
+*   Support various types of job logs.
+*   Scale to terabytes of job logs.
+
+### Architecture
+
+The following figure illustrates the implementation architecture of the 
resource estimator.
+
+![The architecture of the resource 
estimator](images/resourceestimator_arch.png)
+
+Hadoop-resourceestimator mainly consists of three modules: Translator, 
SkylineStore and Estimator.
+
+1. `ResourceSkyline` is used to characterize a job's resource utilization 
during its lifespan. More specifically, it uses `RLESparseResourceAllocation` 
to record the container allocation information. `RecurrenceId` is used to 
identify a specific run of a recurring pipeline. A pipeline could consist of 
multiple jobs, each of which has a `ResourceSkyline` to characterize its 
resource utilization.
+2. `Translator` parses the job logs, extracts their `ResourceSkylines` and 
stores them in the SkylineStore. `SingleLineParser` parses one line in the log 
stream and extracts the `ResourceSkyline`. `LogParser` recursively parses each 
line in the log stream using `SingleLineParser`. Note that logs could have 
different storage formats, so `LogParser` takes a stream of strings as input, 
instead of a File or other formats. Since job logs may have various formats and 
thus require different `SingleLineParser` implementations, `LogParser` 
instantiates the `SingleLineParser` based on user configuration. Currently 
Hadoop-resourceestimator provides two implementations of `SingleLineParser`: 
`NativeSingleLineParser`, which supports an optimized native format, and 
`RMSingleLineParser`, which parses the YARN ResourceManager logs generated in 
Hadoop systems, since RM logs are widely available in production deployments.
+3. `SkylineStore` serves as the storage layer for Hadoop-resourceestimator and 
consists of two parts. `HistorySkylineStore` stores the `ResourceSkylines` 
extracted by the `Translator`. It supports four actions: addHistory, 
deleteHistory, updateHistory and getHistory. addHistory appends new 
`ResourceSkylines` to the recurring pipelines, while updateHistory deletes all 
the `ResourceSkylines` of a specific recurring pipeline and re-inserts new 
`ResourceSkylines`. `PredictionSkylineStore` stores the predicted 
`RLESparseResourceAllocation` generated by the Estimator. It supports two 
actions: addEstimation and getEstimation.
+
+Currently Hadoop-resourceestimator provides an in-memory implementation of 
the SkylineStore.
+4. `Estimator` predicts re
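
Taken together, the modules compose into a parse -> store -> estimate flow. The
following is a minimal sketch of that wiring, mirroring the provider
construction shown in ResourceEstimatorService earlier in this thread; the
DEFAULT_SOLVER_PROVIDER constant and the solver.init(config, skylineStore) call
are assumed by symmetry with the visible skyline-store and translator wiring,
and the actual log parsing and prediction happen through methods not shown in
this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorUtil;
import org.apache.hadoop.resourceestimator.skylinestore.api.SkylineStore;
import org.apache.hadoop.resourceestimator.solver.api.Solver;
import org.apache.hadoop.resourceestimator.translator.api.LogParser;
import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;

// Sketch of the Translator -> SkylineStore -> Estimator wiring described above.
// Only the createProviderInstance/init calls visible in this thread are used;
// DEFAULT_SOLVER_PROVIDER and passing the combined SkylineStore to the solver
// are assumptions made for illustration.
public final class EstimatorFlowSketch {
  public static void main(String[] args) throws Exception {
    Configuration config = new Configuration();
    config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);

    SkylineStore skylineStore = ResourceEstimatorUtil.createProviderInstance(
        config,
        ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
        ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
        SkylineStore.class);

    LogParser logParser = ResourceEstimatorUtil.createProviderInstance(config,
        ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
        ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
        LogParser.class);
    logParser.init(config, skylineStore);     // Translator writes skylines into the store

    LogParserUtil logParserUtil = new LogParserUtil();
    logParserUtil.setLogParser(logParser);    // job logs are fed through this helper

    Solver solver = ResourceEstimatorUtil.createProviderInstance(config,
        ResourceEstimatorConfiguration.SOLVER_PROVIDER,
        ResourceEstimatorConfiguration.DEFAULT_SOLVER_PROVIDER,  // assumed constant
        Solver.class);
    solver.init(config, skylineStore);        // Estimator reads history, writes predictions
  }
}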

hadoop git commit: Reverting to 6aa75106812007eeac07ce09ef4e0a4a44f056aa as YARN-4849 pushed a stale LICENSE file.

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 74aee8673 -> 04dfb4898


Reverting to 6aa75106812007eeac07ce09ef4e0a4a44f056aa as YARN-4849 pushed a 
stale LICENSE file.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04dfb489
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04dfb489
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04dfb489

Branch: refs/heads/branch-2
Commit: 04dfb4898da921b812af5e083d65274cb4948ce1
Parents: 74aee86
Author: Subru Krishnan 
Authored: Wed Oct 25 16:47:58 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 16:47:58 2017 -0700

--
 LICENSE.txt | 645 ++-
 1 file changed, 65 insertions(+), 580 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04dfb489/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 04d2daa..ebabcf9 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -246,11 +246,48 @@ For the org.apache.hadoop.util.bloom.* classes:
 For portions of the native implementation of slicing-by-8 CRC calculation
 in src/main/native/src/org/apache/hadoop/util:
 
-/**
- *   Copyright 2008,2009,2010 Massachusetts Institute of Technology.
- *   All rights reserved. Use of this source code is governed by a
- *   BSD-style license that can be found in the LICENSE file.
- */
+Copyright (c) 2008,2009,2010 Massachusetts Institute of Technology.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Massachusetts Institute of Technology nor
+  the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other portions are under the same license from Intel:
+http://sourceforge.net/projects/slicing-by-8/
+/*++
+ *
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ * This software program is licensed subject to the BSD License, 
+ * available at http://www.opensource.org/licenses/bsd-license.html
+ *
+ * Abstract: The main routine
+ * 
+ --*/
 
 For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 
@@ -289,7 +326,7 @@ For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,l
 */
 
 
-For 
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest
+For hadoop-common-project/hadoop-common/src/main/native/gtest
 -
 Copyright 2008, Google Inc.
 All rights reserved.
@@ -320,43 +357,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-The binary distribution of this product bundles these dependencies under the
-following license:
-re2j 1.0
--
-This is a work derived from Russ Cox's RE2 in Go, whose license
-http://golang.org/LICENSE is as follows:
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-   * Redistributions in binary form must reproduce the above copy

[3/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82c9b3bb/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
new file mode 100644
index 000..c944d20
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
@@ -0,0 +1,340 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.solver.impl;
+
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import 
org.apache.hadoop.resourceestimator.skylinestore.api.PredictionSkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import 
org.apache.hadoop.resourceestimator.solver.preprocess.SolverPreprocessor;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.ojalgo.optimisation.Expression;
+import org.ojalgo.optimisation.ExpressionsBasedModel;
+import org.ojalgo.optimisation.Optimisation.Result;
+import org.ojalgo.optimisation.Variable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A LP(Linear Programming) solution to predict recurring pipeline's
+ * {@link Resource} requirements, and generate Hadoop {@code RDL} requests 
which
+ * will be used to make recurring resource reservation.
+ */
+public class LpSolver extends BaseSolver implements Solver {
+  private static final Logger LOGGER = LoggerFactory.getLogger(LpSolver.class);
+  private final SolverPreprocessor preprocessor = new SolverPreprocessor();
+  /**
+   * Controls the balance between over-allocation and under-allocation.
+   */
+  private double alpha;
+  /**
+   * Controls the generalization of the solver.
+   */
+  private double beta;
+  /**
+   * The minimum number of job runs required to run the solver.
+   */
+  private int minJobRuns;
+  /**
+   * The time interval which is used to discretize job execution.
+   */
+  private int timeInterval;
+  /**
+   * The PredictionSkylineStore to store the predicted ResourceSkyline for new
+   * run.
+   */
+  private PredictionSkylineStore predictionSkylineStore;
+
+  @Override public final void init(final Configuration config,
+  PredictionSkylineStore skylineStore) {
+this.alpha =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
+this.beta =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
+this.minJobRuns =
+config.getInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 
1);
+this.timeInterval =
+config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
+this.predictionSkylineStore = skylineStore;
+  }
+
+  /**
+   * Generate over-allocation constraints.
+   *
+   * @param lpModelthe LP model.
+   * @param cJobITimeK actual container allocation for job i in time
+   *   interval k.
+   * @param oa container over-allocation.
+   * @param x  predicted container allocation.
+   * @param indexJobITimeK index for job i at time interval k.
+   * @param timeK  index for time interval k.
+ 

[1/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 926b18887 -> 82c9b3bbb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82c9b3bb/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
new file mode 100644
index 000..69ba480
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
@@ -0,0 +1,163 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.translator.api;
+
+import java.text.ParseException;
+
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test JobMetaData.
+ */
+public class TestJobMetaData {
+  /**
+   * TODO: parametrize this test.
+   */
+  private LogParserUtil logParserUtil = new LogParserUtil();
+
+  private JobMetaData jobMetaData;
+  private RecurrenceId recurrenceId;
+
+  @Before public final void setup() throws ParseException {
+recurrenceId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+jobMetaData = new JobMetaData(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25"));
+jobMetaData.setRecurrenceId(recurrenceId);
+jobMetaData.setContainerStart("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:30"));
+jobMetaData.setContainerEnd("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:30"));
+jobMetaData.setContainerStart("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:40"));
+jobMetaData.setContainerEnd("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:40"));
+jobMetaData.setJobFinishTime(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:45"));
+final Resource containerAlloc = Resource.newInstance(1, 1);
+jobMetaData.getResourceSkyline().setContainerSpec(containerAlloc);
+jobMetaData.getResourceSkyline().setJobInputDataSize(1024.5);
+jobMetaData.createSkyline();
+  }
+
+  @Test public final void testGetContainerSpec() {
+final Resource containerAlloc =
+jobMetaData.getResourceSkyline().getContainerSpec();
+final Resource containerAlloc2 = Resource.newInstance(1, 1);
+Assert.assertEquals(containerAlloc.getMemorySize(),
+containerAlloc2.getMemorySize());
+Assert.assertEquals(containerAlloc.getVirtualCores(),
+containerAlloc2.getVirtualCores());
+  }
+
+  @Test public final void testGetJobSize() {
+Assert.assertEquals(jobMetaData.getResourceSkyline().getJobInputDataSize(),
+1024.5, 0);
+  }
+
+  @Test public final void testGetRecurrenceeId() {
+final RecurrenceId recurrenceIdTest =
+new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+Assert.assertEquals(recurrenceIdTest, jobMetaData.getRecurrenceId());
+  }
+
+  @Test public final void testStringToUnixTimestamp() throws ParseException {
+final long submissionTime =
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25");
+
Assert.assertEquals(jobMetaData.getResourceSkyline().getJobSubmissionTime(),
+submissionTime);
+  }
+
+  @Test public final void testResourceSkyline() {
+final RLESparseResourceAllocation skylineList =
+jobMetaData.getResourceSkyline().getSkylineList();
+final int containerCPU =
+jobMetaData.getResourceSkyline().getContainerSpec().getVirtualCores();
+int k;
+for (k = 0; k < 5; k++) {
+  Assert.assertEquals(0,
+  skylineList.getCapacityAtTime(k).getVirtualCores() / 

[5/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
HADOOP-14840. Tool to estimate resource requirements of an application pipeline 
based on prior executions. (Rui Li via Subru).

(cherry picked from commit 625039ef20e6011ab360131d70582a6e4bf2ec1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82c9b3bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82c9b3bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82c9b3bb

Branch: refs/heads/branch-3.0
Commit: 82c9b3bbb58d305655399b8c6a9266a16a456913
Parents: 926b188
Author: Subru Krishnan 
Authored: Wed Oct 25 15:51:27 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 16:01:13 2017 -0700

--
 LICENSE.txt |   1 +
 .../assemblies/hadoop-resourceestimator.xml |  42 ++
 .../main/resources/assemblies/hadoop-tools.xml  |  11 +
 hadoop-project/pom.xml  |   5 +
 hadoop-project/src/site/site.xml|   1 +
 hadoop-tools/hadoop-resourceestimator/README.md |  19 +
 hadoop-tools/hadoop-resourceestimator/pom.xml   | 174 +++
 .../src/config/checkstyle.xml   |  50 ++
 .../src/main/bin/estimator.cmd  |  52 +++
 .../src/main/bin/estimator.sh   |  71 +++
 .../src/main/bin/start-estimator.cmd|  37 ++
 .../src/main/bin/start-estimator.sh |  42 ++
 .../src/main/bin/stop-estimator.cmd |  37 ++
 .../src/main/bin/stop-estimator.sh  |  42 ++
 .../src/main/conf/resourceestimator-config.xml  |  85 
 .../src/main/data/resourceEstimatorService.txt  |   2 +
 .../common/api/RecurrenceId.java|  95 
 .../common/api/ResourceSkyline.java | 211 +
 .../common/api/package-info.java|  23 +
 .../config/ResourceEstimatorConfiguration.java  | 125 +
 .../common/config/ResourceEstimatorUtil.java|  81 
 .../common/config/package-info.java |  23 +
 .../exception/ResourceEstimatorException.java   |  35 ++
 .../common/exception/package-info.java  |  23 +
 .../RLESparseResourceAllocationSerDe.java   |  77 +++
 .../common/serialization/ResourceSerDe.java |  61 +++
 .../common/serialization/package-info.java  |  24 +
 .../service/ResourceEstimatorServer.java| 146 ++
 .../service/ResourceEstimatorService.java   | 238 ++
 .../resourceestimator/service/ShutdownHook.java |  45 ++
 .../resourceestimator/service/package-info.java |  23 +
 .../skylinestore/api/HistorySkylineStore.java   |  99 
 .../api/PredictionSkylineStore.java |  60 +++
 .../skylinestore/api/SkylineStore.java  |  30 ++
 .../skylinestore/api/package-info.java  |  23 +
 .../DuplicateRecurrenceIdException.java |  33 ++
 .../EmptyResourceSkylineException.java  |  33 ++
 .../exceptions/NullPipelineIdException.java |  32 ++
 ...ullRLESparseResourceAllocationException.java |  33 ++
 .../exceptions/NullRecurrenceIdException.java   |  32 ++
 .../NullResourceSkylineException.java   |  32 ++
 .../RecurrenceIdNotFoundException.java  |  33 ++
 .../exceptions/SkylineStoreException.java   |  33 ++
 .../skylinestore/exceptions/package-info.java   |  24 +
 .../skylinestore/impl/InMemoryStore.java| 256 ++
 .../skylinestore/impl/package-info.java |  23 +
 .../validator/SkylineStoreValidator.java| 118 +
 .../skylinestore/validator/package-info.java|  23 +
 .../resourceestimator/solver/api/Solver.java|  76 +++
 .../solver/api/package-info.java|  23 +
 .../exceptions/InvalidInputException.java   |  34 ++
 .../exceptions/InvalidSolverException.java  |  34 ++
 .../solver/exceptions/SolverException.java  |  34 ++
 .../solver/exceptions/package-info.java |  24 +
 .../solver/impl/BaseSolver.java |  94 
 .../resourceestimator/solver/impl/LpSolver.java | 340 ++
 .../solver/impl/package-info.java   |  23 +
 .../solver/preprocess/SolverPreprocessor.java   | 219 +
 .../solver/preprocess/package-info.java |  23 +
 .../translator/api/JobMetaData.java | 163 +++
 .../translator/api/LogParser.java   |  65 +++
 .../translator/api/SingleLineParser.java|  52 +++
 .../translator/api/package-info.java|  23 +
 .../exceptions/DataFieldNotFoundException.java  |  32 ++
 .../translator/exceptions/package-info.java |  23 +
 .../translator/impl/BaseLogParser.java  | 125 +
 .../translator/impl/LogParserUtil.java  |  97 
 .../translator/impl/NativeSingleLineParser.java | 120 +
 .../translator/impl/RmSingleLineParser.java | 203 
 .../translator/impl/package-info.java   |  23 +
 .../translator/validator/ParserValidator.java   |  41 ++
 .../translator/validator/package-info.java

[4/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82c9b3bb/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
new file mode 100644
index 000..92e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -0,0 +1,238 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.service;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorUtil;
+import 
org.apache.hadoop.resourceestimator.common.exception.ResourceEstimatorException;
+import 
org.apache.hadoop.resourceestimator.common.serialization.RLESparseResourceAllocationSerDe;
+import org.apache.hadoop.resourceestimator.common.serialization.ResourceSerDe;
+import org.apache.hadoop.resourceestimator.skylinestore.api.SkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import org.apache.hadoop.resourceestimator.translator.api.LogParser;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.Singleton;
+
+/**
+ * Resource Estimator Service which provides a set of REST APIs for users to
+ * use the estimation service.
+ */
+@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(ResourceEstimatorService.class);
+  private static SkylineStore skylineStore;
+  private static Solver solver;
+  private static LogParser logParser;
+  private static LogParserUtil logParserUtil = new LogParserUtil();
+  private static Configuration config;
+  private static Gson gson;
+  private static Type rleType;
+  private static Type skylineStoreType;
+
+  public ResourceEstimatorService() throws ResourceEstimatorException {
+if (skylineStore == null) {
+  try {
+config = new Configuration();
+config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+SkylineStore.class);
+logParser = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
+LogParser.class);
+logParser.init(config, skylineStore);
+logParserUtil.setLogParser(logParser);
+solver = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SOLVER_PROVIDER,
+

[2/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82c9b3bb/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
new file mode 100644
index 000..12f8dd5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
@@ -0,0 +1,181 @@
+
+
+Resource Estimator Service
+==
+
+* [Resource Estimator Service](#Resource_Estimator_Service)
+* [Overview](#Overview)
+* [Motivation](#Motivation)
+* [Goals](#Goals)
+* [Architecture](#Architecture)
+* [Usage](#Usage)
+* [Example](#Example)
+* [Advanced Configuration](#AdvancedConfig)
+* [Future work](#Future)
+
+Overview
+
+
+### Motivation
+Estimating job resource requirements remains an important and challenging 
problem for enterprise clusters. This is amplified by the ever-increasing 
complexity of workloads, i.e. from traditional batch jobs to interactive 
queries to streaming and, recently, machine learning jobs. This results in jobs 
relying on multiple computation frameworks such as Tez, MapReduce, Spark, etc., 
and the problem is further compounded by the shared nature of the clusters. 
The current state-of-the-art solution relies on user expertise to estimate 
resource requirements for the jobs (e.g. number of reducers or container 
memory size), which is both tedious and inefficient.
+
+Based on the analysis of our cluster workloads, we observe that a large 
portion of jobs (more than 60%) are recurring jobs, giving us the opportunity 
to automatically estimate job resource requirements based on a job's history 
runs. It is worth noting that jobs usually come from different computation 
frameworks, and the version may change across runs as well. Therefore, we want 
to come up with a framework-agnostic black-box solution that automatically 
estimates resource requirements for the recurring jobs.
+
+### Goals
+
+*   For a periodic job, analyze its history logs and predict its resource 
requirement for the new run.
+*   Support various types of job logs.
+*   Scale to terabytes of job logs.
+
+### Architecture
+
+The following figure illustrates the implementation architecture of the 
resource estimator.
+
+![The architecture of the resource 
estimator](images/resourceestimator_arch.png)
+
+Hadoop-resourceestimator mainly consists of three modules: Translator, 
SkylineStore and Estimator.
+
+1. `ResourceSkyline` is used to characterize a job's resource utilization 
during its lifespan. More specifically, it uses `RLESparseResourceAllocation` 
to record the container allocation information. `RecurrenceId` is used to 
identify a specific run of a recurring pipeline. A pipeline could consist of 
multiple jobs, each of which has a `ResourceSkyline` to characterize its 
resource utilization.
+2. `Translator` parses the job logs, extracts their `ResourceSkylines` and 
stores them in the SkylineStore. `SingleLineParser` parses one line in the log 
stream and extracts the `ResourceSkyline`. `LogParser` recursively parses each 
line in the log stream using `SingleLineParser`. Note that logs could have 
different storage formats, so `LogParser` takes a stream of strings as input, 
instead of a File or other formats. Since job logs may have various formats and 
thus require different `SingleLineParser` implementations, `LogParser` 
instantiates the `SingleLineParser` based on user configuration. Currently 
Hadoop-resourceestimator provides two implementations of `SingleLineParser`: 
`NativeSingleLineParser`, which supports an optimized native format, and 
`RMSingleLineParser`, which parses the YARN ResourceManager logs generated in 
Hadoop systems, since RM logs are widely available in production deployments.
+3. `SkylineStore` serves as the storage layer for Hadoop-resourceestimator and 
consists of two parts. `HistorySkylineStore` stores the `ResourceSkylines` 
extracted by the `Translator`. It supports four actions: addHistory, 
deleteHistory, updateHistory and getHistory. addHistory appends new 
`ResourceSkylines` to the recurring pipelines, while updateHistory deletes all 
the `ResourceSkylines` of a specific recurring pipeline and re-inserts new 
`ResourceSkylines`. `PredictionSkylineStore` stores the predicted 
`RLESparseResourceAllocation` generated by the Estimator. It supports two 
actions: addEstimation and getEstimation.
+
+Currently Hadoop-resourceestimator provides an in-memory implementation of 
the SkylineStore.
+4. `Estimator` predicts re

[3/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625039ef/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
new file mode 100644
index 000..c944d20
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java
@@ -0,0 +1,340 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.solver.impl;
+
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import 
org.apache.hadoop.resourceestimator.skylinestore.api.PredictionSkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import 
org.apache.hadoop.resourceestimator.solver.preprocess.SolverPreprocessor;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.ojalgo.optimisation.Expression;
+import org.ojalgo.optimisation.ExpressionsBasedModel;
+import org.ojalgo.optimisation.Optimisation.Result;
+import org.ojalgo.optimisation.Variable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An LP (Linear Programming) solution to predict a recurring pipeline's
+ * {@link Resource} requirements, and generate Hadoop {@code RDL} requests which
+ * will be used to make recurring resource reservations.
+ */
+public class LpSolver extends BaseSolver implements Solver {
+  private static final Logger LOGGER = LoggerFactory.getLogger(LpSolver.class);
+  private final SolverPreprocessor preprocessor = new SolverPreprocessor();
+  /**
+   * Controls the balance between over-allocation and under-allocation.
+   */
+  private double alpha;
+  /**
+   * Controls the generalization of the solver.
+   */
+  private double beta;
+  /**
+   * The minimum number of job runs required to run the solver.
+   */
+  private int minJobRuns;
+  /**
+   * The time interval which is used to discretize job execution.
+   */
+  private int timeInterval;
+  /**
+   * The PredictionSkylineStore to store the predicted ResourceSkyline for new
+   * run.
+   */
+  private PredictionSkylineStore predictionSkylineStore;
+
+  @Override public final void init(final Configuration config,
+  PredictionSkylineStore skylineStore) {
+this.alpha =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
+this.beta =
+config.getDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
+this.minJobRuns =
+config.getInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 
1);
+this.timeInterval =
+config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
+this.predictionSkylineStore = skylineStore;
+  }
+
+  /**
+   * Generate over-allocation constraints.
+   *
+   * @param lpModel    the LP model.
+   * @param cJobITimeK actual container allocation for job i in time
+   *   interval k.
+   * @param oa container over-allocation.
+   * @param x  predicted container allocation.
+   * @param indexJobITimeK index for job i at time interval k.
+   * @param timeK  index for time interval k.
+ 
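For reference, a minimal sketch of how the solver above could be configured and
initialized. This is an editorial illustration, not part of the patch: the
configuration keys and the init() signature come from the excerpt above, the
InMemoryStore class from the file listing of this change, and the values shown
are simply the defaults that init() falls back to; whether InMemoryStore
implements PredictionSkylineStore is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
import org.apache.hadoop.resourceestimator.skylinestore.impl.InMemoryStore;
import org.apache.hadoop.resourceestimator.solver.impl.LpSolver;

public class LpSolverInitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // alpha: balance between over-allocation and under-allocation.
    conf.setDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
    // beta: generalization of the solver.
    conf.setDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
    // Minimum number of history runs required before solving.
    conf.setInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 1);
    // Time interval used to discretize job execution.
    conf.setInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);

    // Assumption: the in-memory store also acts as the PredictionSkylineStore
    // that will hold the solver's predictions.
    InMemoryStore store = new InMemoryStore();
    LpSolver solver = new LpSolver();
    solver.init(conf, store);
  }
}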

[5/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
HADOOP-14840. Tool to estimate resource requirements of an application pipeline 
based on prior executions. (Rui Li via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/625039ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/625039ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/625039ef

Branch: refs/heads/trunk
Commit: 625039ef20e6011ab360131d70582a6e4bf2ec1d
Parents: 3fae675
Author: Subru Krishnan 
Authored: Wed Oct 25 15:51:27 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 15:51:27 2017 -0700

--
 LICENSE.txt |   1 +
 .../assemblies/hadoop-resourceestimator.xml |  42 ++
 .../main/resources/assemblies/hadoop-tools.xml  |  11 +
 hadoop-project/pom.xml  |   5 +
 hadoop-project/src/site/site.xml|   1 +
 hadoop-tools/hadoop-resourceestimator/README.md |  19 +
 hadoop-tools/hadoop-resourceestimator/pom.xml   | 174 +++
 .../src/config/checkstyle.xml   |  50 ++
 .../src/main/bin/estimator.cmd  |  52 +++
 .../src/main/bin/estimator.sh   |  71 +++
 .../src/main/bin/start-estimator.cmd|  37 ++
 .../src/main/bin/start-estimator.sh |  42 ++
 .../src/main/bin/stop-estimator.cmd |  37 ++
 .../src/main/bin/stop-estimator.sh  |  42 ++
 .../src/main/conf/resourceestimator-config.xml  |  85 
 .../src/main/data/resourceEstimatorService.txt  |   2 +
 .../common/api/RecurrenceId.java|  95 
 .../common/api/ResourceSkyline.java | 211 +
 .../common/api/package-info.java|  23 +
 .../config/ResourceEstimatorConfiguration.java  | 125 +
 .../common/config/ResourceEstimatorUtil.java|  81 
 .../common/config/package-info.java |  23 +
 .../exception/ResourceEstimatorException.java   |  35 ++
 .../common/exception/package-info.java  |  23 +
 .../RLESparseResourceAllocationSerDe.java   |  77 +++
 .../common/serialization/ResourceSerDe.java |  61 +++
 .../common/serialization/package-info.java  |  24 +
 .../service/ResourceEstimatorServer.java| 146 ++
 .../service/ResourceEstimatorService.java   | 238 ++
 .../resourceestimator/service/ShutdownHook.java |  45 ++
 .../resourceestimator/service/package-info.java |  23 +
 .../skylinestore/api/HistorySkylineStore.java   |  99 
 .../api/PredictionSkylineStore.java |  60 +++
 .../skylinestore/api/SkylineStore.java  |  30 ++
 .../skylinestore/api/package-info.java  |  23 +
 .../DuplicateRecurrenceIdException.java |  33 ++
 .../EmptyResourceSkylineException.java  |  33 ++
 .../exceptions/NullPipelineIdException.java |  32 ++
 ...ullRLESparseResourceAllocationException.java |  33 ++
 .../exceptions/NullRecurrenceIdException.java   |  32 ++
 .../NullResourceSkylineException.java   |  32 ++
 .../RecurrenceIdNotFoundException.java  |  33 ++
 .../exceptions/SkylineStoreException.java   |  33 ++
 .../skylinestore/exceptions/package-info.java   |  24 +
 .../skylinestore/impl/InMemoryStore.java| 256 ++
 .../skylinestore/impl/package-info.java |  23 +
 .../validator/SkylineStoreValidator.java| 118 +
 .../skylinestore/validator/package-info.java|  23 +
 .../resourceestimator/solver/api/Solver.java|  76 +++
 .../solver/api/package-info.java|  23 +
 .../exceptions/InvalidInputException.java   |  34 ++
 .../exceptions/InvalidSolverException.java  |  34 ++
 .../solver/exceptions/SolverException.java  |  34 ++
 .../solver/exceptions/package-info.java |  24 +
 .../solver/impl/BaseSolver.java |  94 
 .../resourceestimator/solver/impl/LpSolver.java | 340 ++
 .../solver/impl/package-info.java   |  23 +
 .../solver/preprocess/SolverPreprocessor.java   | 219 +
 .../solver/preprocess/package-info.java |  23 +
 .../translator/api/JobMetaData.java | 163 +++
 .../translator/api/LogParser.java   |  65 +++
 .../translator/api/SingleLineParser.java|  52 +++
 .../translator/api/package-info.java|  23 +
 .../exceptions/DataFieldNotFoundException.java  |  32 ++
 .../translator/exceptions/package-info.java |  23 +
 .../translator/impl/BaseLogParser.java  | 125 +
 .../translator/impl/LogParserUtil.java  |  97 
 .../translator/impl/NativeSingleLineParser.java | 120 +
 .../translator/impl/RmSingleLineParser.java | 203 
 .../translator/impl/package-info.java   |  23 +
 .../translator/validator/ParserValidator.java   |  41 ++
 .../translator/validator/package-info.java  |  23 +
 .../webapps/ResourceEstimatorServer/.gitignore  |  14

[1/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fae67538 -> 625039ef2


http://git-wip-us.apache.org/repos/asf/hadoop/blob/625039ef/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
new file mode 100644
index 000..69ba480
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/translator/api/TestJobMetaData.java
@@ -0,0 +1,163 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.translator.api;
+
+import java.text.ParseException;
+
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test JobMetaData.
+ */
+public class TestJobMetaData {
+  /**
+   * TODO: parametrize this test.
+   */
+  private LogParserUtil logParserUtil = new LogParserUtil();
+
+  private JobMetaData jobMetaData;
+  private RecurrenceId recurrenceId;
+
+  @Before public final void setup() throws ParseException {
+recurrenceId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+jobMetaData = new JobMetaData(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25"));
+jobMetaData.setRecurrenceId(recurrenceId);
+jobMetaData.setContainerStart("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:30"));
+jobMetaData.setContainerEnd("C1",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:30"));
+jobMetaData.setContainerStart("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:40"));
+jobMetaData.setContainerEnd("C2",
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:40"));
+jobMetaData.setJobFinishTime(
+logParserUtil.stringToUnixTimestamp("17/07/16 16:37:45"));
+final Resource containerAlloc = Resource.newInstance(1, 1);
+jobMetaData.getResourceSkyline().setContainerSpec(containerAlloc);
+jobMetaData.getResourceSkyline().setJobInputDataSize(1024.5);
+jobMetaData.createSkyline();
+  }
+
+  @Test public final void testGetContainerSpec() {
+final Resource containerAlloc =
+jobMetaData.getResourceSkyline().getContainerSpec();
+final Resource containerAlloc2 = Resource.newInstance(1, 1);
+Assert.assertEquals(containerAlloc.getMemorySize(),
+containerAlloc2.getMemorySize());
+Assert.assertEquals(containerAlloc.getVirtualCores(),
+containerAlloc2.getVirtualCores());
+  }
+
+  @Test public final void testGetJobSize() {
+Assert.assertEquals(jobMetaData.getResourceSkyline().getJobInputDataSize(),
+1024.5, 0);
+  }
+
+  @Test public final void testGetRecurrenceeId() {
+final RecurrenceId recurrenceIdTest =
+new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
+Assert.assertEquals(recurrenceIdTest, jobMetaData.getRecurrenceId());
+  }
+
+  @Test public final void testStringToUnixTimestamp() throws ParseException {
+final long submissionTime =
+logParserUtil.stringToUnixTimestamp("17/07/16 16:27:25");
+
Assert.assertEquals(jobMetaData.getResourceSkyline().getJobSubmissionTime(),
+submissionTime);
+  }
+
+  @Test public final void testResourceSkyline() {
+final RLESparseResourceAllocation skylineList =
+jobMetaData.getResourceSkyline().getSkylineList();
+final int containerCPU =
+jobMetaData.getResourceSkyline().getContainerSpec().getVirtualCores();
+int k;
+for (k = 0; k < 5; k++) {
+  Assert.assertEquals(0,
+  skylineList.getCapacityAtTime(k).getVirtualCores() / conta

[2/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625039ef/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
new file mode 100644
index 000..12f8dd5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/site/markdown/ResourceEstimator.md
@@ -0,0 +1,181 @@
+
+
+Resource Estimator Service
+==
+
+* [Resource Estimator Service](#Resource_Estimator_Service)
+* [Overview](#Overview)
+* [Motivation](#Motivation)
+* [Goals](#Goals)
+* [Architecture](#Architecture)
+* [Usage](#Usage)
+* [Example](#Example)
+* [Advanced Configuration](#AdvancedConfig)
+* [Future work](#Future)
+
+Overview
+
+
+### Motivation
+Estimating job resource requirements remains an important and challenging 
problem for enterprise clusters. This is amplified by the ever-increasing 
complexity of workloads, ranging from traditional batch jobs to interactive 
queries, streaming and, more recently, machine learning jobs. As a result, jobs 
rely on multiple computation frameworks such as Tez, MapReduce and Spark, and 
the problem is further compounded by the shared nature of the clusters. The 
current state-of-the-art solution relies on user expertise to estimate resource 
requirements for jobs (e.g., the number of reducers or the container memory 
size), which is both tedious and inefficient.
+
+Based on the analysis of our cluster workloads, we observe that a large 
portion of jobs (more than 60%) are recurring jobs, giving us the opportunity 
to automatically estimate job resource requirements based on a job's history 
runs. It is worth noting that jobs usually come from different computation 
frameworks, and their versions may change across runs as well. Therefore, we 
want a framework-agnostic, black-box solution that automatically makes resource 
requirement estimations for recurring jobs.
+
+### Goals
+
+*   For a periodic job, analyze its history logs and predict its resource 
requirement for the new run.
+*   Support various types of job logs.
+*   Scale to terabytes of job logs.
+
+### Architecture
+
+The following figure illustrates the implementation architecture of the 
resource estimator.
+
+![The architecture of the resource 
estimator](images/resourceestimator_arch.png)
+
+Hadoop-resourceestimator mainly consists of three modules: Translator, 
SkylineStore and Estimator.
+
+1. `ResourceSkyline` is used to characterize a job's resource utilization 
during its lifespan. More specifically, it uses `RLESparseResourceAllocation` 
() to record the container allocation information. `RecurrenceId` is used to 
identify a specific run of a recurring pipeline. A pipeline could consist of 
multiple jobs, each of which has a `ResourceSkyline` to characterize its 
resource utilization.
+2. `Translator` parses the job logs, extracts their `ResourceSkylines` and 
stores them in the SkylineStore. `SingleLineParser` parses one line in the log 
stream and extracts the `ResourceSkyline`. `LogParser` recursively parses each 
line in the log stream using `SingleLineParser`. Note that logs could have 
different storage formats, so `LogParser` takes a stream of strings as input, 
rather than a File or other formats. Since job logs may have various formats 
and thus require different `SingleLineParser` implementations, `LogParser` 
instantiates the `SingleLineParser` based on user configuration. Currently, 
Hadoop-resourceestimator provides two implementations of `SingleLineParser`: 
`NativeSingleLineParser`, which supports an optimized native format, and 
`RMSingleLineParser`, which parses the YARN ResourceManager logs generated by 
Hadoop systems, since RM logs are widely available in production deployments.
+3. `SkylineStore` serves as the storage layer for Hadoop-resourceestimator and 
consists of two parts. `HistorySkylineStore` stores the `ResourceSkylines` 
extracted by the `Translator`. It supports four actions: addHistory, 
deleteHistory, updateHistory and getHistory. addHistory appends new 
`ResourceSkylines` to the recurring pipelines, while updateHistory deletes all 
the `ResourceSkylines` of a specific recurring pipeline and re-inserts new 
`ResourceSkylines`. `PredictionSkylineStore` stores the predicted 
`RLESparseResourceAllocation` generated by the Estimator. It supports two 
actions: addEstimation and getEstimation (a usage sketch follows below).
+
+Currently, Hadoop-resourceestimator provides an in-memory implementation of 
the SkylineStore.
+4. `Estimator` predicts re
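
As referenced in item 3, a rough usage sketch of the SkylineStore follows. It
is an editorial illustration, not part of the patch: the classes and action
names come from the text and file listing above, the JobMetaData calls mirror
TestJobMetaData#setup(), and the exact addHistory/getHistory signatures are
assumptions.

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
import org.apache.hadoop.resourceestimator.skylinestore.impl.InMemoryStore;
import org.apache.hadoop.resourceestimator.translator.api.JobMetaData;
import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
import org.apache.hadoop.yarn.api.records.Resource;

public class SkylineStoreSketch {
  public static void main(String[] args) throws Exception {
    LogParserUtil util = new LogParserUtil();

    // One run of a recurring pipeline, identified by pipeline id and run id.
    RecurrenceId runId = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");

    // Build a ResourceSkyline by hand; normally the Translator produces these
    // from job logs.
    JobMetaData job =
        new JobMetaData(util.stringToUnixTimestamp("17/07/16 16:27:25"));
    job.setRecurrenceId(runId);
    job.setContainerStart("C1", util.stringToUnixTimestamp("17/07/16 16:27:30"));
    job.setContainerEnd("C1", util.stringToUnixTimestamp("17/07/16 16:37:30"));
    job.setJobFinishTime(util.stringToUnixTimestamp("17/07/16 16:37:45"));
    job.getResourceSkyline().setContainerSpec(Resource.newInstance(1024, 1));
    job.createSkyline();

    // addHistory / getHistory are the HistorySkylineStore actions named above;
    // the parameter and return types used here are assumptions.
    InMemoryStore store = new InMemoryStore();
    List<ResourceSkyline> skylines = Arrays.asList(job.getResourceSkyline());
    store.addHistory(runId, skylines);
    Map<RecurrenceId, List<ResourceSkyline>> history = store.getHistory(runId);
    System.out.println("Pipelines stored: " + history.size());
  }
}

The Estimator would then read this history, run a Solver over it, and persist
the resulting RLESparseResourceAllocation through addEstimation.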

[4/5] hadoop git commit: HADOOP-14840. Tool to estimate resource requirements of an application pipeline based on prior executions. (Rui Li via Subru).

2017-10-25 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625039ef/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
new file mode 100644
index 000..92e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -0,0 +1,238 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.resourceestimator.service;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
+import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
+import 
org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorConfiguration;
+import org.apache.hadoop.resourceestimator.common.config.ResourceEstimatorUtil;
+import 
org.apache.hadoop.resourceestimator.common.exception.ResourceEstimatorException;
+import 
org.apache.hadoop.resourceestimator.common.serialization.RLESparseResourceAllocationSerDe;
+import org.apache.hadoop.resourceestimator.common.serialization.ResourceSerDe;
+import org.apache.hadoop.resourceestimator.skylinestore.api.SkylineStore;
+import 
org.apache.hadoop.resourceestimator.skylinestore.exceptions.SkylineStoreException;
+import org.apache.hadoop.resourceestimator.solver.api.Solver;
+import org.apache.hadoop.resourceestimator.solver.exceptions.SolverException;
+import org.apache.hadoop.resourceestimator.translator.api.LogParser;
+import org.apache.hadoop.resourceestimator.translator.impl.LogParserUtil;
+import org.apache.hadoop.yarn.api.records.Resource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.Singleton;
+
+/**
+ * Resource Estimator Service which provides a set of REST APIs for users to
+ * use the estimation service.
+ */
+@Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(ResourceEstimatorService.class);
+  private static SkylineStore skylineStore;
+  private static Solver solver;
+  private static LogParser logParser;
+  private static LogParserUtil logParserUtil = new LogParserUtil();
+  private static Configuration config;
+  private static Gson gson;
+  private static Type rleType;
+  private static Type skylineStoreType;
+
+  public ResourceEstimatorService() throws ResourceEstimatorException {
+if (skylineStore == null) {
+  try {
+config = new Configuration();
+config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+SkylineStore.class);
+logParser = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
+ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
+LogParser.class);
+logParser.init(config, skylineStore);
+logParserUtil.setLogParser(logParser);
+solver = ResourceEstimatorUtil.createProviderInstance(config,
+ResourceEstimatorConfiguration.SOLVER_PROVIDER,
+

[2/2] hadoop git commit: YARN-4827. Document configuration of ReservationSystem for FairScheduler. (Yufei Gu via Subru).

2017-10-25 Thread subru
YARN-4827. Document configuration of ReservationSystem for FairScheduler. 
(Yufei Gu via Subru).

(cherry picked from commit 3fae675383489129b3ca3c66683a1215d0c6edf0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74aee867
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74aee867
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74aee867

Branch: refs/heads/branch-2
Commit: 74aee8673f047414a38fc4f5b0570a90465550ec
Parents: 774a575
Author: Subru Krishnan 
Authored: Wed Oct 25 15:07:50 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 15:32:03 2017 -0700

--
 .../reservation/CapacityOverTimePolicy.java |  3 ---
 .../src/site/markdown/FairScheduler.md  | 24 
 .../src/site/markdown/ReservationSystem.md  |  2 +-
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74aee867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
index acd5774..1f3f9bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
@@ -56,9 +56,6 @@ public class CapacityOverTimePolicy extends 
NoOverCommitPolicy {
   private float maxInst;
   private float maxAvg;
 
-  // For now this is CapacityScheduler specific, but given a hierarchy in the
-  // configuration structure of the schedulers (e.g., SchedulerConfiguration)
-  // it should be easy to remove this limitation
   @Override
   public void init(String reservationQueuePath,
   ReservationSchedulerConfiguration conf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74aee867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 941bfa2..e5cb31b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -107,6 +107,8 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **allowPreemptionFrom**: determines whether the scheduler is allowed to 
preempt resources from the queue. The default is true. If a queue has this 
property set to false, this property will apply recursively to all child queues.
 
+* **reservation**: indicates to the `ReservationSystem` that the queue's 
resources are available for users to reserve. This applies only to leaf queues. 
A leaf queue is not reservable if this property isn't configured.
+
 * **User elements**: which represent settings governing the behavior of 
individual users. They can contain a single property: maxRunningApps, a limit 
on the number of running apps for a particular user.
 
 * **A userMaxAppsDefault element**: which sets the default running app limit 
for any users whose limit is not otherwise specified.
@@ -125,6 +127,12 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **A defaultQueueSchedulingPolicy element**: which sets the default 
scheduling policy for queues; overridden by the schedulingPolicy element in 
each queue if specified. Defaults to "fair".
 
+* **A reservation-agent element**: which sets the class name of the 
implementation of the `ReservationAgent`, which attempts to place the user's 
reservation request in the `Plan`. The default value is 
`org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.AlignedPlannerWithGreedy`.
+
+* **A reservation-policy element**: which sets the class name of the 
implementation of the `SharingPolicy`, which validates if the new reservation 
doesn't violate any in

[1/2] hadoop git commit: YARN-4687. Document Reservation ACLs (Contributed by Sean Po)

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fa86fdc3c -> 74aee8673


YARN-4687. Document Reservation ACLs (Contributed by Sean Po)

(cherry picked from commit 2e1d0ff4e901b8313c8d71869735b94ed8bc40a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/774a5753
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/774a5753
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/774a5753

Branch: refs/heads/branch-2
Commit: 774a57536393f37100a6399f1fb206c649bc28bf
Parents: fa86fdc
Author: Arun Suresh 
Authored: Thu Mar 24 10:40:51 2016 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 15:29:32 2017 -0700

--
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/774a5753/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index db3e1da..941bfa2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -189,12 +189,16 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 ###Queue Access Control Lists
 
-Queue Access Control Lists (ACLs) allow administrators to control who may take 
actions on particular queues. They are configured with the aclSubmitApps and 
aclAdministerApps properties, which can be set per queue. Currently the only 
supported administrative action is killing an application. Anybody who may 
administer a queue may also submit applications to it. These properties take 
values in a format like "user1,user2 group1,group2" or " group1,group2". An 
action on a queue will be permitted if its user or group is in the ACL of that 
queue or in the ACL of any of that queue's ancestors. So if queue2 is inside 
queue1, and user1 is in queue1's ACL, and user2 is in queue2's ACL, then both 
users may submit to queue2.
+Queue Access Control Lists (ACLs) allow administrators to control who may take 
actions on particular queues. They are configured with the aclSubmitApps and 
aclAdministerApps properties, which can be set per queue. Currently the only 
supported administrative action is killing an application. An administrator may 
also submit applications to it. These properties take values in a format like 
"user1,user2 group1,group2" or " group1,group2". Actions on a queue are 
permitted if the user/group is a member of the queue ACL or a member of the 
queue ACL of any of that queue's ancestors. So if queue2 is inside queue1, and 
user1 is in queue1's ACL, and user2 is in queue2's ACL, then both users may 
submit to queue2.
 
 **Note:** The delimiter is a space character. To specify only ACL groups, 
begin the value with a space character.
 
 The root queue's ACLs are "\*" by default which, because ACLs are passed down, 
means that everybody may submit to and kill applications from every queue. To 
start restricting access, change the root queue's ACLs to something other than 
"\*".
 
+###Reservation Access Control Lists
+
+Reservation Access Control Lists (ACLs) allow administrators to control who 
may take reservation actions on particular queues. They are configured with the 
aclAdministerReservations, aclListReservations, and the aclSubmitReservations 
properties, which can be set per queue. Currently the supported administrative 
actions are updating and deleting reservations. An administrator may also 
submit and list *all* reservations on the queue. These properties take values 
in a format like "user1,user2 group1,group2" or " group1,group2". Actions on a 
queue are permitted if the user/group is a member of the reservation ACL. Note 
that any user can update, delete, or list their own reservations. If 
reservation ACLs are enabled but not defined, everyone will have access.
+
 ##Administration
 
 The fair scheduler provides support for administration at runtime through a 
few mechanisms:





hadoop git commit: YARN-4827. Document configuration of ReservationSystem for FairScheduler. (Yufei Gu via Subru).

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e18d2c548 -> 926b18887


YARN-4827. Document configuration of ReservationSystem for FairScheduler. 
(Yufei Gu via Subru).

(cherry picked from commit 3fae675383489129b3ca3c66683a1215d0c6edf0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/926b1888
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/926b1888
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/926b1888

Branch: refs/heads/branch-3.0
Commit: 926b1888711fa7e8d0ffbe623052da2796b3b402
Parents: e18d2c5
Author: Subru Krishnan 
Authored: Wed Oct 25 15:07:50 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 15:08:19 2017 -0700

--
 .../reservation/CapacityOverTimePolicy.java |  3 ---
 .../src/site/markdown/FairScheduler.md  | 24 
 .../src/site/markdown/ReservationSystem.md  |  2 +-
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/926b1888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
index acd5774..1f3f9bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
@@ -56,9 +56,6 @@ public class CapacityOverTimePolicy extends 
NoOverCommitPolicy {
   private float maxInst;
   private float maxAvg;
 
-  // For now this is CapacityScheduler specific, but given a hierarchy in the
-  // configuration structure of the schedulers (e.g., SchedulerConfiguration)
-  // it should be easy to remove this limitation
   @Override
   public void init(String reservationQueuePath,
   ReservationSchedulerConfiguration conf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/926b1888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 17db7ee..e59f86b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -109,6 +109,8 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **allowPreemptionFrom**: determines whether the scheduler is allowed to 
preempt resources from the queue. The default is true. If a queue has this 
property set to false, this property will apply recursively to all child queues.
 
+* **reservation**: indicates to the `ReservationSystem` that the queue's 
resources are available for users to reserve. This applies only to leaf queues. 
A leaf queue is not reservable if this property isn't configured.
+
 * **User elements**: which represent settings governing the behavior of 
individual users. They can contain a single property: maxRunningApps, a limit 
on the number of running apps for a particular user.
 
 * **A userMaxAppsDefault element**: which sets the default running app limit 
for any users whose limit is not otherwise specified.
@@ -127,6 +129,12 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **A defaultQueueSchedulingPolicy element**: which sets the default 
scheduling policy for queues; overridden by the schedulingPolicy element in 
each queue if specified. Defaults to "fair".
 
+* **A reservation-agent element**: which sets the class name of the 
implementation of the `ReservationAgent`, which attempts to place the user's 
reservation request in the `Plan`. The default value is 
`org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.AlignedPlannerWithGreedy`.
+
+* **A reservation-policy element**: which sets the class name of the 
implementation of the `SharingPolicy`, which validates if 

hadoop git commit: YARN-4827. Document configuration of ReservationSystem for FairScheduler. (Yufei Gu via Subru).

2017-10-25 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7da15eac7 -> 3fae67538


YARN-4827. Document configuration of ReservationSystem for FairScheduler. 
(Yufei Gu via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fae6753
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fae6753
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fae6753

Branch: refs/heads/trunk
Commit: 3fae675383489129b3ca3c66683a1215d0c6edf0
Parents: 7da15ea
Author: Subru Krishnan 
Authored: Wed Oct 25 15:07:50 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 25 15:07:50 2017 -0700

--
 .../reservation/CapacityOverTimePolicy.java |  3 ---
 .../src/site/markdown/FairScheduler.md  | 24 
 .../src/site/markdown/ReservationSystem.md  |  2 +-
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fae6753/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
index acd5774..1f3f9bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
@@ -56,9 +56,6 @@ public class CapacityOverTimePolicy extends 
NoOverCommitPolicy {
   private float maxInst;
   private float maxAvg;
 
-  // For now this is CapacityScheduler specific, but given a hierarchy in the
-  // configuration structure of the schedulers (e.g., SchedulerConfiguration)
-  // it should be easy to remove this limitation
   @Override
   public void init(String reservationQueuePath,
   ReservationSchedulerConfiguration conf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fae6753/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 64c450b..8d53e57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -109,6 +109,8 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **allowPreemptionFrom**: determines whether the scheduler is allowed to 
preempt resources from the queue. The default is true. If a queue has this 
property set to false, this property will apply recursively to all child queues.
 
+* **reservation**: indicates to the `ReservationSystem` that the queue's 
resources are available for users to reserve. This applies only to leaf queues. 
A leaf queue is not reservable if this property isn't configured.
+
 * **User elements**: which represent settings governing the behavior of 
individual users. They can contain a single property: maxRunningApps, a limit 
on the number of running apps for a particular user.
 
 * **A userMaxAppsDefault element**: which sets the default running app limit 
for any users whose limit is not otherwise specified.
@@ -127,6 +129,12 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **A defaultQueueSchedulingPolicy element**: which sets the default 
scheduling policy for queues; overridden by the schedulingPolicy element in 
each queue if specified. Defaults to "fair".
 
+* **A reservation-agent element**: which sets the class name of the 
implementation of the `ReservationAgent`, which attempts to place the user's 
reservation request in the `Plan`. The default value is 
`org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.AlignedPlannerWithGreedy`.
+
+* **A reservation-policy element**: which sets the class name of the 
implementation of the `SharingPolicy`, which validates if the new reservation 
doesn't violate any

hadoop git commit: YARN-6871. Add additional deSelects params in RMWebServices#getAppReport. Contributed by Tanuj Nayak.

2017-10-20 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a7e34be69 -> 4b872e5e9


YARN-6871. Add additional deSelects params in RMWebServices#getAppReport. 
Contributed by Tanuj Nayak.

(cherry picked from commit 8facf1f976d7e12a846f12baabf54be1b7a49f9d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b872e5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b872e5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b872e5e

Branch: refs/heads/branch-3.0
Commit: 4b872e5e9f90626d725ba4dd4965cee4a0d249c7
Parents: a7e34be
Author: Sunil G 
Authored: Wed Sep 27 14:37:32 2017 +0530
Committer: Subru Krishnan 
Committed: Fri Oct 20 17:20:39 2017 -0700

--
 .../resourcemanager/webapp/DeSelectFields.java  |  11 +-
 .../resourcemanager/webapp/dao/AppInfo.java | 108 +--
 .../webapp/TestRMWebServicesApps.java   |  77 -
 3 files changed, 160 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b872e5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
index 258bbfa..c991766 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
@@ -91,7 +91,16 @@ public class DeSelectFields {
  * RESOURCE_REQUESTS is the first
  * supported type from YARN-6280.
  */
-RESOURCE_REQUESTS("resourceRequests");
+RESOURCE_REQUESTS("resourceRequests"),
+/**
+ * APP_TIMEOUTS, APP_NODE_LABEL_EXPRESSION, AM_NODE_LABEL_EXPRESSION,
+ * RESOURCE_INFO are additionally supported parameters added in
+ * YARN-6871.
+ */
+TIMEOUTS("timeouts"),
+APP_NODE_LABEL_EXPRESSION("appNodeLabelExpression"),
+AM_NODE_LABEL_EXPRESSION("amNodeLabelExpression"),
+RESOURCE_INFO("resourceInfo");
 
 private final String literals;
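
As a usage illustration for the new deSelect literals above (editorial sketch,
not part of the patch; the ResourceManager web services path, host and port are
assumptions based on the standard RM REST endpoint):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class DeSelectsSketch {
  public static void main(String[] args) throws Exception {
    // Fetch application reports while skipping the heavyweight fields that can
    // now be de-selected ("timeouts", "resourceInfo", "appNodeLabelExpression",
    // "amNodeLabelExpression").
    URL url = new URL("http://rm-host:8088/ws/v1/cluster/apps"
        + "?deSelects=resourceInfo,timeouts,appNodeLabelExpression");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}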
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b872e5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 9fb8fb5..db62914 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -121,7 +121,7 @@ public class AppInfo {
   protected String amNodeLabelExpression;
 
   protected ResourcesInfo resourceInfo = null;
-  protected AppTimeoutsInfo timeouts = new AppTimeoutsInfo();
+  private AppTimeoutsInfo timeouts;
 
   public AppInfo() {
   } // JAXB needs this
@@ -243,47 +243,87 @@ public class AppInfo {
   unmanagedApplication = appSubmissionContext.getUnmanagedAM();
   appNodeLabelExpression =
   app.getApplicationSubmissionContext().getNodeLabelExpression();
-  amNodeLabelExpression = (unmanagedApplication) ? null
-  : app.getAMResourceRequests().get(0).getNodeLabelExpression();
+  /*
+   * When the deSelects parameter contains "amNodeLabelExpression", objects
+   * pertaining to the amNodeLabelExpression are not returned. By default,
+   * this is not skipped. (YARN-6871)
+   */
+  if(!deSelects.contains(DeSelectType.AM_NODE_LABEL_EXPRESSION)) {
+amNodeLabelExpression = (unmanagedApplication) ?
+null :
+app.getAMResourceRequests

hadoop git commit: YARN-6871. Add additional deSelects params in RMWebServices#getAppReport. (Tanuj Nayak via Subru).

2017-10-20 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 062d9c9ba -> 9bd77a6d9


YARN-6871. Add additional deSelects params in RMWebServices#getAppReport. 
(Tanuj Nayak via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bd77a6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bd77a6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bd77a6d

Branch: refs/heads/branch-2
Commit: 9bd77a6d9deb1e1d4ca1aa79bc491742c3fde85d
Parents: 062d9c9
Author: Subru Krishnan 
Authored: Fri Oct 20 17:19:29 2017 -0700
Committer: Subru Krishnan 
Committed: Fri Oct 20 17:19:29 2017 -0700

--
 .../resourcemanager/webapp/DeSelectFields.java  |  11 +-
 .../resourcemanager/webapp/dao/AppInfo.java | 107 +--
 .../webapp/TestRMWebServicesApps.java   |  73 -
 3 files changed, 156 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bd77a6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
index 258bbfa..c991766 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
@@ -91,7 +91,16 @@ public class DeSelectFields {
  * RESOURCE_REQUESTS is the first
  * supported type from YARN-6280.
  */
-RESOURCE_REQUESTS("resourceRequests");
+RESOURCE_REQUESTS("resourceRequests"),
+/**
+ * APP_TIMEOUTS, APP_NODE_LABEL_EXPRESSION, AM_NODE_LABEL_EXPRESSION,
+ * RESOURCE_INFO are additionally supported parameters added in
+ * YARN-6871.
+ */
+TIMEOUTS("timeouts"),
+APP_NODE_LABEL_EXPRESSION("appNodeLabelExpression"),
+AM_NODE_LABEL_EXPRESSION("amNodeLabelExpression"),
+RESOURCE_INFO("resourceInfo");
 
 private final String literals;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bd77a6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index af1b2fa..a1bb875 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -121,7 +121,7 @@ public class AppInfo {
   protected String amNodeLabelExpression;
 
   protected ResourcesInfo resourceInfo = null;
-  protected AppTimeoutsInfo timeouts = new AppTimeoutsInfo();
+  private AppTimeoutsInfo timeouts;
 
   public AppInfo() {
   } // JAXB needs this
@@ -241,44 +241,85 @@ public class AppInfo {
   unmanagedApplication = appSubmissionContext.getUnmanagedAM();
   appNodeLabelExpression =
   app.getApplicationSubmissionContext().getNodeLabelExpression();
-  amNodeLabelExpression = (unmanagedApplication) ? null
-  : app.getAMResourceRequests().get(0).getNodeLabelExpression();
+  /*
+   * When the deSelects parameter contains "amNodeLabelExpression", objects
+   * pertaining to the amNodeLabelExpression are not returned. By default,
+   * this is not skipped. (YARN-6871)
+   */
+  if(!deSelects.contains(DeSelectType.AM_NODE_LABEL_EXPRESSION)) {
+amNodeLabelExpression = (unmanagedApplication) ?
+null :
+app.getAMResourceRequests().get(0).getNodeLabelExpression();
+  }
+  /*
+   *

hadoop git commit: YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. (Yufei Gu via Subru).

2017-10-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c0ee58365 -> 5e7491d53


YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. 
(Yufei Gu via Subru).

(cherry picked from commit 75323394fbc4211596a2c8fbb5e584f3183f742f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e7491d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e7491d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e7491d5

Branch: refs/heads/branch-2
Commit: 5e7491d5320d0c9bbceacebb7d884978677f804b
Parents: c0ee583
Author: Subru Krishnan 
Authored: Tue Oct 17 12:38:06 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:43:36 2017 -0700

--
 .../reservation/FairReservationSystem.java  | 13 +
 .../webapp/TestRMWebServicesReservation.java| 28 +++-
 2 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e7491d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
index 9bf92c2..611fca8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 
@@ -87,4 +88,16 @@ public class FairReservationSystem extends 
AbstractReservationSystem {
 .getSteadyFairShare();
   }
 
+  @Override
+  public Plan getPlan(String planName) {
+// make sure plan name is a full queue name in fair scheduler. For example,
+// "root.default" is the full queue name for "default".
+FSQueue queue = fairScheduler.getQueueManager().getQueue(planName);
+
+if (queue != null) {
+  return super.getPlan(queue.getQueueName());
+} else {
+  return null;
+}
+  }
 }
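
In effect, the override above lets callers look up a plan by either the short
or the fully qualified FairScheduler queue name. A small editorial sketch,
assuming a reservable leaf queue root.default exists and that plans are
registered under full queue names:

import org.apache.hadoop.yarn.server.resourcemanager.reservation.FairReservationSystem;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;

public class GetPlanSketch {
  // reservationSystem is the FairReservationSystem initialized by the RM.
  static void lookup(FairReservationSystem reservationSystem) {
    // Short and fully qualified names resolve to the same plan under the
    // assumptions above; a name that matches no FairScheduler queue yields null.
    Plan byShortName = reservationSystem.getPlan("default");
    Plan byFullName = reservationSystem.getPlan("root.default");
    Plan missing = reservationSystem.getPlan("root.nosuchqueue");
    System.out.println(byShortName != null && byFullName != null && missing == null);
  }
}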

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e7491d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
index d055130..492e78a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
@@ -152,21 +152,9 @@ public class TestRMWebServicesReservation extends 
JerseyTestBase {
   bind(GenericExceptionHandler.class);
   conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  Configuration conf = new Configuration();
   conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-  YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  conf.setClass(YarnConfiguration.RM_SCHEDULER, 

hadoop git commit: YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. (Yufei Gu via Subru).

2017-10-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5334deb08 -> 81a86860b


YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. 
(Yufei Gu via Subru).

(cherry picked from commit 75323394fbc4211596a2c8fbb5e584f3183f742f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81a86860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81a86860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81a86860

Branch: refs/heads/branch-3.0
Commit: 81a86860bf4a4d03e002b0bc7fc5aa4a4c103f57
Parents: 5334deb
Author: Subru Krishnan 
Authored: Tue Oct 17 12:38:06 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:39:40 2017 -0700

--
 .../reservation/FairReservationSystem.java  | 13 +
 .../webapp/TestRMWebServicesReservation.java| 28 +++-
 2 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a86860/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
index 9bf92c2..611fca8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 
@@ -87,4 +88,16 @@ public class FairReservationSystem extends 
AbstractReservationSystem {
 .getSteadyFairShare();
   }
 
+  @Override
+  public Plan getPlan(String planName) {
+// make sure plan name is a full queue name in fair scheduler. For example,
+// "root.default" is the full queue name for "default".
+FSQueue queue = fairScheduler.getQueueManager().getQueue(planName);
+
+if (queue != null) {
+  return super.getPlan(queue.getQueueName());
+} else {
+  return null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a86860/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
index 657bec4..02aa65f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
@@ -145,21 +145,9 @@ public class TestRMWebServicesReservation extends 
JerseyTestBase {
   bind(GenericExceptionHandler.class);
   conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  Configuration conf = new Configuration();
   conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-  YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  conf.setClass(YarnConfiguration.RM_SCHEDULER, 

hadoop git commit: YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. (Yufei Gu via Subru).

2017-10-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk acabc657f -> 75323394f


YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. 
(Yufei Gu via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75323394
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75323394
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75323394

Branch: refs/heads/trunk
Commit: 75323394fbc4211596a2c8fbb5e584f3183f742f
Parents: acabc65
Author: Subru Krishnan 
Authored: Tue Oct 17 12:38:06 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:38:06 2017 -0700

--
 .../reservation/FairReservationSystem.java  | 13 +
 .../webapp/TestRMWebServicesReservation.java| 28 +++-
 2 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75323394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
index 9bf92c2..611fca8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 
@@ -87,4 +88,16 @@ public class FairReservationSystem extends 
AbstractReservationSystem {
 .getSteadyFairShare();
   }
 
+  @Override
+  public Plan getPlan(String planName) {
+// make sure plan name is a full queue name in fair scheduler. For example,
+// "root.default" is the full queue name for "default".
+FSQueue queue = fairScheduler.getQueueManager().getQueue(planName);
+
+if (queue != null) {
+  return super.getPlan(queue.getQueueName());
+} else {
+  return null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75323394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
index 657bec4..02aa65f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
@@ -145,21 +145,9 @@ public class TestRMWebServicesReservation extends 
JerseyTestBase {
   bind(GenericExceptionHandler.class);
   conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  Configuration conf = new Configuration();
   conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-  YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
-  ResourceScheduler.class);
-  CapacitySchedulerConfiguration 

hadoop git commit: YARN-7341. TestRouterWebServiceUtil#testMergeMetrics is flakey. (Robert Kanter via Haibo Chen)

2017-10-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 adf5ea73d -> c0ee58365


YARN-7341. TestRouterWebServiceUtil#testMergeMetrics is flakey. (Robert Kanter 
via Haibo Chen)

(cherry picked from commit acabc657ff5433f36ce1b238cecd3a3b5bbe87ae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0ee5836
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0ee5836
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0ee5836

Branch: refs/heads/branch-2
Commit: c0ee58365619b2b6b9038d18e23c551d9a708e99
Parents: adf5ea7
Author: Haibo Chen 
Authored: Tue Oct 17 10:15:53 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:30:26 2017 -0700

--
 .../router/webapp/RouterWebServiceUtil.java | 36 +++-
 .../router/webapp/TestRouterWebServiceUtil.java | 17 ++---
 2 files changed, 33 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ee5836/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
index 1149468..efc3ea3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
@@ -407,22 +407,26 @@ public final class RouterWebServiceUtil {
 metrics.setContainersPending(metrics.getPendingContainers()
 + metricsResponse.getPendingContainers());
 
-metrics.setTotalMB(metrics.getTotalMB() + metricsResponse.getTotalMB());
-metrics.setTotalVirtualCores(
-metrics.getTotalVirtualCores() + metrics.getTotalVirtualCores());
-metrics.setTotalNodes(metrics.getTotalNodes() + metrics.getTotalNodes());
-metrics.setLostNodes(metrics.getLostNodes() + metrics.getLostNodes());
-metrics.setUnhealthyNodes(
-metrics.getUnhealthyNodes() + metrics.getUnhealthyNodes());
-metrics.setDecommissioningNodes(
-metrics.getDecommissioningNodes() + metrics.getDecommissioningNodes());
-metrics.setDecommissionedNodes(
-metrics.getDecommissionedNodes() + metrics.getDecommissionedNodes());
-metrics.setRebootedNodes(
-metrics.getRebootedNodes() + metrics.getRebootedNodes());
-metrics.setActiveNodes(metrics.getActiveNodes() + 
metrics.getActiveNodes());
-metrics.setShutdownNodes(
-metrics.getShutdownNodes() + metrics.getShutdownNodes());
+metrics.setTotalMB(metrics.getTotalMB()
++ metricsResponse.getTotalMB());
+metrics.setTotalVirtualCores(metrics.getTotalVirtualCores()
++ metricsResponse.getTotalVirtualCores());
+metrics.setTotalNodes(metrics.getTotalNodes()
++ metricsResponse.getTotalNodes());
+metrics.setLostNodes(metrics.getLostNodes()
++ metricsResponse.getLostNodes());
+metrics.setUnhealthyNodes(metrics.getUnhealthyNodes()
++ metricsResponse.getUnhealthyNodes());
+metrics.setDecommissioningNodes(metrics.getDecommissioningNodes()
++ metricsResponse.getDecommissioningNodes());
+metrics.setDecommissionedNodes(metrics.getDecommissionedNodes()
++ metricsResponse.getDecommissionedNodes());
+metrics.setRebootedNodes(metrics.getRebootedNodes()
++ metricsResponse.getRebootedNodes());
+metrics.setActiveNodes(metrics.getActiveNodes()
++ metricsResponse.getActiveNodes());
+metrics.setShutdownNodes(metrics.getShutdownNodes()
++ metricsResponse.getShutdownNodes());
   }
 
 }
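
The hunk above is the substance of the fix: the old code added several counters to themselves (for example metrics.getLostNodes() + metrics.getLostNodes()) instead of adding the values carried by the incoming per-cluster response, so merged totals drifted with the number of calls. The corrected accumulate-from-the-response pattern is sketched below, with ClusterCounters as a hypothetical simplification of ClusterMetricsInfo rather than the actual YARN DAO.

    /** Hypothetical, trimmed-down stand-in for ClusterMetricsInfo. */
    final class ClusterCounters {
      long totalMB;
      int activeNodes;
      int lostNodes;

      ClusterCounters(long totalMB, int activeNodes, int lostNodes) {
        this.totalMB = totalMB;
        this.activeNodes = activeNodes;
        this.lostNodes = lostNodes;
      }
    }

    public class MergeMetricsSketch {
      // Correct pattern: always add the response's values into the accumulator.
      static void merge(ClusterCounters acc, ClusterCounters response) {
        acc.totalMB += response.totalMB;
        acc.activeNodes += response.activeNodes;
        acc.lostNodes += response.lostNodes;
      }

      public static void main(String[] args) {
        ClusterCounters merged = new ClusterCounters(0, 0, 0);
        merge(merged, new ClusterCounters(4096, 3, 1)); // first sub-cluster
        merge(merged, new ClusterCounters(8192, 5, 0)); // second sub-cluster
        System.out.println(merged.totalMB + " MB, " + merged.activeNodes
            + " active, " + merged.lostNodes + " lost"); // 12288 MB, 8 active, 1 lost
      }
    }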

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ee5836/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
index 7073b3b..edf3804 100644
--- 
a/hadoop-yarn-project/hadoop-y

hadoop git commit: YARN-7095. Federation: routing getNode/getNodes/getMetrics REST invocations transparently to multiple RMs. (Giovanni Matteo Fumarola via Subru).

2017-10-17 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 54a694172 -> adf5ea73d


YARN-7095. Federation: routing getNode/getNodes/getMetrics REST invocations 
transparently to multiple RMs. (Giovanni Matteo Fumarola via Subru).

(cherry picked from commit bac4e8cca8b54405f5e37b90e545b93bbadee0f4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adf5ea73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adf5ea73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adf5ea73

Branch: refs/heads/branch-2
Commit: adf5ea73d6b8e3d3be88bc06a96283ad06f81d64
Parents: 54a6941
Author: Subru Krishnan 
Authored: Thu Aug 31 15:05:41 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:14:05 2017 -0700

--
 .../webapp/dao/ClusterMetricsInfo.java  | 162 +---
 .../resourcemanager/webapp/dao/NodeInfo.java|  17 +-
 .../resourcemanager/webapp/dao/NodesInfo.java   |   4 +
 .../webapp/FederationInterceptorREST.java   | 236 -
 .../router/webapp/RouterWebServiceUtil.java | 101 ++-
 .../MockDefaultRequestInterceptorREST.java  |  43 +++
 .../webapp/TestFederationInterceptorREST.java   |  54 +++-
 .../TestFederationInterceptorRESTRetry.java | 207 ++-
 .../router/webapp/TestRouterWebServiceUtil.java | 262 +++
 9 files changed, 1031 insertions(+), 55 deletions(-)
--
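
At a high level, this change teaches the Router's REST layer to fan each getNode/getNodes/getMetrics call out to every sub-cluster RM and fold the per-cluster answers into one response. The snippet below is only a generic sketch of that fan-out-and-merge shape under assumed names; it does not reproduce FederationInterceptorREST or RouterWebServiceUtil, whose failure handling and DAO merging differ.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.function.BinaryOperator;

    public class FanOutSketch {
      // Query every sub-cluster RM concurrently and fold the per-cluster
      // answers into a single value (here, summing a cluster-wide metric).
      static long fanOutAndMerge(List<Callable<Long>> perClusterCalls,
          BinaryOperator<Long> merge) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(perClusterCalls.size());
        try {
          List<Future<Long>> futures = new ArrayList<>();
          for (Callable<Long> call : perClusterCalls) {
            futures.add(pool.submit(call));
          }
          long merged = 0;
          for (Future<Long> f : futures) {
            merged = merge.apply(merged, f.get()); // failures propagate to the caller
          }
          return merged;
        } finally {
          pool.shutdownNow();
        }
      }

      public static void main(String[] args) throws Exception {
        // Two mock sub-cluster RMs reporting totalMB.
        List<Callable<Long>> rms = List.of(() -> 4096L, () -> 8192L);
        System.out.println(fanOutAndMerge(rms, Long::sum)); // 12288
      }
    }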


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adf5ea73/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index dc42eb6..3214cb9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -31,35 +31,35 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 @XmlAccessorType(XmlAccessType.FIELD)
 public class ClusterMetricsInfo {
 
-  protected int appsSubmitted;
-  protected int appsCompleted;
-  protected int appsPending;
-  protected int appsRunning;
-  protected int appsFailed;
-  protected int appsKilled;
-
-  protected long reservedMB;
-  protected long availableMB;
-  protected long allocatedMB;
-
-  protected long reservedVirtualCores;
-  protected long availableVirtualCores;
-  protected long allocatedVirtualCores;
-
-  protected int containersAllocated;
-  protected int containersReserved;
-  protected int containersPending;
-
-  protected long totalMB;
-  protected long totalVirtualCores;
-  protected int totalNodes;
-  protected int lostNodes;
-  protected int unhealthyNodes;
-  protected int decommissioningNodes;
-  protected int decommissionedNodes;
-  protected int rebootedNodes;
-  protected int activeNodes;
-  protected int shutdownNodes;
+  private int appsSubmitted;
+  private int appsCompleted;
+  private int appsPending;
+  private int appsRunning;
+  private int appsFailed;
+  private int appsKilled;
+
+  private long reservedMB;
+  private long availableMB;
+  private long allocatedMB;
+
+  private long reservedVirtualCores;
+  private long availableVirtualCores;
+  private long allocatedVirtualCores;
+
+  private int containersAllocated;
+  private int containersReserved;
+  private int containersPending;
+
+  private long totalMB;
+  private long totalVirtualCores;
+  private int totalNodes;
+  private int lostNodes;
+  private int unhealthyNodes;
+  private int decommissioningNodes;
+  private int decommissionedNodes;
+  private int rebootedNodes;
+  private int activeNodes;
+  private int shutdownNodes;
 
   public ClusterMetricsInfo() {
   } // JAXB needs this
@@ -93,8 +93,8 @@ public class ClusterMetricsInfo {
 
 if (rs instanceof CapacityScheduler) {
   this.totalMB = availableMB + allocatedMB + reservedMB;
-  this.totalVirtualCores = availableVirtualCores + allocatedVirtualCores
-  + containersReserved;
+  this.totalVirtualCores =
+  availableVirtualCores + allocatedVirtualCores + containersReserved;
 } else {
   this.totalMB = availableMB + allocate

hadoop git commit: YARN-5329. Placement Agent enhancements required to support recurring reservations in ReservationSystem. (Carlo Curino via Subru).

2017-10-04 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7836a6b59 -> 7fd4a997f


YARN-5329. Placement Agent enhancements required to support recurring 
reservations in ReservationSystem. (Carlo Curino via Subru).

(cherry picked from commit e6e614e380ed1d746973b50f666a9c40d272073e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd4a997
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd4a997
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd4a997

Branch: refs/heads/branch-2
Commit: 7fd4a997f46ad9313a05d486e18d8fed8ffecbdb
Parents: 7836a6b
Author: Subru Krishnan 
Authored: Wed Oct 4 19:28:27 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 4 19:30:57 2017 -0700

--
 .../reservation/InMemoryPlan.java   |   6 +
 .../PeriodicRLESparseResourceAllocation.java|   3 +-
 .../RLESparseResourceAllocation.java|   6 +-
 .../reservation/planning/IterativePlanner.java  |  27 ++-
 .../reservation/planning/PlanningAlgorithm.java |  79 ---
 .../reservation/planning/StageAllocator.java|   3 +-
 .../planning/StageAllocatorGreedy.java  |   4 +-
 .../planning/StageAllocatorGreedyRLE.java   |   7 +-
 .../planning/StageAllocatorLowCostAligned.java  |   6 +-
 .../reservation/BaseSharingPolicyTest.java  |   8 +-
 .../reservation/TestCapacityOverTimePolicy.java |  29 +--
 .../reservation/TestInMemoryPlan.java   |  69 +++---
 .../planning/TestAlignedPlanner.java|  40 +++-
 .../planning/TestGreedyReservationAgent.java|  69 +++---
 .../planning/TestReservationAgents.java | 213 +++
 15 files changed, 442 insertions(+), 127 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd4a997/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
index 9eb1820..7187510 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
@@ -723,6 +723,12 @@ public class InMemoryPlan implements Plan {
   + periodicRle.getTimePeriod() + ")");
 }
 
+if (period < (end - start)) {
+  throw new PlanningException(
+  "Invalid input: (end - start) = (" + end + " - " + start + ") = "
+  + (end - start) + " > period = " + period);
+}
+
 // find the minimum resources available among all the instances that 
fit
 // in the LCM
 long numInstInLCM = periodicRle.getTimePeriod() / period;
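
The guard added here rejects a recurring reservation whose single-instance duration (end - start) exceeds its recurrence period, since such an instance would overlap its own next occurrence. A compact stand-alone illustration of the same check follows; validateRecurrence is a hypothetical name used only for this sketch, not a Plan API.

    public class RecurrenceCheckSketch {
      // Mirrors the guard added to InMemoryPlan: a recurring reservation's
      // single-instance duration (end - start) must not exceed its period.
      static void validateRecurrence(long startMs, long endMs, long periodMs) {
        long duration = endMs - startMs;
        if (periodMs < duration) {
          throw new IllegalArgumentException(
              "duration " + duration + " ms exceeds recurrence period " + periodMs + " ms");
        }
      }

      public static void main(String[] args) {
        // A 30-minute instance recurring every hour is accepted.
        validateRecurrence(0, 30 * 60_000L, 60 * 60_000L);
        // A 90-minute instance recurring every hour would overlap itself and is rejected.
        try {
          validateRecurrence(0, 90 * 60_000L, 60 * 60_000L);
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }
      }
    }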

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd4a997/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
index 7bc44f5..d326944 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
@@ -221,7 +221,8 @@ public class PeriodicRLESparseResourceAllocation
   NavigableMap cumulativeMap = this.getCumulative();
   Long previous = cumulativeMap.floorKey(relativeStart);
   previous = (previous != null) ? previous : 0;

hadoop git commit: YARN-5329. Placement Agent enhancements required to support recurring reservations in ReservationSystem. (Carlo Curino via Subru).

2017-10-04 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5c2f07eed -> 829a8e26b


YARN-5329. Placement Agent enhancements required to support recurring 
reservations in ReservationSystem. (Carlo Curino via Subru).

(cherry picked from commit e6e614e380ed1d746973b50f666a9c40d272073e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/829a8e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/829a8e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/829a8e26

Branch: refs/heads/branch-3.0
Commit: 829a8e26b9fcd0f834c396e557ee6fb1509906d4
Parents: 5c2f07e
Author: Subru Krishnan 
Authored: Wed Oct 4 19:28:27 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Oct 4 19:29:03 2017 -0700

--
 .../reservation/InMemoryPlan.java   |   6 +
 .../PeriodicRLESparseResourceAllocation.java|   3 +-
 .../RLESparseResourceAllocation.java|   6 +-
 .../reservation/planning/IterativePlanner.java  |  27 ++-
 .../reservation/planning/PlanningAlgorithm.java |  79 ---
 .../reservation/planning/StageAllocator.java|   3 +-
 .../planning/StageAllocatorGreedy.java  |   4 +-
 .../planning/StageAllocatorGreedyRLE.java   |   7 +-
 .../planning/StageAllocatorLowCostAligned.java  |   6 +-
 .../reservation/BaseSharingPolicyTest.java  |   8 +-
 .../reservation/TestCapacityOverTimePolicy.java |  29 +--
 .../reservation/TestInMemoryPlan.java   |  69 +++---
 .../planning/TestAlignedPlanner.java|  40 +++-
 .../planning/TestGreedyReservationAgent.java|  69 +++---
 .../planning/TestReservationAgents.java | 213 +++
 15 files changed, 442 insertions(+), 127 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/829a8e26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
index 9eb1820..7187510 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
@@ -723,6 +723,12 @@ public class InMemoryPlan implements Plan {
   + periodicRle.getTimePeriod() + ")");
 }
 
+if (period < (end - start)) {
+  throw new PlanningException(
+  "Invalid input: (end - start) = (" + end + " - " + start + ") = "
+  + (end - start) + " > period = " + period);
+}
+
 // find the minimum resources available among all the instances that 
fit
 // in the LCM
 long numInstInLCM = periodicRle.getTimePeriod() / period;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/829a8e26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
index 7bc44f5..d326944 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
@@ -221,7 +221,8 @@ public class PeriodicRLESparseResourceAllocation
   NavigableMap cumulativeMap = this.getCumulative();
   Long previous = cumulativeMap.floorKey(relativeStart);
   previous = (previous != null) ? previous : 0;
