[09/50] [abbrv] hadoop git commit: HDFS-8870. Lease is leaked on write failure. Contributed by Kuhu Shukla.

2016-11-22 Thread sjlee
HDFS-8870. Lease is leaked on write failure. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fcea8a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fcea8a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fcea8a0

Branch: refs/heads/HADOOP-13070
Commit: 4fcea8a0c8019d6d9a5e6f315c83659938b93a40
Parents: 5af572b
Author: Kihwal Lee 
Authored: Tue Nov 15 14:47:53 2016 -0600
Committer: Kihwal Lee 
Committed: Tue Nov 15 14:47:53 2016 -0600

--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java  |  5 +
 .../apache/hadoop/hdfs/TestDFSOutputStream.java  | 19 +++
 2 files changed, 20 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fcea8a0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index a73ab95..e4929e1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -749,8 +749,6 @@ public class DFSOutputStream extends FSOutputSummer
 b.add(e);
   }
 }
-
-dfsClient.endFileLease(fileId);
 final IOException ioe = b.build();
 if (ioe != null) {
   throw ioe;
@@ -763,6 +761,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   void setClosed() {
 closed = true;
+dfsClient.endFileLease(fileId);
 getStreamer().release();
   }
 
@@ -796,8 +795,6 @@ public class DFSOutputStream extends FSOutputSummer
 b.add(e);
   }
 }
-
-dfsClient.endFileLease(fileId);
 final IOException ioe = b.build();
 if (ioe != null) {
   throw ioe;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fcea8a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index d9df1ff..750103d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -50,6 +50,11 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyLong;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import org.mockito.internal.util.reflection.Whitebox;
 
 import static org.junit.Assert.assertEquals;
@@ -202,6 +207,20 @@ public class TestDFSOutputStream {
 assertEquals(1, 3 - numDataNodesWithData);
   }
 
+  @Test
+  public void testEndLeaseCall() throws Exception {
+Configuration conf = new Configuration();
+DFSClient client = new DFSClient(cluster.getNameNode(0)
+.getNameNodeAddress(), conf);
+DFSClient spyClient = Mockito.spy(client);
+DFSOutputStream dfsOutputStream = spyClient.create("/file2",
+FsPermission.getFileDefault(),
+EnumSet.of(CreateFlag.CREATE), (short) 3, 1024, null , 1024, null);
+DFSOutputStream spyDFSOutputStream = Mockito.spy(dfsOutputStream);
+spyDFSOutputStream.closeThreads(anyBoolean());
+verify(spyClient, times(1)).endFileLease(anyLong());
+  }
+
   @AfterClass
   public static void tearDown() {
 if (cluster != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: HDFS-11144. TestFileCreationDelete#testFileCreationDeleteParent fails with bind exception. Contributed by Brahma Reddy Battula.

2016-11-22 Thread sjlee
HDFS-11144. TestFileCreationDelete#testFileCreationDeleteParent fails with bind 
exception. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c68dad18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c68dad18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c68dad18

Branch: refs/heads/HADOOP-13070
Commit: c68dad18ab5cdf01f3dea1bb5988f896609956b4
Parents: d232625
Author: Brahma Reddy Battula 
Authored: Mon Nov 21 10:58:34 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Mon Nov 21 10:58:34 2016 +0530

--
 .../org/apache/hadoop/hdfs/TestFileCreationDelete.java   | 11 +++
 1 file changed, 3 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c68dad18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
index 47ce947..58e1194 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
@@ -46,7 +46,6 @@ public class TestFileCreationDelete {
 try {
   cluster.waitActive();
   fs = cluster.getFileSystem();
-  final int nnport = cluster.getNameNodePort();
 
   // create file1.
   Path dir = new Path("/foo");
@@ -68,22 +67,18 @@ public class TestFileCreationDelete {
   // rm dir
   fs.delete(dir, true);
 
-  // restart cluster with the same namenode port as before.
+  // restart cluster.
   // This ensures that leases are persisted in fsimage.
   cluster.shutdown();
   try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
 
   // restart cluster yet again. This triggers the code to read in
   // persistent leases from fsimage.
   cluster.shutdown();
   try {Thread.sleep(5000);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: YARN-5874. RM -format-state-store and -remove-application-from-state-store commands fail with NPE. Contributed by Varun Saxena. [Forced Update!]

2016-11-22 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13070 f0e56e364 -> ceb2cb289 (forced update)


YARN-5874. RM -format-state-store and -remove-application-from-state-store 
commands fail with NPE. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7070f33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7070f33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7070f33

Branch: refs/heads/HADOOP-13070
Commit: b7070f3308fc4c6a8a9a25021562169cae87d223
Parents: 296c5de
Author: Rohith Sharma K S 
Authored: Tue Nov 15 10:58:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Nov 15 10:58:25 2016 +0530

--
 .../server/resourcemanager/ResourceManager.java |   8 +-
 .../resourcemanager/recovery/RMStateStore.java  |   3 +-
 .../recovery/ZKRMStateStore.java|   3 +-
 .../resourcemanager/TestRMStoreCommands.java| 103 +++
 .../recovery/TestZKRMStateStore.java|  42 +---
 5 files changed, 140 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7070f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 03daeb9..8ddbc20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1442,8 +1442,10 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
* @param conf
* @throws Exception
*/
-  private static void deleteRMStateStore(Configuration conf) throws Exception {
+  @VisibleForTesting
+  static void deleteRMStateStore(Configuration conf) throws Exception {
 RMStateStore rmStore = RMStateStoreFactory.getStore(conf);
+rmStore.setResourceManager(new ResourceManager());
 rmStore.init(conf);
 rmStore.start();
 try {
@@ -1455,9 +1457,11 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 }
   }
 
-  private static void removeApplication(Configuration conf, String 
applicationId)
+  @VisibleForTesting
+  static void removeApplication(Configuration conf, String applicationId)
   throws Exception {
 RMStateStore rmStore = RMStateStoreFactory.getStore(conf);
+rmStore.setResourceManager(new ResourceManager());
 rmStore.init(conf);
 rmStore.start();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7070f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index fc12522..a6527d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -86,7 +86,8 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
 public abstract class RMStateStore extends AbstractService {
 
   // constants for RM App state and RMDTSecretManagerState.
-  protected static final String RM_APP_ROOT = "RMAppRoot";
+  @VisibleForTesting
+  public static final String RM_APP_ROOT = "RMAppRoot";
   protected static final String RM_DT_SECRET_MANAGER_ROOT = 
"RMDTSecretManagerRoot";
   protected static final String DELEGATION_KEY_PREFIX = "DelegationKey_";
   protected static final String DELEGATION_TOKEN_PREFIX = "RMDelegationToken_";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7070f33/hadoop-yarn-pr

[29/50] [abbrv] hadoop git commit: YARN-5904. Reduce the number of default server threads for AMRMProxyService.

2016-11-22 Thread sjlee
YARN-5904. Reduce the number of default server threads for AMRMProxyService.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/140b9939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/140b9939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/140b9939

Branch: refs/heads/HADOOP-13070
Commit: 140b9939da71ec51c178162501740a429b344cac
Parents: f05a9ce
Author: Subru Krishnan 
Authored: Thu Nov 17 18:26:25 2016 -0800
Committer: Subru Krishnan 
Committed: Thu Nov 17 18:26:25 2016 -0800

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/140b9939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a54104d..fce78c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1755,7 +1755,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String AMRM_PROXY_CLIENT_THREAD_COUNT = NM_PREFIX
   + "amrmproxy.client.thread-count";
-  public static final int DEFAULT_AMRM_PROXY_CLIENT_THREAD_COUNT = 25;
+  public static final int DEFAULT_AMRM_PROXY_CLIENT_THREAD_COUNT = 3;
 
   public static final String AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE =
   NM_PREFIX + "amrmproxy.interceptor-class.pipeline";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: closes apache/hadoop#50 Addendum commit for HADOOP-11601.

2016-11-22 Thread sjlee
closes apache/hadoop#50 Addendum commit for HADOOP-11601.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f78470c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f78470c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f78470c2

Branch: refs/heads/HADOOP-13070
Commit: f78470c2644e18b03ae86af12c08893a8b2e536d
Parents: ae8849f
Author: Mingliang Liu 
Authored: Thu Nov 17 14:30:22 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 17 14:30:53 2016 -0800

--

--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HDFS-11105. TestRBWBlockInvalidation#testRWRInvalidation fails intermittently. Contributed by Yiqun Lin

2016-11-22 Thread sjlee
HDFS-11105. TestRBWBlockInvalidation#testRWRInvalidation fails intermittently. 
Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c90891e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c90891e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c90891e7

Branch: refs/heads/HADOOP-13070
Commit: c90891e7b3afaa66dbca967b83a9997e80e893d8
Parents: b8690a9
Author: Mingliang Liu 
Authored: Wed Nov 16 13:02:10 2016 -0800
Committer: Mingliang Liu 
Committed: Wed Nov 16 13:02:10 2016 -0800

--
 .../TestRBWBlockInvalidation.java   | 45 +---
 1 file changed, 39 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c90891e7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
index 9816af8..cb2ee9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
@@ -41,8 +41,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import 
org.apache.hadoop.hdfs.server.namenode.ha.TestDNFencing.RandomDeleterPolicy;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
 /**
@@ -141,7 +143,7 @@ public class TestRBWBlockInvalidation {
* were RWR replicas with out-of-date genstamps, the NN could accidentally
* delete good replicas instead of the bad replicas.
*/
-  @Test(timeout=6)
+  @Test(timeout=12)
   public void testRWRInvalidation() throws Exception {
 Configuration conf = new HdfsConfiguration();
 
@@ -156,10 +158,11 @@ public class TestRBWBlockInvalidation {
 // Speed up the test a bit with faster heartbeats.
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
+int numFiles = 10;
 // Test with a bunch of separate files, since otherwise the test may
 // fail just due to "good luck", even if a bug is present.
 List testPaths = Lists.newArrayList();
-for (int i = 0; i < 10; i++) {
+for (int i = 0; i < numFiles; i++) {
   testPaths.add(new Path("/test" + i));
 }
 
@@ -176,8 +179,11 @@ public class TestRBWBlockInvalidation {
   out.writeBytes("old gs data\n");
   out.hflush();
 }
-
-
+
+for (Path path : testPaths) {
+  DFSTestUtil.waitReplication(cluster.getFileSystem(), path, (short)2);
+}
+
 // Shutdown one of the nodes in the pipeline
 DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
 
@@ -195,7 +201,11 @@ public class TestRBWBlockInvalidation {
   cluster.getFileSystem().setReplication(path, (short)1);
   out.close();
 }
-
+
+for (Path path : testPaths) {
+  DFSTestUtil.waitReplication(cluster.getFileSystem(), path, (short)1);
+}
+
 // Upon restart, there will be two replicas, one with an old genstamp
 // and one current copy. This test wants to ensure that the old 
genstamp
 // copy is the one that is deleted.
@@ -218,7 +228,8 @@ public class TestRBWBlockInvalidation {
 cluster.triggerHeartbeats();
 HATestUtil.waitForDNDeletions(cluster);
 cluster.triggerDeletionReports();
-
+
+waitForNumTotalBlocks(cluster, numFiles);
 // Make sure we can still read the blocks.
 for (Path path : testPaths) {
   String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
@@ -232,4 +243,26 @@ public class TestRBWBlockInvalidation {
 }
 
   }
+
+  private void waitForNumTotalBlocks(final MiniDFSCluster cluster,
+  final int numTotalBlocks) throws Exception {
+GenericTestUtils.waitFor(new Supplier() {
+
+  @Override
+  public Boolean get() {
+try {
+  cluster.triggerBlockReports();
+
+  // Wait total blocks
+  if (cluster.getNamesystem().getBlocksTotal() == numTotalBlocks) {
+return true;
+  }
+} catch (Exception ignored) {
+  // Ignore the exception
+}
+
+return false;
+  }
+}, 1000, 6);
+  }
 }


---

[13/50] [abbrv] hadoop git commit: YARN-5375. invoke MockRM#drainEvents implicitly in MockRM methods to reduce test failures. Contributed by sandflee.

2016-11-22 Thread sjlee
YARN-5375. invoke MockRM#drainEvents implicitly in MockRM methods to reduce 
test failures. Contributed by sandflee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6560351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6560351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6560351

Branch: refs/heads/HADOOP-13070
Commit: d65603517e52843f11cd9d3b6f6e28fca9336ee3
Parents: 61c0bed
Author: Rohith Sharma K S 
Authored: Wed Nov 16 15:14:00 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Nov 16 15:14:00 2016 +0530

--
 .../hadoop/yarn/event/DrainDispatcher.java  | 15 ++-
 .../resourcemanager/recovery/RMStateStore.java  | 24 +++--
 .../yarn/server/resourcemanager/MockRM.java | 97 ++--
 .../resourcemanager/TestApplicationCleanup.java | 15 ---
 .../TestNodeBlacklistingOnAMFailures.java   | 14 ---
 .../yarn/server/resourcemanager/TestRM.java |  8 +-
 .../server/resourcemanager/TestRMRestart.java   |  1 +
 .../TestAMRMRPCNodeUpdates.java | 13 ---
 .../scheduler/TestAbstractYarnScheduler.java|  2 +-
 .../scheduler/fair/TestFairScheduler.java   |  7 ++
 .../webapp/TestRMWebServicesNodes.java  |  1 +
 11 files changed, 134 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6560351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index f769492..1369465 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -17,6 +17,8 @@
 */
 package org.apache.hadoop.yarn.event;
 
+import org.apache.hadoop.conf.Configuration;
+
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -37,6 +39,13 @@ public class DrainDispatcher extends AsyncDispatcher {
 this.mutex = this;
   }
 
+  @Override
+  public void serviceInit(Configuration conf)
+  throws Exception {
+conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, false);
+super.serviceInit(conf);
+  }
+
   /**
*  Wait till event thread enters WAITING state (i.e. waiting for new 
events).
*/
@@ -50,7 +59,7 @@ public class DrainDispatcher extends AsyncDispatcher {
* Busy loop waiting for all queued events to drain.
*/
   public void await() {
-while (!drained) {
+while (!isDrained()) {
   Thread.yield();
 }
   }
@@ -96,7 +105,9 @@ public class DrainDispatcher extends AsyncDispatcher {
 
   @Override
   protected boolean isDrained() {
-return drained;
+synchronized (mutex) {
+  return drained;
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6560351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index a6527d8..0fd346f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -671,14 +671,18 @@ public abstract class RMStateStore extends 
AbstractService {
   }
   
   AsyncDispatcher dispatcher;
+  @SuppressWarnings("rawtypes")
+  @VisibleForTesting
+  protected EventHandler rmStateStoreEventHandler;
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception{
 // create async handler
 dispatcher = new AsyncDispatcher();
 dispatcher.init(conf);
+rmStateStoreEventHandler = new ForwardingEventHandler();
 dispatcher.register(RMStateStoreEventType.class, 
-new ForwardingEventHandler());
+rmStateStoreEventHan

[24/50] [abbrv] hadoop git commit: HADOOP-11601. Enhance FS spec & tests to mandate FileStatus.getBlocksize() >0 for non-empty files. Contributed by Steve Loughran

2016-11-22 Thread sjlee
HADOOP-11601. Enhance FS spec & tests to mandate FileStatus.getBlocksize() >0 
for non-empty files. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae8849fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae8849fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae8849fe

Branch: refs/heads/HADOOP-13070
Commit: ae8849fe378e11b9db642ef7784c8e6a08199b96
Parents: bd37355
Author: Mingliang Liu 
Authored: Wed Nov 16 15:04:30 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 17 14:11:38 2016 -0800

--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../fs/contract/AbstractContractCreateTest.java | 96 +++-
 .../hadoop/fs/contract/ContractTestUtils.java   | 31 +++
 3 files changed, 134 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae8849fe/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 063bd97..b18b5f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -78,6 +78,7 @@ Get the status of a path
 if isFile(FS, p) :
 stat.length = len(FS.Files[p])
 stat.isdir = False
+stat.blockSize > 0
 elif isDir(FS, p) :
 stat.length = 0
 stat.isdir = True
@@ -451,13 +452,13 @@ split calculations to divide work optimally across a set 
of worker processes.
 
  Postconditions
 
-result = integer >= 0
+result = integer > 0
 
 Although there is no defined minimum value for this result, as it
 is used to partition work during job submission, a block size
-that is too small will result in either too many jobs being submitted
-for efficient work, or the `JobSubmissionClient` running out of memory.
-
+that is too small will result in badly partitioned workload,
+or even the `JobSubmissionClient` and equivalent
+running out of memory as it calculates the partitions.
 
 Any FileSystem that does not actually break files into blocks SHOULD
 return a number for this that results in efficient processing.
@@ -503,12 +504,12 @@ on the filesystem.
 
  Postconditions
 
-
+if len(FS, P) > 0:  getFileStatus(P).getBlockSize() > 0
 result == getFileStatus(P).getBlockSize()
 
-The outcome of this operation MUST be identical to that contained in
-the `FileStatus` returned from `getFileStatus(P)`.
-
+1. The outcome of this operation MUST be identical to the value of
+   `getFileStatus(P).getBlockSize()`.
+1. By inference, it MUST be > 0 for any file of length > 0.
 
 ## State Changing Operations
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae8849fe/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 84dc775..2230fd4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
 import org.junit.internal.AssumptionViolatedException;
 
@@ -30,16 +30,22 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
+import static 
org.apache.hadoop.fs.contract.ContractTestUtils.getFileStatusEventually;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile;
 
 /**
- * Test creating files, overwrite options &c
+ * Test creating files, overwrite options etc.
  */
 public abstract class AbstractContractCreateTest extends
  AbstractFSContrac

[40/50] [abbrv] hadoop git commit: HDFS-10966. Enhance Dispatcher logic on deciding when to give up a source DataNode. Contributed by Mark Wagner and Zhe Zhang.

2016-11-22 Thread sjlee
HDFS-10966. Enhance Dispatcher logic on deciding when to give up a source 
DataNode. Contributed by  Mark Wagner and Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49a09179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49a09179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49a09179

Branch: refs/heads/HADOOP-13070
Commit: 49a09179e3fadae090126261be0a7fe0aa48798e
Parents: f922067
Author: Kihwal Lee 
Authored: Mon Nov 21 10:13:24 2016 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 21 10:13:24 2016 -0600

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++
 .../hadoop/hdfs/server/balancer/Balancer.java   |  5 ++-
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 34 
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  5 ++-
 .../src/main/resources/hdfs-default.xml | 20 
 .../hdfs/server/balancer/TestBalancer.java  |  2 ++
 6 files changed, 55 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a09179/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b1fb3f4..b9fd939 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -502,6 +502,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_BALANCER_KERBEROS_PRINCIPAL_KEY = 
"dfs.balancer.kerberos.principal";
   public static final String  DFS_BALANCER_BLOCK_MOVE_TIMEOUT = 
"dfs.balancer.block-move.timeout";
   public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0;
+  public static final String  DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = 
"dfs.balancer.max-no-move-interval";
+  public static final intDFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT = 
60*1000; // One minute
 
 
   public static final String  DFS_MOVER_MOVEDWINWIDTH_KEY = 
"dfs.mover.movedWinWidth";
@@ -519,6 +521,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.mover.keytab.file";
   public static final String  DFS_MOVER_KERBEROS_PRINCIPAL_KEY =
   "dfs.mover.kerberos.principal";
+  public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = 
"dfs.mover.max-no-move-interval";
+  public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a09179/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 583ade3..61352f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -285,13 +285,16 @@ public class Balancer {
 final int blockMoveTimeout = conf.getInt(
 DFSConfigKeys.DFS_BALANCER_BLOCK_MOVE_TIMEOUT,
 DFSConfigKeys.DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT);
+final int maxNoMoveInterval = conf.getInt(
+DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY,
+DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_DEFAULT);
 
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
 p.getExcludedNodes(), movedWinWidth, moverThreads,
 dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
-getBlocksMinBlockSize, blockMoveTimeout, conf);
+getBlocksMinBlockSize, blockMoveTimeout, maxNoMoveInterval, conf);
 this.threshold = p.getThreshold();
 this.policy = p.getBalancingPolicy();
 this.sourceNodes = p.getSourceNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a09179/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 

[23/50] [abbrv] hadoop git commit: HADOOP-13742. Expose NumOpenConnectionsPerUser as a metric. Brahma Reddy Battula.

2016-11-22 Thread sjlee
HADOOP-13742. Expose NumOpenConnectionsPerUser as a metric. Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd373555
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd373555
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd373555

Branch: refs/heads/HADOOP-13070
Commit: bd3735554fa5c3bc064c57ec78f4308430b14b48
Parents: b2d4b7b
Author: Kihwal Lee 
Authored: Thu Nov 17 12:16:38 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Nov 17 12:16:38 2016 -0600

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 61 +++-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  5 ++
 .../java/org/apache/hadoop/ipc/TestRPC.java | 29 +-
 .../org/apache/hadoop/test/MetricsAsserts.java  |  7 +++
 4 files changed, 98 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd373555/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 1c7e76a..8f1956e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -122,6 +122,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedOutputStream;
 import com.google.protobuf.Message;
+import org.codehaus.jackson.map.ObjectMapper;
 
 /** An abstract IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
@@ -2151,6 +2152,9 @@ public abstract class Server {
   authorizeConnection();
   // don't set until after authz because connection isn't established
   connectionContextRead = true;
+  if (user != null) {
+connectionManager.incrUserConnections(user.getShortUserName());
+  }
 }
 
 /**
@@ -3019,7 +3023,20 @@ public abstract class Server {
   public int getNumOpenConnections() {
 return connectionManager.size();
   }
-  
+
+  /**
+   * Get the NumOpenConnections/User.
+   */
+  public String getNumOpenConnectionsPerUser() {
+ObjectMapper mapper = new ObjectMapper();
+try {
+  return mapper
+  .writeValueAsString(connectionManager.getUserToConnectionsMap());
+} catch (IOException ignored) {
+}
+return null;
+  }
+
   /**
* The number of rpc calls in the queue.
* @return The number of rpc calls in the queue.
@@ -3139,6 +3156,9 @@ public abstract class Server {
   private class ConnectionManager {
 final private AtomicInteger count = new AtomicInteger();
 final private Set connections;
+/* Map to maintain the statistics per User */
+final private Map userToConnectionsMap;
+final private Object userToConnectionsMapLock = new Object();
 
 final private Timer idleScanTimer;
 final private int idleScanThreshold;
@@ -3170,6 +3190,7 @@ public abstract class Server {
   this.connections = Collections.newSetFromMap(
   new ConcurrentHashMap(
   maxQueueSize, 0.75f, readThreads+2));
+  this.userToConnectionsMap = new ConcurrentHashMap<>();
 }
 
 private boolean add(Connection connection) {
@@ -3187,7 +3208,39 @@ public abstract class Server {
   }
   return removed;
 }
-
+
+void incrUserConnections(String user) {
+  synchronized (userToConnectionsMapLock) {
+Integer count = userToConnectionsMap.get(user);
+if (count == null) {
+  count = 1;
+} else {
+  count++;
+}
+userToConnectionsMap.put(user, count);
+  }
+}
+
+void decrUserConnections(String user) {
+  synchronized (userToConnectionsMapLock) {
+Integer count = userToConnectionsMap.get(user);
+if (count == null) {
+  return;
+} else {
+  count--;
+}
+if (count == 0) {
+  userToConnectionsMap.remove(user);
+} else {
+  userToConnectionsMap.put(user, count);
+}
+  }
+}
+
+Map getUserToConnectionsMap() {
+  return userToConnectionsMap;
+}
+
 int size() {
   return count.get();
 }
@@ -3226,6 +3279,10 @@ public abstract class Server {
 // only close if actually removed to avoid double-closing due
 // to possible races
 connection.close();
+// Remove authorized users only
+if (connection.user != null && connection.connectionContextRead) {
+  decrUserConnections(

[22/50] [abbrv] hadoop git commit: HDFS-11134. Fix bind exception threw in TestRenameWhileOpen. Contributed By Yiqun Lin.

2016-11-22 Thread sjlee
HDFS-11134. Fix bind exception threw in TestRenameWhileOpen. Contributed By 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2d4b7b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2d4b7b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2d4b7b1

Branch: refs/heads/HADOOP-13070
Commit: b2d4b7b1b902aa830d44889d7747a4e1b816ab9d
Parents: aab9737
Author: Brahma Reddy Battula 
Authored: Thu Nov 17 10:22:30 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Nov 17 10:22:30 2016 +0530

--
 .../apache/hadoop/hdfs/TestRenameWhileOpen.java | 45 ++--
 1 file changed, 12 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2d4b7b1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
index 949fc74..827577d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
@@ -72,8 +72,6 @@ public class TestRenameWhileOpen {
   doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
   DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
 
-  final int nnport = cluster.getNameNodePort();
-
   // create file1.
   Path dir1 = new Path("/user/a+b/dir1");
   Path file1 = new Path(dir1, "file1");
@@ -111,22 +109,18 @@ public class TestRenameWhileOpen {
   // for file3, since we just added a block to that file.
   cluster.getNameNode().stop();
 
-  // Restart cluster with the same namenode port as before.
+  // Restart cluster.
   cluster.shutdown();
 
   try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
 
   // restart cluster yet again. This triggers the code to read in
   // persistent leases from the edit log.
   cluster.shutdown();
   try {Thread.sleep(5000);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
 
@@ -161,7 +155,6 @@ public class TestRenameWhileOpen {
 try {
   cluster.waitActive();
   fs = cluster.getFileSystem();
-  final int nnport = cluster.getNameNodePort();
 
   // create file1.
   Path dir1 = new Path("/user/dir1");
@@ -185,22 +178,18 @@ public class TestRenameWhileOpen {
   Path dir3 = new Path("/user/dir3");
   fs.rename(dir1, dir3);
 
-  // restart cluster with the same namenode port as before.
+  // restart cluster.
   // This ensures that leases are persisted in fsimage.
   cluster.shutdown();
   try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
 
   // restart cluster yet again. This triggers the code to read in
   // persistent leases from fsimage.
   cluster.shutdown();
   try {Thread.sleep(5000);} catch (InterruptedException e) {}
-  cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
-.format(false)
-.build();
+  cluster = new MiniDFSCluster.Builder(conf).format(false).build();
   cluster.waitActive();
   fs = cluster.getFileSystem();
 
@@ -236,7 +225,6 @@ public class TestRenameWhileOpen {
 try {
   cluster.waitActive();
   fs = cluster.getFileSystem();
-  final int nnport = cluster.getNameNodePort();
 
   // create file1.
   Path dir1 = new Path("/user/dir1");
@@ -252,22 +240,18 @@ public class TestRenameWhileOpen {
 
   fs.rename(file1, dir2);
 
-  // restart cluster with the same namenode port as be

[27/50] [abbrv] hadoop git commit: closes apache/hadoop#116 *Invalid*

2016-11-22 Thread sjlee
closes apache/hadoop#116 *Invalid*


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4f1971f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4f1971f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4f1971f

Branch: refs/heads/HADOOP-13070
Commit: b4f1971ff1dd578353036d7a123fe83c27c1e803
Parents: cdb323f
Author: Mingliang Liu 
Authored: Thu Nov 17 14:58:16 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 17 14:58:16 2016 -0800

--

--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: HDFS-11117. Refactor striped file tests to allow flexibly test erasure coding policy. Contributed by Sammi Chen

2016-11-22 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index 13dcccf..15a8756 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -54,13 +55,15 @@ public class TestAddOverReplicatedStripedBlocks {
   private DistributedFileSystem fs;
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
-  private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final short GROUP_SIZE = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
-  private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int NUM_STRIPE_PER_BLOCK = 4;
-  private final int BLOCK_SIZE = NUM_STRIPE_PER_BLOCK * CELLSIZE;
-  private final int numDNs = GROUP_SIZE + 3;
+  private final ErasureCodingPolicy ecPolicy =
+  ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int numDNs = groupSize + 3;
 
   @Rule
   public Timeout globalTimeout = new Timeout(30);
@@ -68,7 +71,7 @@ public class TestAddOverReplicatedStripedBlocks {
   @Before
   public void setup() throws IOException {
 Configuration conf = new Configuration();
-conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 // disable block recovery
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
@@ -92,17 +95,17 @@ public class TestAddOverReplicatedStripedBlocks {
   @Test
   public void testProcessOverReplicatedStripedBlock() throws Exception {
 // create a file which has exact one block group to the first GROUP_SIZE 
DNs
-long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
+long fileLen = dataBlocks * blockSize;
 DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
-NUM_STRIPE_PER_BLOCK, false);
+stripesPerBlock, false);
 LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
 filePath.toString(), 0, fileLen);
 LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
 long gs = bg.getBlock().getGenerationStamp();
 String bpid = bg.getBlock().getBlockPoolId();
 long groupId = bg.getBlock().getBlockId();
-Block blk = new Block(groupId, BLOCK_SIZE, gs);
-for (int i = 0; i < GROUP_SIZE; i++) {
+Block blk = new Block(groupId, blockSize, gs);
+for (int i = 0; i < groupSize; i++) {
   blk.setBlockId(groupId + i);
   cluster.injectBlocks(i, Arrays.asList(blk), bpid);
 }
@@ -113,7 +116,7 @@ public class TestAddOverReplicatedStripedBlocks {
 cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
 cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
 // let a internal block be over replicated with 1 redundant block.
-blk.setBlockId(groupId + DATA_BLK_NUM);
+blk.setBlockId(groupId + dataBlocks);
 cluster.injectBlocks(numDNs - 1, Arrays.asList(blk), bpid);
 
 // update blocksMap
@@ -128,14 +131,14 @@ public class TestAddOverReplicatedStripedBlocks {
 // verify that all internal blocks exists
 lbs = cluster.getNameNodeRpc().getBlockLocations(
 filePath.toString(), 0, fileLen);
-StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE);
+StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize);
   }
 
   @Test
   public void testProcessOverReplicatedSBSma

[11/50] [abbrv] hadoop git commit: YARN-5875. TestTokenClientRMService#testTokenRenewalWrongUser fails. Contributed by Gergely Novák.

2016-11-22 Thread sjlee
YARN-5875. TestTokenClientRMService#testTokenRenewalWrongUser fails. 
Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f121d0b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f121d0b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f121d0b0

Branch: refs/heads/HADOOP-13070
Commit: f121d0b036fe031dd24f2f549ae5729304bfa59c
Parents: 264ddb1
Author: Xiao Chen 
Authored: Tue Nov 15 13:54:51 2016 -0800
Committer: Xiao Chen 
Committed: Tue Nov 15 13:58:11 2016 -0800

--
 .../yarn/server/resourcemanager/TestTokenClientRMService.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f121d0b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
index 351f068..2a4e49d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
@@ -123,8 +123,9 @@ public class TestTokenClientRMService {
 return null;
   } catch (YarnException ex) {
 Assert.assertTrue(ex.getMessage().contains(
-owner.getUserName() + " tries to renew a token with renewer "
-+ other.getUserName()));
+owner.getUserName() + " tries to renew a token"));
+Assert.assertTrue(ex.getMessage().contains(
+"with non-matching renewer " + other.getUserName()));
 throw ex;
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: HADOOP-13660. Upgrade commons-configuration version. Contributed by Sean Mackrory.

2016-11-22 Thread sjlee
HADOOP-13660. Upgrade commons-configuration version. Contributed by Sean 
Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0b1a44f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0b1a44f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0b1a44f

Branch: refs/heads/HADOOP-13070
Commit: c0b1a44f6c6e6f9e4ac5cecea0d4a50e237a4c9c
Parents: 09520cb
Author: Wei-Chiu Chuang 
Authored: Thu Nov 17 22:48:35 2016 -0600
Committer: Wei-Chiu Chuang 
Committed: Thu Nov 17 22:48:35 2016 -0600

--
 .../dev-support/findbugsExcludeFile.xml |  6 
 hadoop-common-project/hadoop-common/pom.xml |  9 +++--
 .../apache/hadoop/metrics2/MetricsPlugin.java   |  2 +-
 .../metrics2/filter/AbstractPatternFilter.java  |  2 +-
 .../hadoop/metrics2/impl/MetricsConfig.java | 38 ++--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  4 +--
 .../apache/hadoop/metrics2/sink/FileSink.java   |  2 +-
 .../hadoop/metrics2/sink/GraphiteSink.java  |  2 +-
 .../metrics2/sink/RollingFileSystemSink.java|  2 +-
 .../apache/hadoop/metrics2/sink/StatsDSink.java |  2 +-
 .../sink/ganglia/AbstractGangliaSink.java   |  2 +-
 .../metrics2/sink/ganglia/GangliaSink30.java| 26 +++---
 .../metrics2/filter/TestPatternFilter.java  |  2 +-
 .../hadoop/metrics2/impl/ConfigBuilder.java | 11 --
 .../apache/hadoop/metrics2/impl/ConfigUtil.java | 14 
 .../metrics2/impl/TestMetricsCollectorImpl.java |  2 +-
 .../hadoop/metrics2/impl/TestMetricsConfig.java |  2 +-
 .../metrics2/impl/TestMetricsSystemImpl.java|  2 +-
 .../sink/RollingFileSystemSinkTestBase.java |  2 +-
 .../sink/TestRollingFileSystemSink.java |  2 +-
 .../metrics2/sink/ganglia/TestGangliaSink.java  |  2 +-
 .../hadoop/hdfs/PositionStripeReader.java   |  1 -
 .../datanode/TestDataNodeFSDataSetSink.java |  2 +-
 hadoop-project/pom.xml  | 11 --
 hadoop-tools/hadoop-azure/pom.xml   |  6 
 .../fs/azure/AzureBlobStorageTestAccount.java   |  2 +-
 .../apache/hadoop/metrics2/sink/KafkaSink.java  |  2 +-
 .../hadoop/metrics2/impl/TestKafkaMetrics.java  |  2 +-
 28 files changed, 94 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b1a44f/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index bded4b99..cdd88f3 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -410,4 +410,10 @@
 
 
   
+
+  
+
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b1a44f/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 37f0b70..596f872 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -173,8 +173,13 @@
   compile
 
 
-  commons-configuration
-  commons-configuration
+  commons-beanutils
+  commons-beanutils
+  compile
+
+
+  org.apache.commons
+  commons-configuration2
   compile
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b1a44f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
index eca0394..ef0257c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.metrics2;
 
-import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.configuration2.SubsetConfiguration;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b1a44f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src

[37/50] [abbrv] hadoop git commit: HDFS-11113. Document dfs.client.read.striped configuration in hdfs-default.xml. Contributed by Rakesh R.

2016-11-22 Thread sjlee
HDFS-11113. Document dfs.client.read.striped configuration in hdfs-default.xml. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d232625f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d232625f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d232625f

Branch: refs/heads/HADOOP-13070
Commit: d232625f735e06b89360d8f5847c4331076ac477
Parents: c65d6b6
Author: Akira Ajisaka 
Authored: Mon Nov 21 14:05:15 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 21 14:05:15 2016 +0900

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 .../java/org/apache/hadoop/tools/TestHdfsConfigFields.java  | 1 +
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d232625f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index a605972..5d6376a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3373,6 +3373,15 @@
 
 
 
+  dfs.client.read.striped.threadpool.size
+  18
+  
+The maximum number of threads used for parallel reading
+in striped layout.
+  
+
+
+
   dfs.client.replica.accessor.builder.classes
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d232625f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index bf29428..3bbb609 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -41,6 +41,7 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
   public void initializeMemberVariables() {
 xmlFilename = new String("hdfs-default.xml");
 configurationClasses = new Class[] { HdfsClientConfigKeys.class,
+HdfsClientConfigKeys.StripedRead.class,
 DFSConfigKeys.class};
 
 // Set error modes


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: YARN-5271. ATS client doesn't work with Jersey 2 on the classpath. Contributed by Weiwei Yang.

2016-11-22 Thread sjlee
YARN-5271. ATS client doesn't work with Jersey 2 on the classpath.  Contributed 
by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09520cb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09520cb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09520cb4

Branch: refs/heads/HADOOP-13070
Commit: 09520cb439f8b002e3f2f3d8f5080ffc34f4bd5c
Parents: 140b993
Author: Wei-Chiu Chuang 
Authored: Thu Nov 17 22:17:23 2016 -0600
Committer: Wei-Chiu Chuang 
Committed: Thu Nov 17 22:17:23 2016 -0600

--
 .../yarn/client/api/impl/YarnClientImpl.java| 23 +++-
 .../yarn/client/api/impl/TestYarnClient.java| 21 ++
 2 files changed, 39 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09520cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 57f50c4..f0fce22 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -167,11 +167,24 @@ public class YarnClientImpl extends YarnClient {
 
 if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
 YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
-  timelineServiceEnabled = true;
-  timelineClient = createTimelineClient();
-  timelineClient.init(conf);
-  timelineDTRenewer = getTimelineDelegationTokenRenewer(conf);
-  timelineService = TimelineUtils.buildTimelineTokenService(conf);
+  try {
+timelineServiceEnabled = true;
+timelineClient = createTimelineClient();
+timelineClient.init(conf);
+timelineDTRenewer = getTimelineDelegationTokenRenewer(conf);
+timelineService = TimelineUtils.buildTimelineTokenService(conf);
+  } catch (NoClassDefFoundError error) {
+// When attempt to initiate the timeline client with
+// different set of dependencies, it may fail with
+// NoClassDefFoundError. When some of them are not compatible
+// with timeline server. This is not necessarily a fatal error
+// to the client.
+LOG.warn("Timeline client could not be initialized "
++ "because dependency missing or incompatible,"
++ " disabling timeline client.",
+error);
+timelineServiceEnabled = false;
+  }
 }
 
 // The AHSClientService is enabled by default when we start the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09520cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 19966ad..e218036 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.api.impl;
 
+import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
@@ -155,6 +156,26 @@ public class TestYarnClient {
 rm.stop();
   }
 
+  @Test
+  public void testTimelineClientInitFailure() throws Exception{
+Configuration conf = new Configuration();
+conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+YarnClient client = YarnClient.createYarnClient();
+if(client instanceof YarnClientImpl) {
+  YarnClientImpl impl = (YarnClientImpl) client;
+  YarnClientImpl spyClient = spy(impl);
+  when(spyClient.createTimelineClient()).thenThrow(
+  new NoClassDefFoundError(
+  "Mock a failure when init timeline instance"));
+  spyClient.init(conf);
+  spyClient.start();
+  assertFalse("Timeline client should be disabled when"
+  + "it is failed to init",
+  

[35/50] [abbrv] hadoop git commit: MAPREDUCE-6801. Fix flaky TestKill.testKillJob (Haibo Chen via Varun Saxena)

2016-11-22 Thread sjlee
MAPREDUCE-6801. Fix flaky TestKill.testKillJob (Haibo Chen via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7584fbf4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7584fbf4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7584fbf4

Branch: refs/heads/HADOOP-13070
Commit: 7584fbf4cbafd34fac4b362cefe4e06cec16a2af
Parents: f6ffa11
Author: Varun Saxena 
Authored: Sat Nov 19 01:37:59 2016 +0530
Committer: Varun Saxena 
Committed: Sat Nov 19 01:37:59 2016 +0530

--
 .../java/org/apache/hadoop/mapreduce/v2/app/TestKill.java   | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7584fbf4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
index aae591e..0714647 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 
+import org.apache.hadoop.service.Service;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -66,8 +67,8 @@ public class TestKill {
 Job job = app.submit(new Configuration());
 
 //wait and vailidate for Job to become RUNNING
-app.waitForState(job, JobState.RUNNING);
-
+app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
+
 //send the kill signal to Job
 app.getContext().getEventHandler().handle(
 new JobEvent(job.getID(), JobEventType.JOB_KILL));
@@ -77,6 +78,10 @@ public class TestKill {
 
 //wait and validate for Job to be KILLED
 app.waitForState(job, JobState.KILLED);
+// make sure all events are processed. The AM is stopped
+// only when all tasks and task attempts have been killed
+app.waitForState(Service.STATE.STOPPED);
+
 Map tasks = job.getTasks();
 Assert.assertEquals("No of tasks is not correct", 1, 
 tasks.size());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: YARN-4355. NPE while processing localizer heartbeat. Contributed by Varun Saxena & Jonathan Hung.

2016-11-22 Thread sjlee
YARN-4355. NPE while processing localizer heartbeat. Contributed by Varun 
Saxena & Jonathan Hung.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ffb9943
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ffb9943
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ffb9943

Branch: refs/heads/HADOOP-13070
Commit: 7ffb9943b8838a3bb56684e0722db40d800743a2
Parents: 43aef30
Author: Naganarasimha 
Authored: Tue Nov 15 15:41:56 2016 +0530
Committer: Naganarasimha 
Committed: Tue Nov 15 15:41:56 2016 +0530

--
 .../localizer/ResourceLocalizationService.java  |  42 +++
 .../TestResourceLocalizationService.java| 110 ++-
 2 files changed, 132 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffb9943/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4cd1acc..71971c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -1036,7 +1036,6 @@ public class ResourceLocalizationService extends 
CompositeService
 List remoteResourceStatuses) {
   LocalizerHeartbeatResponse response =
 recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
-
   String user = context.getUser();
   ApplicationId applicationId =
   
context.getContainerId().getApplicationAttemptId().getApplicationId();
@@ -1059,14 +1058,19 @@ public class ResourceLocalizationService extends 
CompositeService
   LOG.error("Unknown resource reported: " + req);
   continue;
 }
+LocalResourcesTracker tracker =
+getLocalResourcesTracker(req.getVisibility(), user, applicationId);
+if (tracker == null) {
+  // This is likely due to a race between heartbeat and
+  // app cleaning up.
+  continue;
+}
 switch (stat.getStatus()) {
   case FETCH_SUCCESS:
 // notify resource
 try {
-getLocalResourcesTracker(req.getVisibility(), user, applicationId)
-  .handle(
-new ResourceLocalizedEvent(req, stat.getLocalPath().toPath(),
-stat.getLocalSize()));
+  tracker.handle(new ResourceLocalizedEvent(req,
+  stat.getLocalPath().toPath(), stat.getLocalSize()));
 } catch (URISyntaxException e) { }
 
 // unlocking the resource and removing it from scheduled resource
@@ -1080,9 +1084,8 @@ public class ResourceLocalizationService extends 
CompositeService
 final String diagnostics = stat.getException().toString();
 LOG.warn(req + " failed: " + diagnostics);
 fetchFailed = true;
-getLocalResourcesTracker(req.getVisibility(), user, applicationId)
-  .handle(new ResourceFailedLocalizationEvent(
-  req, diagnostics));
+tracker.handle(new ResourceFailedLocalizationEvent(req,
+diagnostics));
 
 // unlocking the resource and removing it from scheduled resource
 // list
@@ -1092,9 +1095,8 @@ public class ResourceLocalizationService extends 
CompositeService
   default:
 LOG.info("Unknown status: " + stat.getStatus());
 fetchFailed = true;
-getLocalResourcesTracker(req.getVisibility(), user, applicationId)
-  .handle(new ResourceFailedLocalizationEvent(
-  req, stat.getException().getMessage()));
+tracker.handle(new ResourceFailedLocalizationEvent(req,
+stat.getException().getMessage()));
 break;
 }
   }
@@ -1114,10 +1116,14 @@ public class ResourceLocalizationService extends 
CompositeService
   LocalResource next = findNextResource();
   if (next != null) {
 try {
-  ResourceLoc

[14/50] [abbrv] hadoop git commit: MAPREDUCE-6804. Add timeout when starting JobHistoryServer in MiniMRYarnCluster. Contributed by Andras Bokor

2016-11-22 Thread sjlee
MAPREDUCE-6804. Add timeout when starting JobHistoryServer in 
MiniMRYarnCluster. Contributed by Andras Bokor


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef290cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef290cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef290cf

Branch: refs/heads/HADOOP-13070
Commit: 7ef290cfefdb03f89d08b3131dcd8440ecb37498
Parents: d656035
Author: Jason Lowe 
Authored: Wed Nov 16 15:23:12 2016 +
Committer: Jason Lowe 
Committed: Wed Nov 16 15:23:12 2016 +

--
 .../org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java| 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef290cf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 2d3d6ed..06e37dd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -255,11 +256,8 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
   };
 }.start();
 
-while (!jhsStarted) {
-  LOG.info("Waiting for HistoryServer to start...");
-  Thread.sleep(1500);
-}
-//TODO Add a timeout. State.STOPPED check ?
+GenericTestUtils.waitFor(() -> jhsStarted, 1500, 60_000);
+
 if (historyServer.getServiceState() != STATE.STARTED) {
   throw new IOException("HistoryServer failed to start");
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: YARN-5836. Malicious AM can kill containers of other apps running in any node its containers are running. Contributed by Botong Huang

2016-11-22 Thread sjlee
YARN-5836. Malicious AM can kill containers of other apps running in any node 
its containers are running. Contributed by Botong Huang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59bfcbf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59bfcbf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59bfcbf3

Branch: refs/heads/HADOOP-13070
Commit: 59bfcbf3579e45ddf96db3aafccf669c8e03648f
Parents: 6a11877
Author: Jason Lowe 
Authored: Wed Nov 16 22:21:03 2016 +
Committer: Jason Lowe 
Committed: Wed Nov 16 22:21:03 2016 +

--
 .../containermanager/ContainerManagerImpl.java  | 15 ++--
 .../TestContainerManagerWithLCE.java| 11 +++
 .../BaseContainerManagerTest.java   | 10 ++-
 .../containermanager/TestContainerManager.java  | 85 
 4 files changed, 97 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59bfcbf3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index e8de8b7..9546a30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1357,18 +1357,21 @@ public class ContainerManagerImpl extends 
CompositeService implements
 if 
((!nmTokenAppId.equals(containerId.getApplicationAttemptId().getApplicationId()))
 || (container != null && !nmTokenAppId.equals(container
 .getContainerId().getApplicationAttemptId().getApplicationId( {
+  String msg;
   if (stopRequest) {
-LOG.warn(identifier.getApplicationAttemptId()
+msg = identifier.getApplicationAttemptId()
 + " attempted to stop non-application container : "
-+ container.getContainerId());
++ containerId;
 NMAuditLogger.logFailure("UnknownUser", AuditConstants.STOP_CONTAINER,
-  "ContainerManagerImpl", "Trying to stop unknown container!",
-  nmTokenAppId, container.getContainerId());
+"ContainerManagerImpl", "Trying to stop unknown container!",
+nmTokenAppId, containerId);
   } else {
-LOG.warn(identifier.getApplicationAttemptId()
+msg = identifier.getApplicationAttemptId()
 + " attempted to get status for non-application container : "
-+ container.getContainerId());
++ containerId;
   }
+  LOG.warn(msg);
+  throw RPCUtil.getRemoteException(msg);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59bfcbf3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
index f72a606..8221827 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
@@ -190,6 +190,17 @@ public class TestContainerManagerWithLCE extends 
TestContainerManager {
   }
 
   @Override
+  public void testUnauthorizedRequests() throws IOException, YarnException {
+// Don't run the test if the binary is not available.
+if (!shouldRunTest()) {
+  LOG.info("LCE binary path is not passed. Not running the test");
+  return;
+}
+LOG.info("Running testUnauthorizedRequests");
+super.testUnauthorizedRequests();
+  }
+
+  @Ove

[15/50] [abbrv] hadoop git commit: MAPREDUCE-6811. TestPipeApplication#testSubmitter fails after HADOOP-13802 (Brahma Reddy Battula via Varun Saxena)

2016-11-22 Thread sjlee
MAPREDUCE-6811. TestPipeApplication#testSubmitter fails after HADOOP-13802 
(Brahma Reddy Battula via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8690a9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8690a9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8690a9d

Branch: refs/heads/HADOOP-13070
Commit: b8690a9d25cb212d53a92bfa1f76a33b8c74413c
Parents: 7ef290c
Author: Varun Saxena 
Authored: Thu Nov 17 00:51:07 2016 +0530
Committer: Varun Saxena 
Committed: Thu Nov 17 00:51:07 2016 +0530

--
 .../mapred/pipes/TestPipeApplication.java   | 35 +---
 1 file changed, 16 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8690a9d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 22c5f41..c8b2f3a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -300,28 +300,25 @@ public class TestPipeApplication {
   assertTrue(out.toString().contains(
   "[-lazyOutput ] // createOutputLazily"));
 
-  assertTrue(out
-  .toString()
-  .contains(
-  "-conf  specify an application 
configuration file"));
   assertTrue(out.toString().contains(
-  "-D use value for given property"));
+  "-conf specify an application "
+  + "configuration file"));
   assertTrue(out.toString().contains(
-  "-fs   specify a namenode"));
+  "-Ddefine a value for a given "
+  + "property"));
+  assertTrue(out.toString()
+  .contains("-fs  specify a namenode"));
+  assertTrue(out.toString().contains(
+  "-jt   specify a ResourceManager"));
+  assertTrue(out.toString().contains(
+  "-files specify a comma-separated list of 
"
+  + "files to be copied to the map reduce cluster"));
+  assertTrue(out.toString().contains(
+  "-libjarsspecify a comma-separated list of 
"
+  + "jar files to be included in the classpath"));
   assertTrue(out.toString().contains(
-  "-jt specify a 
ResourceManager"));
-  assertTrue(out
-  .toString()
-  .contains(
-  "-files specify comma 
separated files to be copied to the map reduce cluster"));
-  assertTrue(out
-  .toString()
-  .contains(
-  "-libjars specify 
comma separated jar files to include in the classpath."));
-  assertTrue(out
-  .toString()
-  .contains(
-  "-archives specify 
comma separated archives to be unarchived on the compute machines."));
+  "-archives   specify a comma-separated list of 
"
+  + "archives to be unarchived on the compute machines"));
 } finally {
   System.setOut(oldps);
   // restore


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: YARN-5870. Expose getApplications API in YarnClient with GetApplicationsRequest parameter. Contributed by Jian He.

2016-11-22 Thread sjlee
YARN-5870. Expose getApplications API in YarnClient with GetApplicationsRequest 
parameter. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce2847e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce2847e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce2847e7

Branch: refs/heads/HADOOP-13070
Commit: ce2847e79441881eeca888b581c7be2cc3ac74f4
Parents: 04a024b
Author: Rohith Sharma K S 
Authored: Thu Nov 17 08:48:35 2016 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 17 08:48:35 2016 +0530

--
 .../hadoop/yarn/client/api/YarnClient.java  | 26 
 .../yarn/client/api/impl/YarnClientImpl.java|  7 ++
 2 files changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2847e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 4cac2c2..4e0ba2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -32,6 +32,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
@@ -379,6 +381,30 @@ public abstract class YarnClient extends AbstractService {
 
   /**
* 
+   * Get a list of ApplicationReports that match the given
+   * {@link GetApplicationsRequest}.
+   *
+   *
+   * 
+   * If the user does not have VIEW_APP access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * 
+   *
+   * @param request the request object to get the list of applications.
+   * @return The list of ApplicationReports that match the request
+   * @throws YarnException Exception specific to YARN.
+   * @throws IOException Exception mostly related to connection errors.
+   */
+  public List getApplications(GetApplicationsRequest 
request)
+  throws YarnException, IOException {
+throw new UnsupportedOperationException(
+"The sub-class extending " + YarnClient.class.getName()
++ " is expected to implement this !");
+  }
+
+  /**
+   * 
* Get metrics ({@link YarnClusterMetrics}) about the cluster.
* 
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2847e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 80e453f..57f50c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -544,6 +544,13 @@ public class YarnClientImpl extends YarnClient {
   }
 
   @Override
+  public List getApplications(
+  GetApplicationsRequest request) throws YarnException, IOException {
+GetApplicationsResponse response = rmClient.getApplications(request);
+return response.getApplicationList();
+  }
+
+  @Override
   public YarnClusterMetrics getYarnClusterMetrics() throws YarnException,
   IOException {
 GetClusterMetricsRequest request =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: HADOOP-13822. Use GlobalStorageStatistics.INSTANCE.reset() at FileSystem#clearStatistics(). Contributed by Brahma Reddy Battula

2016-11-22 Thread sjlee
HADOOP-13822. Use GlobalStorageStatistics.INSTANCE.reset() at 
FileSystem#clearStatistics(). Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aab9737a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aab9737a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aab9737a

Branch: refs/heads/HADOOP-13070
Commit: aab9737a058c0bdeeb17c173e530b7e571315a56
Parents: ce2847e
Author: Mingliang Liu 
Authored: Wed Nov 16 20:04:57 2016 -0800
Committer: Mingliang Liu 
Committed: Wed Nov 16 20:08:48 2016 -0800

--
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java| 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab9737a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 529745c..9e98455 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3719,12 +3719,7 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* Reset all statistics for all file systems
*/
   public static synchronized void clearStatistics() {
-final Iterator iterator =
-GlobalStorageStatistics.INSTANCE.iterator();
-while (iterator.hasNext()) {
-  final StorageStatistics statistics = iterator.next();
-  statistics.reset();
-}
+GlobalStorageStatistics.INSTANCE.reset();
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: YARN-5891. yarn rmadmin -help contains a misspelled ResourceManager (Grant Sohn via Varun Saxena)

2016-11-22 Thread sjlee
YARN-5891. yarn rmadmin -help contains a misspelled ResourceManager (Grant Sohn 
via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a11877a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a11877a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a11877a

Branch: refs/heads/HADOOP-13070
Commit: 6a11877ab5b4e81769dc097e91198abd994d103f
Parents: c90891e
Author: Varun Saxena 
Authored: Thu Nov 17 03:26:13 2016 +0530
Committer: Varun Saxena 
Committed: Thu Nov 17 03:26:13 2016 +0530

--
 .../main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a11877a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 82a910b..7779ddf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -124,7 +124,7 @@ public class RMAdminCLI extends HAAdmin {
   "Refresh acls for administration of ResourceManager"))
   .put("-refreshServiceAcl", new UsageInfo("",
   "Reload the service-level authorization policy file. \n\t\t" +
-  "ResoureceManager will reload the authorization policy 
file."))
+  "ResourceManager will reload the authorization policy 
file."))
   .put("-getGroups", new UsageInfo("[username]",
   "Get the groups which given user belongs to."))
   .put("-addToClusterNodeLabels",


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-5713. Update jackson from 1.9.13 to 2.x in hadoop-yarn.

2016-11-22 Thread sjlee
YARN-5713. Update jackson from 1.9.13 to 2.x in hadoop-yarn.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f807429
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f807429
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f807429

Branch: refs/heads/HADOOP-13070
Commit: 6f8074298d8f33effe08f6be49ecfc89f69feda7
Parents: 683e0c7
Author: Akira Ajisaka 
Authored: Tue Nov 22 10:20:31 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 22 10:20:31 2016 +0900

--
 hadoop-project/pom.xml  | 10 ++
 .../hadoop-yarn/hadoop-yarn-api/pom.xml |  4 +++
 .../records/timelineservice/TimelineEntity.java |  3 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  | 32 ++--
 .../api/impl/FileSystemTimelineWriter.java  | 22 +++---
 .../client/api/impl/TimelineClientImpl.java |  2 +-
 .../yarn/util/timeline/TimelineUtils.java   |  6 ++--
 .../apache/hadoop/yarn/webapp/Controller.java   |  2 +-
 .../webapp/YarnJacksonJaxbJsonProvider.java | 16 +-
 .../hadoop-yarn/hadoop-yarn-registry/pom.xml| 12 
 .../registry/client/binding/JsonSerDeser.java   | 32 +---
 .../hadoop/registry/client/types/Endpoint.java  |  4 +--
 .../client/types/RegistryPathStatus.java|  4 +--
 .../registry/client/types/ServiceRecord.java|  7 ++---
 .../server/timeline/GenericObjectMapper.java|  6 ++--
 .../pom.xml | 12 
 .../timeline/EntityGroupFSTimelineStore.java| 12 +---
 .../hadoop/yarn/server/timeline/LogInfo.java| 12 
 .../server/timeline/PluginStoreTestUtils.java   | 22 +++---
 .../yarn/server/timeline/TestLogInfo.java   | 12 
 .../hadoop-yarn-server-timelineservice/pom.xml  | 10 --
 .../storage/FileSystemTimelineReaderImpl.java   |  6 ++--
 22 files changed, 115 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f807429/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 93a41f2..95fe839 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -866,6 +866,16 @@
 ${jackson2.version}
   
   
+com.fasterxml.jackson.module
+jackson-module-jaxb-annotations
+${jackson2.version}
+  
+  
+com.fasterxml.jackson.jaxrs
+jackson-jaxrs-json-provider
+${jackson2.version}
+  
+  
 org.mockito
 mockito-all
 1.8.5

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f807429/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 71014e4..d36ae58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -81,6 +81,10 @@
   junit
   test
 
+
+  com.fasterxml.jackson.core
+  jackson-annotations
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f807429/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 9c0a983..e43c3ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -32,7 +32,8 @@ import javax.xml.bind.annotation.XmlRootElement;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.annotate.JsonSetter;
+
+import com.fasterxml.jackson.annotation.JsonSetter;
 
 /**
  * The basic timeline entity data structure for timeline service v2. Timeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f807429/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hado

[28/50] [abbrv] hadoop git commit: YARN-3538. TimelineWebService doesn't catch runtime exception. Contributed by Steve Loughran and Tsuyoshi Ozawa.

2016-11-22 Thread sjlee
YARN-3538. TimelineWebService doesn't catch runtime exception. Contributed by 
Steve Loughran and Tsuyoshi Ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f05a9ceb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f05a9ceb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f05a9ceb

Branch: refs/heads/HADOOP-13070
Commit: f05a9ceb4a9623517aa1c8d995805e26ae1bde5a
Parents: b4f1971
Author: Junping Du 
Authored: Thu Nov 17 16:55:39 2016 -0800
Committer: Junping Du 
Committed: Thu Nov 17 16:55:39 2016 -0800

--
 .../hadoop/yarn/server/timeline/webapp/TimelineWebServices.java  | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f05a9ceb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index d4acbe4..ad4e2bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -270,6 +270,10 @@ public class TimelineWebServices {
   // The user doesn't have the access to override the existing domain.
   LOG.error(e.getMessage(), e);
   throw new ForbiddenException(e);
+} catch (RuntimeException e) {
+  LOG.error("Error putting domain", e);
+  throw new WebApplicationException(e,
+  Response.Status.INTERNAL_SERVER_ERROR);
 } catch (IOException e) {
   LOG.error("Error putting domain", e);
   throw new WebApplicationException(e,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: YARN-5865. Retrospect updateApplicationPriority api to handle state store exception in align with YARN-5611. Contributed by Sunil G.

2016-11-22 Thread sjlee
YARN-5865. Retrospect updateApplicationPriority api to handle state store 
exception in align with YARN-5611. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a926f895
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a926f895
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a926f895

Branch: refs/heads/HADOOP-13070
Commit: a926f895c11cd69cf2117c3b970304f3f1f53d92
Parents: 6f80742
Author: Rohith Sharma K S 
Authored: Tue Nov 22 14:49:15 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Nov 22 14:49:15 2016 +0530

--
 .../ApplicationMasterService.java   |  2 +-
 .../server/resourcemanager/ClientRMService.java | 10 ++--
 .../server/resourcemanager/RMAppManager.java| 49 ++--
 .../metrics/TimelineServiceV1Publisher.java |  4 +-
 .../metrics/TimelineServiceV2Publisher.java |  4 +-
 .../server/resourcemanager/rmapp/RMApp.java | 14 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 37 ---
 .../scheduler/AbstractYarnScheduler.java|  6 ++-
 .../scheduler/YarnScheduler.java| 11 -
 .../scheduler/capacity/CapacityScheduler.java   | 28 +--
 .../scheduler/event/AppAddedSchedulerEvent.java |  5 +-
 .../resourcemanager/webapp/RMWebServices.java   |  6 +--
 .../resourcemanager/webapp/dao/AppInfo.java |  9 ++--
 .../server/resourcemanager/TestAppManager.java  |  1 +
 .../TestApplicationMasterService.java   |  6 +--
 .../resourcemanager/TestClientRMService.java| 13 --
 .../applicationsmanager/MockAsm.java| 11 +
 .../metrics/TestSystemMetricsPublisher.java |  2 +
 .../TestSystemMetricsPublisherForV2.java|  1 +
 .../server/resourcemanager/rmapp/MockRMApp.java | 10 
 .../rmapp/TestRMAppTransitions.java |  2 +
 .../capacity/TestApplicationPriority.java   | 13 +++---
 22 files changed, 181 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a926f895/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 4f952b7..3d7b2b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -616,7 +616,7 @@ public class ApplicationMasterService extends 
AbstractService implements
 
   // Set application priority
   allocateResponse.setApplicationPriority(app
-  .getApplicationSubmissionContext().getPriority());
+  .getApplicationPriority());
 
   // update AMRMToken if the token is rolled-up
   MasterKeyData nextMasterKey =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a926f895/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index c8af526..4e36b6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1602,14 +1602,14 @@ public class ClientRMService extends AbstractService 
implements
 .newRecordInstance(UpdateApplicationPriorityResponse.class);
 // Update priority only when app is tracked by the scheduler
 if (!ACTIVE_APP_STATES.contains(application.getState())) {
-  if (COMPLETED_APP_STATES.contains(application.getState())) {
+  if (a

[50/50] [abbrv] hadoop git commit: HADOOP-13400. Update the ApplicationClassLoader implementation in line with latest Java ClassLoader implementation. Contributed by Vrushali C.

2016-11-22 Thread sjlee
HADOOP-13400. Update the ApplicationClassLoader implementation in line with 
latest Java ClassLoader implementation. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ceb2cb28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ceb2cb28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ceb2cb28

Branch: refs/heads/HADOOP-13070
Commit: ceb2cb2895a0be504d5eeedcb100991db52a809f
Parents: beb70fe
Author: Sangjin Lee 
Authored: Tue Oct 18 16:44:06 2016 -0700
Committer: Sangjin Lee 
Committed: Tue Nov 22 13:31:53 2016 -0800

--
 .../hadoop/util/ApplicationClassLoader.java | 66 ++--
 1 file changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceb2cb28/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index 2f46e1f..9b89889 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -158,49 +158,49 @@ public class ApplicationClassLoader extends 
URLClassLoader {
   }
 
   @Override
-  protected synchronized Class loadClass(String name, boolean resolve)
+  protected Class loadClass(String name, boolean resolve)
   throws ClassNotFoundException {
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Loading class: " + name);
-}
+synchronized (getClassLoadingLock(name)) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Loading class: " + name);
+  }
 
-Class c = findLoadedClass(name);
-ClassNotFoundException ex = null;
+  Class c = findLoadedClass(name);
+  ClassNotFoundException ex = null;
+
+  if (c == null && !isSystemClass(name, systemClasses)) {
+// Try to load class from this classloader's URLs. Note that this is
+// like the servlet spec, not the usual Java 2 behaviour where we ask
+// the parent to attempt to load first.
+try {
+  c = findClass(name);
+  if (LOG.isDebugEnabled() && c != null) {
+LOG.debug("Loaded class: " + name + " ");
+  }
+} catch (ClassNotFoundException e) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+  ex = e;
+}
+  }
 
-if (c == null && !isSystemClass(name, systemClasses)) {
-  // Try to load class from this classloader's URLs. Note that this is like
-  // the servlet spec, not the usual Java 2 behaviour where we ask the
-  // parent to attempt to load first.
-  try {
-c = findClass(name);
+  if (c == null) { // try parent
+c = parent.loadClass(name);
 if (LOG.isDebugEnabled() && c != null) {
-  LOG.debug("Loaded class: " + name + " ");
+  LOG.debug("Loaded class from parent: " + name + " ");
 }
-  } catch (ClassNotFoundException e) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug(e);
-}
-ex = e;
   }
-}
 
-if (c == null) { // try parent
-  c = parent.loadClass(name);
-  if (LOG.isDebugEnabled() && c != null) {
-LOG.debug("Loaded class from parent: " + name + " ");
+  if (c == null) {
+throw ex != null ? ex : new ClassNotFoundException(name);
   }
-}
 
-if (c == null) {
-  throw ex != null ? ex : new ClassNotFoundException(name);
-}
-
-if (resolve) {
-  resolveClass(c);
+  if (resolve) {
+resolveClass(c);
+  }
+  return c;
 }
-
-return c;
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: YARN-5859. TestResourceLocalizationService#testParallelDownloadAttemptsForPublicResource sometimes fails. Contributed by Eric Badger

2016-11-22 Thread sjlee
YARN-5859. 
TestResourceLocalizationService#testParallelDownloadAttemptsForPublicResource 
sometimes fails. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/009452bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/009452bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/009452bb

Branch: refs/heads/HADOOP-13070
Commit: 009452bb6dbe5dffb0b304d67a2f360fe0eee1e2
Parents: 3eb7b68
Author: Jason Lowe 
Authored: Mon Nov 21 16:36:08 2016 +
Committer: Jason Lowe 
Committed: Mon Nov 21 16:36:08 2016 +

--
 .../TestResourceLocalizationService.java  | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/009452bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 13ba2c1..42479ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -1888,7 +1888,7 @@ public class TestResourceLocalizationService {
 createContainerLocalizationEvent(container1,
   LocalResourceVisibility.PRIVATE, req));
   Assert
-.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 1, 200));
+.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 1, 5000));
 
   // Container - 2 now makes the request.
   ContainerImpl container2 = createMockContainer(user, 2);
@@ -1902,7 +1902,7 @@ public class TestResourceLocalizationService {
 createContainerLocalizationEvent(container2,
   LocalResourceVisibility.PRIVATE, req));
   Assert
-.assertTrue(waitForPrivateDownloadToStart(rls, localizerId2, 1, 200));
+.assertTrue(waitForPrivateDownloadToStart(rls, localizerId2, 1, 5000));
 
   // Retrieving localized resource.
   LocalResourcesTracker tracker =
@@ -1944,7 +1944,7 @@ public class TestResourceLocalizationService {
   Assert
 .assertTrue(waitForResourceState(lr, rls, req,
   LocalResourceVisibility.PRIVATE, user, appId, ResourceState.FAILED,
-  200));
+  5000));
   Assert.assertTrue(lr.getState().equals(ResourceState.FAILED));
   Assert.assertEquals(0, localizerRunner1.scheduled.size());
 
@@ -2060,7 +2060,7 @@ public class TestResourceLocalizationService {
   // Now waiting for resource download to start. Here actual will not start
   // Only the resources will be populated into pending list.
   Assert
-.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 2, 500));
+.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 2, 5000));
 
   // Validating user and application cache paths
 
@@ -2210,10 +2210,10 @@ public class TestResourceLocalizationService {
   // Waiting for resource to change into DOWNLOADING state.
   Assert.assertTrue(waitForResourceState(null, spyService, req,
 LocalResourceVisibility.PUBLIC, user, null, ResourceState.DOWNLOADING,
-200));
+5000));
 
   // Waiting for download to start.
-  Assert.assertTrue(waitForPublicDownloadToStart(spyService, 1, 200));
+  Assert.assertTrue(waitForPublicDownloadToStart(spyService, 1, 5000));
 
   LocalizedResource lr =
   getLocalizedResource(spyService, req, LocalResourceVisibility.PUBLIC,
@@ -2236,7 +2236,7 @@ public class TestResourceLocalizationService {
 
   // Waiting for download to start. This should return false as new 
download
   // will not start
-  Assert.assertFalse(waitForPublicDownloadToStart(spyService, 2, 100));
+  Assert.assertFalse(waitForPublicDownloadToStart(spyService, 2, 5000));
 
   // Now Failing the resource download. As a part of it
   // resource state is changed and then lock is released.
@@ -2248,7 +2248,7 @@ public class TestResourceLocalizationService {
 
   // 

[49/50] [abbrv] hadoop git commit: HADOOP-13655. document object store use with fs shell and distcp. Contributed by Steve Loughran

2016-11-22 Thread sjlee
HADOOP-13655. document object store use with fs shell and distcp. Contributed 
by Steve Loughran

This closes #131


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beb70fed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beb70fed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beb70fed

Branch: refs/heads/HADOOP-13070
Commit: beb70fed4f15cd4afe8ea23e6068a8344d3557b1
Parents: 83cc726
Author: Mingliang Liu 
Authored: Mon Nov 21 17:49:05 2016 -0800
Committer: Mingliang Liu 
Committed: Tue Nov 22 13:12:23 2016 -0800

--
 .../src/site/markdown/FileSystemShell.md| 324 ++-
 .../site/markdown/filesystem/introduction.md|   4 +-
 .../src/site/markdown/DistCp.md.vm  | 131 +++-
 3 files changed, 431 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb70fed/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index c48ff5c..43fc28b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -53,10 +53,14 @@ Returns 0 on success and 1 on error.
 cat
 ---
 
-Usage: `hadoop fs -cat URI [URI ...]`
+Usage: `hadoop fs -cat [-ignoreCrc] URI [URI ...]`
 
 Copies source paths to stdout.
 
+Options
+
+* The `-ignoreCrc` option disables checksum verification.
+
 Example:
 
 * `hadoop fs -cat hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2`
@@ -116,11 +120,16 @@ copyFromLocal
 
Usage: `hadoop fs -copyFromLocal <localsrc> URI`
 
-Similar to put command, except that the source is restricted to a local file 
reference.
+Similar to the `fs -put` command, except that the source is restricted to a 
local file reference.
 
 Options:
 
-* The -f option will overwrite the destination if it already exists.
+* `-p` : Preserves access and modification times, ownership and the 
permissions.
+(assuming the permissions can be propagated across filesystems)
+* `-f` : Overwrites the destination if it already exists.
+* `-l` : Allow DataNode to lazily persist the file to disk, Forces a 
replication
+ factor of 1. This flag will result in reduced durability. Use with care.
+* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
 
 copyToLocal
 ---
@@ -300,7 +309,7 @@ Returns 0 on success and -1 on error.
 get
 ---
 
-Usage: `hadoop fs -get [-ignorecrc] [-crc]   `
+Usage: `hadoop fs -get [-ignorecrc] [-crc] [-p] [-f] <src> <localdst>`
 
 Copy files to the local file system. Files that fail the CRC check may be 
copied with the -ignorecrc option. Files and CRCs may be copied using the -crc 
option.
 
@@ -315,7 +324,11 @@ Returns 0 on success and -1 on error.
 
 Options:
 
-The -f option will overwrite the destination if it already exists.
+* `-p` : Preserves access and modification times, ownership and the 
permissions.
+(assuming the permissions can be propagated across filesystems)
+* `-f` : Overwrites the destination if it already exists.
+* `-ignorecrc` : Skip CRC checks on the file(s) downloaded.
+* `-crc`: write CRC checksums for the files downloaded.
 
 getfacl
 ---
@@ -483,13 +496,28 @@ Returns 0 on success and -1 on error.
 put
 ---
 
-Usage: `hadoop fs -put  ...  `
+Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [ - | <localsrc1> .. ] <dst>`
+
+Copy single src, or multiple srcs from local file system to the destination 
file system.
+Also reads input from stdin and writes to destination file system if the 
source is set to "-"
+
+Copying fails if the file already exists, unless the -f flag is given.
+
+Options:
 
-Copy single src, or multiple srcs from local file system to the destination 
file system. Also reads input from stdin and writes to destination file system.
+* `-p` : Preserves access and modification times, ownership and the 
permissions.
+(assuming the permissions can be propagated across filesystems)
+* `-f` : Overwrites the destination if it already exists.
+* `-l` : Allow DataNode to lazily persist the file to disk, Forces a 
replication
+ factor of 1. This flag will result in reduced durability. Use with care.
+* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
+
+
+Examples:
 
 * `hadoop fs -put localfile /user/hadoop/hadoopfile`
-* `hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir`
-* `hadoop fs -put localfile hdfs://nn.example.com/hadoop/hadoopfile`
+* `hadoop fs -put -f localfile1 localfile2 /user/hadoop/hadoopdir`
+* `hadoop fs -put -d localfile hdfs://nn.example.com/hadoop/hadoopfile`
 * `hadoop fs -put - hdfs://nn.example.com/hado

[34/50] [abbrv] hadoop git commit: HDFS-11117. Refactor striped file tests to allow flexibly test erasure coding policy. Contributed by Sammi Chen

2016-11-22 Thread sjlee
HDFS-11117. Refactor striped file tests to allow flexibly test erasure coding 
policy. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6ffa116
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6ffa116
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6ffa116

Branch: refs/heads/HADOOP-13070
Commit: f6ffa11635c47030a91d420da942da1fb425eb49
Parents: c0b1a44
Author: Kai Zheng 
Authored: Fri Nov 18 16:08:59 2016 +0600
Committer: Kai Zheng 
Committed: Fri Nov 18 16:08:59 2016 +0600

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   9 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 116 -
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 180 ++---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  12 +-
 .../TestDFSStripedOutputStreamWithFailure.java  | 255 ---
 ...estDFSStripedOutputStreamWithFailure000.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure010.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure020.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure030.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure040.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure050.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure060.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure070.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure080.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure090.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure100.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure110.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure120.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure130.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure140.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure150.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure160.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure170.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure180.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure190.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure200.java |   8 +-
 ...estDFSStripedOutputStreamWithFailure210.java |   9 +-
 .../hdfs/TestDecommissionWithStriped.java   |  46 ++--
 .../TestErasureCodingPolicyWithSnapshot.java|   9 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java|  11 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  72 +++---
 .../hdfs/TestReadStripedFileWithDecoding.java   |  29 ++-
 .../TestReadStripedFileWithMissingBlocks.java   |  29 ++-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  15 +-
 .../hdfs/TestSafeModeWithStripedFile.java   |  20 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  24 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |  16 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|   3 +-
 .../hdfs/server/balancer/TestBalancer.java  |  23 +-
 .../blockmanagement/TestBlockInfoStriped.java   |  78 +++---
 .../TestBlockTokenWithDFSStriped.java   |  16 +-
 ...constructStripedBlocksWithRackAwareness.java |  58 +++--
 .../TestSequentialBlockGroupId.java |  11 +-
 .../TestSortLocatedStripedBlock.java|  45 ++--
 .../hdfs/server/datanode/TestBlockRecovery.java |  57 ++---
 .../TestDataNodeErasureCodingMetrics.java   |  29 ++-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 +-
 .../TestAddOverReplicatedStripedBlocks.java |  71 +++---
 .../namenode/TestAddStripedBlockInFBR.java  |  22 +-
 .../server/namenode/TestAddStripedBlocks.java   |  72 +++---
 .../server/namenode/TestFSEditLogLoader.java|  13 +-
 .../namenode/TestQuotaWithStripedBlocks.java|  20 +-
 .../namenode/TestReconstructStripedBlocks.java  |  53 ++--
 .../server/namenode/TestStripedINodeFile.java   |   4 +-
 ...TestOfflineImageViewerWithStripedBlocks.java |  27 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 138 +-
 56 files changed, 972 insertions(+), 810 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 945d2c8..13e2656 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeIn

[39/50] [abbrv] hadoop git commit: HDFS-11101. TestDFSShell#testMoveWithTargetPortEmpty fails intermittently. Contributed By Brahma Reddy Battula.

2016-11-22 Thread sjlee
HDFS-11101. TestDFSShell#testMoveWithTargetPortEmpty fails intermittently. 
Contributed By Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9220677
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9220677
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9220677

Branch: refs/heads/HADOOP-13070
Commit: f92206774899b84e8abaecdf97399b65e0995ed8
Parents: c68dad1
Author: Brahma Reddy Battula 
Authored: Mon Nov 21 19:34:52 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Mon Nov 21 19:34:52 2016 +0530

--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9220677/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index cf193c9..3f6b268 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -776,7 +776,7 @@ public class TestDFSShell {
   .format(true)
   .numDataNodes(2)
   .nameNodePort(ServerSocketUtil.waitForPort(
-  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10))
+  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 60))
   .waitSafeMode(true)
   .build();
   FileSystem srcFs = cluster.getFileSystem();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/50] [abbrv] hadoop git commit: HADOOP-13814. Sample configuration of KMS HTTP Authentication signature is misleading. Contributed by Masahiro Tanaka.

2016-11-22 Thread sjlee
HADOOP-13814. Sample configuration of KMS HTTP Authentication signature is 
misleading. Contributed by Masahiro Tanaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c65d6b65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c65d6b65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c65d6b65

Branch: refs/heads/HADOOP-13070
Commit: c65d6b65415742288b53f8e38314b71794e47ecc
Parents: 7584fbf
Author: Akira Ajisaka 
Authored: Mon Nov 21 11:25:11 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 21 11:25:11 2016 +0900

--
 hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml| 4 ++--
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c65d6b65/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml
index 6c8becf..d188735 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml
@@ -147,9 +147,9 @@
 
   
 
hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type
-kerberos
+none
 
-  The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+  The Zookeeper authentication type, 'none' (default) or 'sasl' (Kerberos).
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c65d6b65/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 69eb1dd..39aab82 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -737,9 +737,9 @@ This secret sharing can be done using a Zookeeper service 
which is configured in
   
   
 
hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type
-kerberos
+sasl
 
-  The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+  The Zookeeper authentication type, 'none' (default) or 'sasl' (Kerberos).
 
   
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: HADOOP-12705 Upgrade Jackson 2.2.3 to 2.7.8

2016-11-22 Thread sjlee
HADOOP-12705 Upgrade Jackson 2.2.3 to 2.7.8


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3eb7b686
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3eb7b686
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3eb7b686

Branch: refs/heads/HADOOP-13070
Commit: 3eb7b686879d26fa2505b23e5e80b2f2a0ac436f
Parents: 49a0917
Author: Steve Loughran 
Authored: Mon Nov 21 16:15:06 2016 +
Committer: Steve Loughran 
Committed: Mon Nov 21 16:18:17 2016 +

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3eb7b686/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index caf6e6f..93a41f2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -72,7 +72,7 @@
 
 
 1.9.13
-2.2.3
+2.7.8
 
 
 1.0


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: HADOOP-13646. Remove outdated overview.html. Contributed By Brahma Reddy Battula.

2016-11-22 Thread sjlee
HADOOP-13646. Remove outdated overview.html. Contributed By Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afcf8d38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afcf8d38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afcf8d38

Branch: refs/heads/HADOOP-13070
Commit: afcf8d38e750f935c06629e641a1321b79c4cace
Parents: a926f89
Author: Brahma Reddy Battula 
Authored: Tue Nov 22 19:45:45 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 22 19:45:45 2016 +0530

--
 .../hadoop-common/src/main/java/overview.html   | 274 ---
 .../hadoop-hdfs/src/main/java/overview.html | 274 ---
 pom.xml |  25 --
 3 files changed, 573 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afcf8d38/hadoop-common-project/hadoop-common/src/main/java/overview.html
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/overview.html 
b/hadoop-common-project/hadoop-common/src/main/java/overview.html
deleted file mode 100644
index 2c64121..000
--- a/hadoop-common-project/hadoop-common/src/main/java/overview.html
+++ /dev/null
@@ -1,274 +0,0 @@
-
-
-
-
-   Hadoop
-
-
-
-Hadoop is a distributed computing platform.
-
-Hadoop primarily consists of the http://hadoop.apache.org/hdfs/";>Hadoop Distributed FileSystem
-(HDFS) and an
-implementation of the http://hadoop.apache.org/mapreduce/";>
-Map-Reduce programming paradigm.
-
-
-Hadoop is a software framework that lets one easily write and run 
applications
-that process vast amounts of data. Here's what makes Hadoop especially 
useful:
-
-  
-Scalable: Hadoop can reliably store and process petabytes.
-  
-  
-Economical: It distributes the data and processing across clusters
-of commonly available computers. These clusters can number into the 
thousands
-of nodes.
-  
-  
-Efficient: By distributing the data, Hadoop can process it in 
parallel
-on the nodes where the data is located. This makes it extremely rapid.
-  
-  
-Reliable: Hadoop automatically maintains multiple copies of data and
-automatically redeploys computing tasks based on failures.
-  
-
-
-Requirements
-
-Platforms
-
-
-  
-Hadoop has been demonstrated on GNU/Linux clusters with more than 4000 
nodes.
-  
-  
-Windows is also a supported platform.
-  
-
-
-Requisite Software
-
-
-  
-Java 1.6.x, preferably from
-http://java.sun.com/javase/downloads/";>Sun.
-Set JAVA_HOME to the root of your Java installation.
-  
-  
-ssh must be installed and sshd must be running to use Hadoop's
-scripts to manage remote Hadoop daemons.
-  
-  
-rsync may be installed to use Hadoop's scripts to manage remote
-Hadoop installations.
-  
-
-
-Installing Required Software
-
-If your platform does not have the required software listed above, you
-will have to install it.
-
-For example on Ubuntu Linux:
-
-$ sudo apt-get install ssh
-$ sudo apt-get install rsync
-
-
-Getting Started
-
-First, you need to get a copy of the Hadoop code.
-
-Edit the file conf/hadoop-env.sh to define at least
-JAVA_HOME.
-
-Try the following command:
-bin/hadoop
-This will display the documentation for the Hadoop command script.
-
-Standalone operation
-
-By default, Hadoop is configured to run things in a non-distributed
-mode, as a single Java process.  This is useful for debugging, and can
-be demonstrated as follows:
-
-mkdir input
-cp conf/*.xml input
-bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
-cat output/*
-
-This will display counts for each match of the http://java.sun.com/j2se/1.4.2/docs/api/java/util/regex/Pattern.html";>
-regular expression.
-
-Note that input is specified as a directory containing input
-files and that output is also specified as a directory where parts are
-written.
-
-Distributed operation
-
-To configure Hadoop for distributed operation you must specify the
-following:
-
-
-
-The NameNode (Distributed Filesystem master) host.  This is
-specified with the configuration property fs.default.name.
-
-
-The org.apache.hadoop.mapred.JobTracker (MapReduce master)
-host and port.  This is specified with the configuration property
-mapred.job.tracker.
-
-
-A workers file that lists the names of all the hosts in
-the cluster.  The default workers file is conf/workers.
-
-
-
-Pseudo-distributed configuration
-
-You can in fact run everything on a single host.  To run things this
-way, put the following in:
-
-
-conf/core-site.xml:
-
-
-  
-fs.default.name
-hdfs://localhost/
-  
-
-
-
-conf/hdfs-site.xml:
-
-
-  
-dfs.replication
-1
-  
-
-
-
-conf/mapred-site.xml:
-
-
-  
-mapred.job.tracker
-loca

[48/50] [abbrv] hadoop git commit: YARN-5722. FairScheduler hides group resolution exceptions when assigning queue (Contributed by Wilfred Spiegelenburg via Daniel Templeton)

2016-11-22 Thread sjlee
YARN-5722. FairScheduler hides group resolution exceptions when assigning queue 
(Contributed by Wilfred Spiegelenburg via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83cc7263
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83cc7263
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83cc7263

Branch: refs/heads/HADOOP-13070
Commit: 83cc7263af632939dc3b2ee58d8f03f98ed4d96a
Parents: 613b902
Author: Daniel Templeton 
Authored: Tue Nov 22 11:16:00 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 22 11:16:00 2016 -0800

--
 .../yarn/server/resourcemanager/scheduler/fair/FairScheduler.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83cc7263/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 1d04710..354f936 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -767,7 +767,8 @@ public class FairScheduler extends
 } catch (InvalidQueueNameException qne) {
   appRejectMsg = qne.getMessage();
 } catch (IOException ioe) {
-  appRejectMsg = "Error assigning app to queue " + queueName;
+  // IOException should only happen for a user without groups
+  appRejectMsg = "Error assigning app to a queue: " + ioe.getMessage();
 }
 
 if (appRejectMsg != null && rmApp != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: HDFS-11117. Refactor striped file tests to allow flexibly test erasure coding policy. Contributed by Sammi Chen

2016-11-22 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index b532443..1574a03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -39,23 +36,18 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -90,10 +82,14 @@ public class TestDecommissionWithStriped {
   private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
+  private final ErasureCodingPolicy ecPolicy =
+  ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private int numDNs;
-  private final int blockSize = StripedFileTestUtil.blockSize;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int blockSize = cellSize * 4;
+  private final int blockGroupSize = blockSize * dataBlocks;
   private final Path ecDir = new Path("/" + this.getClass().getSimpleName());
 
   private FSNamesystem fsn;
@@ -132,12 +128,12 @@ public class TestDecommissionWithStriped {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setInt(
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
-StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1);
+cellSize - 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
 false);
 
-numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
+numDNs = dataBlocks + parityBlocks + 2;
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 dfs = cluster.getFileSystem(0);
@@ -194,7 +190,7 @@ public class TestDecommissionWithStriped {
 LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
 
 final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
-int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2;
+int writeBytes = cellSize * dataBlocks * 2;
 writeStripedFile(dfs, ecFile, writeBytes);
 Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
 
@@ -202,8 +198,8 @@ public class TestDecommissionWithStriped {
 LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
 .get(0);
 DatanodeInfo[] dnLocs = lb.getLocations();
-assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
-int decommNodeIndex = NUM_DATA_B

[47/50] [abbrv] hadoop git commit: HDFS-11148. Update DataNode to use StorageLocationChecker at startup.

2016-11-22 Thread sjlee
HDFS-11148. Update DataNode to use StorageLocationChecker at startup.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/613b902b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/613b902b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/613b902b

Branch: refs/heads/HADOOP-13070
Commit: 613b902b9808c1c679674047cff15feade01dfab
Parents: afcf8d3
Author: Arpit Agarwal 
Authored: Tue Nov 22 10:50:25 2016 -0800
Committer: Arpit Agarwal 
Committed: Tue Nov 22 10:50:25 2016 -0800

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 79 ++--
 .../checker/StorageLocationChecker.java | 42 +--
 .../datanode/InternalDataNodeTestUtils.java |  2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |  2 +-
 .../hdfs/server/datanode/TestDataDirs.java  | 27 ---
 .../hdfs/server/datanode/TestDataNodeUUID.java  |  2 +-
 .../datanode/TestDataNodeVolumeFailure.java | 31 
 .../TestDatanodeProtocolRetryPolicy.java|  2 +-
 .../checker/TestStorageLocationChecker.java | 12 ++-
 9 files changed, 79 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/613b902b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 78a2044..22a70fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
@@ -67,7 +65,6 @@ import java.io.PrintStream;
 import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.net.URI;
 import java.net.UnknownHostException;
 import java.nio.channels.ServerSocketChannel;
 import java.security.PrivilegedExceptionAction;
@@ -92,6 +89,7 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.annotation.Nullable;
 import javax.management.ObjectName;
 import javax.net.SocketFactory;
 
@@ -103,16 +101,14 @@ import org.apache.hadoop.conf.ReconfigurableBase;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -203,7 +199,6 @@ import 
org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.tracing.TracerConfigurationManager;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.InvalidChecksumSizeException;
@@ -211,6 +206,7 @@ import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 import org.apac

hadoop git commit: Set the release date for 2.6.5 and moved the CHANGES.txt fixes from branch-2.6.5.

2016-10-10 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 3a75b446e -> 82f5d6350


Set the release date for 2.6.5 and moved the CHANGES.txt fixes from 
branch-2.6.5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82f5d635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82f5d635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82f5d635

Branch: refs/heads/branch-2.6
Commit: 82f5d6350f4acf4287af9dbd7a2f23cbcd441768
Parents: 3a75b44
Author: Sangjin Lee 
Authored: Mon Oct 10 18:33:35 2016 -0700
Committer: Sangjin Lee 
Committed: Mon Oct 10 18:33:35 2016 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 27 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 27 +---
 hadoop-mapreduce-project/CHANGES.txt|  6 -
 hadoop-yarn-project/CHANGES.txt |  9 +--
 4 files changed, 55 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82f5d635/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7ca53c3..fa4ac16 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop Change Log
 
-Release 2.6.5 - UNRELEASED
+Release 2.6.5 - 2016-10-08
 
   INCOMPATIBLE CHANGES
 
@@ -17,6 +17,9 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12789. log classpath of ApplicationClassLoader at INFO level
 (Sangjin Lee via mingma)
 
+HADOOP-13290. Appropriate use of generics in FairCallQueue. (Jonathan Hung
+via zhz)
+
 HADOOP-13298. Fix the leftover L&N files in
 hadoop-build-tools/src/main/resources/META-INF/. (ozawa)
 
@@ -36,18 +39,40 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12589. Fix intermittent test failure of TestCopyPreserveFlag
 (iwasakims)
 
+HADOOP-12559. KMS connection failures should trigger TGT renewal. (Zhe
+Zhang via xiaoyuyao)
+
+HADOOP-12682. Fix TestKMS#testKMSRestart* failure. (Wei-Chiu Chuang via
+xiaoyuyao)
+
 HADOOP-12958. PhantomReference for filesystem statistics can trigger OOM
 (Sangjin Lee via jlowe)
 
 HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream
 with accurate .getPos() (kanaka kumar avvaru via vinayakumarb)
 
+HADOOP-13042. Restore lost leveldbjni LICENSE and NOTICE changes. (wang)
+
+HADOOP-13043. Add LICENSE.txt entries for bundled javascript dependencies.
+(wang)
+
 HADOOP-12893. Verify LICENSE.txt and NOTICE.txt.
 (Xiao Chen and Andrew Wang via aajisaka)
 
+HADOOP-13189. FairCallQueue makes callQueue larger than the configured
+capacity. (Vinitha Gankidi via shvachko)
+
+HADOOP-13255. KMSClientProvider should check and renew tgt when doing
+delegation token operations. (Xiao Chen via xiaoyuyao)
+
+HADOOP-13350. Additional fix to LICENSE and NOTICE. (Xiao Chen via wang)
+
 HADOOP-13297. Add missing dependency in setting
 maven-remote-resource-plugin to fix builds. (Sean Busbey via aajisaka)
 
+HADOOP-13494. ReconfigurableBase can log sensitive information. (Sean
+Mackrory via wang)
+
 HADOOP-13579. Fix source-level compatibility after HADOOP-11252.
 (Tsuyoshi Ozawa via aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82f5d635/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e18b40c..b8ac777 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop HDFS Change Log
 
-Release 2.6.5 - UNRELEASED
+Release 2.6.5 - 2016-10-08
 
   INCOMPATIBLE CHANGES
 
@@ -14,9 +14,17 @@ Release 2.6.5 - UNRELEASED
 HDFS-9688. Test the effect of nested encryption zones in HDFS downgrade.
 (zhz)
 
+HDFS-10264. Logging improvements in FSImageFormatProtobuf.Saver. (Xiaobing
+Zhou via shvachko)
+
+HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh)
+
 HDFS-7258. CacheReplicationMonitor rescan schedule log should use DEBUG
 level instead of INFO level. (Xiaoyu Yao via wheat9)
 
+HDFS-10377. CacheReplicationMonitor shutdown log message should use INFO
+level (Yiqun Lin via cmccabe)
+
 HDFS-9669. TcpPeerServer should respect ipc.server.listen.queue.size
 (Elliot Clark via cmccabe)
 
@@ -25,6 +33,9 @@ Release 2.6.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+HDFS-10653. Optimize conversion from path string to components. (Daryn
+Sharp via zhz)
+
 HDFS-8845. DiskChecker

[hadoop] Git Push Summary

2016-10-10 Thread sjlee
Repository: hadoop
Updated Tags:  refs/tags/rel/release-2.6.5 [created] c8f337a74

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r16465 - in /release/hadoop/common/hadoop-2.6.5: ./ hadoop-2.6.5-src.tar.gz hadoop-2.6.5-src.tar.gz.asc hadoop-2.6.5-src.tar.gz.md5 hadoop-2.6.5.tar.gz hadoop-2.6.5.tar.gz.asc hadoop-2.6.5

2016-10-10 Thread sjlee
Author: sjlee
Date: Tue Oct 11 02:31:46 2016
New Revision: 16465

Log:
Publishing the bits for release 2.6.5

Added:
release/hadoop/common/hadoop-2.6.5/
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz   (with props)
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.asc
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.md5
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz   (with props)
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.asc
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.md5

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.asc
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.asc (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.asc Tue Oct 11 
02:31:46 2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Comment: GPGTools - https://gpgtools.org
+
+iQIcBAABCgAGBQJX8Z7SAAoJEKXO4gqQNI1HOjgP/3/Qdd5NPjJafyyh9xLi9BVX
+HWcL5Bjvw1v0NKMufaVKmJBiCCVA1GPDenJp5p52pbtXtF5djFEXrU2NdyZXv4oC
+0R9c/nILkKkK+jubjxHQwqsYrpPs3jNqgLDtTAI63DT/SBwePXE0jKKBSIIJOGJs
+Sxm1rHKTCnVeeIAzTIFc8RO7hEwuF9VD5cFdI30j1rxY5M4/lmXlnOwTg/OTA3Wo
+eOGY+6ttHF91pn3lVEBUONiwD7F4jgzojaiOjPw2LA97IHjKsKgYgw0aLz9xleb5
+ArlAzVjYzyAV2z7W/6XGdo8TBiBOhUIf4MhStbwL6EpELAusgaekdiQHck3J0Oaq
+rI1Jy1uQ2YDVjsQgUzF5Kyb5HvZdB+cbtClmU/L9+8PvOxdhnpw5GoD0Q8oRTxIp
+z+6dDoxn7fByT3H0ODTibURemsLumbzPwwm9rUg0Ah8XCXAqXl6jybqj30RVI2Bl
+pxIjupOHv8afyHeOG2MDEib4EXx3yc30mA5j/vhuwh1rDYbG3lEz12AZEcx6WWSx
+gtE8FQoRC1jswMu6O2975jOSV+u10xnn9b2AigCoJY9Cy4WyFZKsNcA3dEkomEWj
+DX35805hVhd8yYTfWUq/amamqX/WR+e2BJzLHT6iT3qs9rZIUbPFmg21sivfgtTL
+BKGv6N+LAO5bESUgFkNO
+=t5H4
+-END PGP SIGNATURE-

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.md5
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.md5 (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.md5 Tue Oct 11 
02:31:46 2016
@@ -0,0 +1,2 @@
+$ md5sum target/artifacts/hadoop-2.6.5-RC1-src.tar.gz
+13cdf917ebc32d6aea37478b1e3f7d91  target/artifacts/hadoop-2.6.5-RC1-src.tar.gz

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.asc
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.asc (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.asc Tue Oct 11 
02:31:46 2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Comment: GPGTools - https://gpgtools.org
+
+iQIcBAABCgAGBQJX8Z7fAAoJEKXO4gqQNI1HETYP/0qAThu8q5Ce46tBABiiqsNK
+bo9fVi1vJxb/fojLepJjf1Km7l99cJSarOdOq1U4GEFr672vQLYH+fup905uRn3Y
+qcTirdS+BWne8/lfzu0ZrMXFV3Ai3JxmjI3SUwKzHIJi0O/opZHrb43sFSdLa8J+
+AD6saf1vCg2YCU/ADH9jTedSWhzAKypyk4CHejFS5ZfPBuZGO+LRe3JvOWfebvaT
+2DyFCwGZXjFSeVqS4fBYke9Sp0h49nBXl1tPmaSx2YuDPtf9NfTntOxjEzTBkdN4
+YQQfBwKbmQRVxaBhaxAEVUhSp73rx8LF2b9tTcsHQYxKA6k3EYOZf15t+ATnoXCk
+BFUOYirvsAeobaYeKHcD0X+oCHxyIQ+w67xG9aUv3/tWB+Q6+oKpUUhuMm0yM2Yu
+0GZTny5rPKWo+SfryYRDmeDBZS4vbeLG4CjsUnHfKJSRp/uKRna2YwgKUMbkyFSr
+YW7LM+a44guqoYcpJYhndCmzjUeBPz/N/5plUs20zpIMqQ3t9bwEFQK5mDJW3aFD
+fanpnWgxMeiPRUYqv1D4yILKkrS/sZvXz8WnodhdVjdrRWcOxZ19Bfhk9hclle2E
+1ftYNY6GmsFqqmEpV6EGHoE6TQ2tXOY8fDlSlzjMBPYjUk21/2+MDULkza4ixDv5
++FoTCAwooS4MT359yRBv
+=yWnq
+-END PGP SIGNATURE-

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.md5
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.md5 (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.md5 Tue Oct 11 
02:31:46 2016
@@ -0,0 +1,2 @@
+$ md5sum target/artifacts/hadoop-2.6.5-RC1.tar.gz
+967c24f3c15fcdd058f34923e92ce8ac  target/artifacts/hadoop-2.6.5-RC1.tar.gz



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r16466 - in /release/hadoop/common/hadoop-2.6.5: hadoop-2.6.5-src.tar.gz.md5 hadoop-2.6.5-src.tar.gz.mds hadoop-2.6.5.tar.gz.md5 hadoop-2.6.5.tar.gz.mds

2016-10-10 Thread sjlee
Author: sjlee
Date: Tue Oct 11 03:24:34 2016
New Revision: 16466

Log:
Replaced md5-only signatures with mds files for 2.6.5.

Added:
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.mds
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.mds
Removed:
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.md5
release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.md5

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5-src.tar.gz.mds Tue Oct 11 
03:24:34 2016
@@ -0,0 +1,17 @@
+hadoop-2.6.5-src.tar.gz:MD5 = 13 CD F9 17 EB C3 2D 6A  EA 37 47 8B 1E 3F 7D
+  91
+hadoop-2.6.5-src.tar.gz:   SHA1 = 6412 858A 59E5 DAED 2486  5C40 47CC 8F3B 513D
+  8421
+hadoop-2.6.5-src.tar.gz: RMD160 = B57B 1423 4B81 ED8B C5F1  BB8D 0669 D003 9251
+  AA5A
+hadoop-2.6.5-src.tar.gz: SHA224 = 50C4EB55 F8F81F45 A477265B 22DA8F4F 2BE90443
+  037716DA F277D694
+hadoop-2.6.5-src.tar.gz: SHA256 = 3A843F18 73D9951A 51114777 ECD4DF58 E455340E
+  BCDAF9F7 E6124410 D4DD65F0
+hadoop-2.6.5-src.tar.gz: SHA384 = 6C42498F 608A5F9D EA75F6D0 42AC93EA 2220B0C2
+  C5E089FA E40B8262 A50B4516 C3CF515B B476621D
+  A45221EE 2231F855
+hadoop-2.6.5-src.tar.gz: SHA512 = 61948BEC ADFF13B6 D9ADD2F1 2DB6DE64 6FBBF821
+  C709079C 6A06E6AA 1AC4815C 8F9C28F1 30D6A4A0
+  9A8E6D23 E8C4F3EA 3B8B36E3 8EC6F601 8D2A46C9
+  C65E38AE

Added: release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz.mds Tue Oct 11 
03:24:34 2016
@@ -0,0 +1,14 @@
+hadoop-2.6.5.tar.gz:MD5 = 96 7C 24 F3 C1 5F CD D0  58 F3 49 23 E9 2C E8 AC
+hadoop-2.6.5.tar.gz:   SHA1 = 877B FD6E 3577 3E4B 9021  53BD BD89 877B F360 
5BA4
+hadoop-2.6.5.tar.gz: RMD160 = 1F4D 4D66 26D4 ED7F 0F90  45D6 ADF8 5314 6C80 
112C
+hadoop-2.6.5.tar.gz: SHA224 = F187156E B23AA6D6 6BAD2788 4EB0480C 006732D3
+  E6EF3033 093750CF
+hadoop-2.6.5.tar.gz: SHA256 = 001AD18D 4B6D0FE5 42B15DDA DBA2D092 BC97DF1C
+  4D2D7973 81C8D128 87691898
+hadoop-2.6.5.tar.gz: SHA384 = 9B4B0CAE AD744C17 62660C76 11EEB503 B0F04ED6
+  6CF54B45 1196D68F 3129369E 04279893 B63A7799
+  AE45CA9D 3ECE13F9
+hadoop-2.6.5.tar.gz: SHA512 = FD9D2415 5BB18475 C25BC6A8 8284A696 224D5EF3
+  8503C5C3 F1D4C0B6 5AE1C552 D90D2136 CA0FA452
+  94D62204 EED3D115 4785C12C C260CCA4 2EA2FBF0
+  27B461B2



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Updated the release notes for 2.6.5 post-release (the web site will use the correct release notes).

2016-10-10 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.5 e8c9fe0b4 -> 09ba74e35


Updated the release notes for 2.6.5 post-release (the web site will use the 
correct release notes).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09ba74e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09ba74e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09ba74e3

Branch: refs/heads/branch-2.6.5
Commit: 09ba74e3502fb9f42d20498f8c909bff0c782327
Parents: e8c9fe0
Author: Sangjin Lee 
Authored: Mon Oct 10 21:05:16 2016 -0700
Committer: Sangjin Lee 
Committed: Mon Oct 10 21:05:16 2016 -0700

--
 .../src/main/docs/releasenotes.html | 820 ++-
 1 file changed, 258 insertions(+), 562 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ba74e3/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html 
b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
index 29da383..f23d12e 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+++ b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
@@ -1,5 +1,5 @@
 
-Hadoop  2.6.1 Release Notes
+Hadoop  2.6.5 Release Notes
 
H1 {font-family: sans-serif}
H2 {font-family: sans-serif; margin-left: 7mm}
@@ -7,632 +7,328 @@
 
 
 
-Hadoop  2.6.1 Release Notes
+Hadoop  2.6.5 Release Notes
 These release notes include new developer and user-facing incompatibilities, 
features, and major improvements. 
 
-Changes since Hadoop 2.6.0
+Changes since Hadoop 2.6.4
 
- https://issues.apache.org/jira/browse/YARN-4047";>YARN-4047.
- Major bug reported by Jason Lowe and fixed by Jason Lowe 
(resourcemanager)
- ClientRMService getApplications has high scheduler lock 
contention
- 
- https://issues.apache.org/jira/browse/YARN-3999";>YARN-3999.
- Major bug reported by Jian He and fixed by Jian He 
- RM hangs on draining events
- 
- https://issues.apache.org/jira/browse/YARN-3990";>YARN-3990.
- Critical bug reported by Rohith Sharma K S and fixed by Bibin A Chundatt 
(resourcemanager)
- AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when Node is 
connected/disconnected
- 
- https://issues.apache.org/jira/browse/YARN-3978";>YARN-3978.
- Major improvement reported by Eric Payne and fixed by Eric Payne 
(timelineserver , yarn)
- Configurably turn off the saving of container info in Generic 
AHS
- 
- https://issues.apache.org/jira/browse/YARN-3850";>YARN-3850.
- Blocker bug reported by Varun Saxena and fixed by Varun Saxena 
(log-aggregation , nodemanager)
- NM fails to read files from full disks which can lead to container 
logs being lost and other issues
- 
- https://issues.apache.org/jira/browse/YARN-3832";>YARN-3832.
- Critical bug reported by Ranga Swamy and fixed by Brahma Reddy Battula 
(nodemanager)
- Resource Localization fails on a cluster due to existing cache 
directories
- 
- https://issues.apache.org/jira/browse/YARN-3733";>YARN-3733.
- Blocker bug reported by Bibin A Chundatt and fixed by Rohith Sharma K S 
(resourcemanager)
- Fix DominantRC#compare() does not work as expected if cluster resource 
is empty
- 
- https://issues.apache.org/jira/browse/YARN-3725";>YARN-3725.
- Blocker bug reported by Zhijie Shen and fixed by Zhijie Shen 
(resourcemanager , timelineserver)
- App submission via REST API is broken in secure mode due to Timeline 
DT service address is empty
- 
- https://issues.apache.org/jira/browse/YARN-3700";>YARN-3700.
- Major sub-task reported by Xuan Gong and fixed by Xuan Gong 
(resourcemanager , webapp , yarn)
- ATS Web Performance issue at load time when large number of 
jobs
- 
- https://issues.apache.org/jira/browse/YARN-3585";>YARN-3585.
- Critical bug reported by Peng Zhang and fixed by Rohith Sharma K S 
- NodeManager cannot exit on SHUTDOWN event triggered and NM recovery is 
enabled
- 
- https://issues.apache.org/jira/browse/YARN-3526";>YARN-3526.
- Major bug reported by Weiwei Yang and fixed by Weiwei Yang 
(resourcemanager , webapp)
- ApplicationMaster tracking URL is incorrectly redirected on a QJM 
cluster
- 
- https://issues.apache.org/jira/browse/YARN-3493";>YARN-3493.
- Critical bug reported by Sumana Sathish and fixed by Jian He (yarn)
- RM fails to come up with error "Failed to load/recover state" when  
mem settings are changed
- 
- https://issues.apache.org/jira/browse/YARN-3487";>YARN-3487.
- Critical sub-task reported by Jason Lowe and fixed by Jason Lowe 
(capacityscheduler)
- CapacityScheduler

svn commit: r1764362 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/r2.6.5/ publish/docs/r2.6.5/api/ publish/docs/r2.6.5/api/org/ publish/docs/r2.6.5/api/

2016-10-11 Thread sjlee
Author: sjlee
Date: Tue Oct 11 22:50:42 2016
New Revision: 1764362

URL: http://svn.apache.org/viewvc?rev=1764362&view=rev
Log:
Updated site for release 2.6.5.


[This commit notification would consist of 842 parts, 
which exceeds the limit of 50, so it was shortened to this summary.]

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-5638. Introduce a collector timestamp to uniquely identify collectors creation order in collector discovery. Contributed by Li Lu.

2016-10-14 Thread sjlee
YARN-5638. Introduce a collector timestamp to uniquely identify collectors 
creation order in collector discovery. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c186314
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c186314
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c186314

Branch: refs/heads/YARN-5355
Commit: 0c1863144649ea265da65ce25158707cc3a3fb4a
Parents: 5d7ad39
Author: Sangjin Lee 
Authored: Fri Oct 14 14:40:05 2016 -0700
Committer: Sangjin Lee 
Committed: Fri Oct 14 14:40:05 2016 -0700

--
 .../protocolrecords/NodeHeartbeatRequest.java   |  13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   6 +-
 .../ReportNewCollectorInfoRequest.java  |  10 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  47 +++--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  37 ++--
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |  36 ++--
 .../server/api/records/AppCollectorData.java| 104 ++
 .../server/api/records/AppCollectorsMap.java|  46 -
 .../records/impl/pb/AppCollectorDataPBImpl.java | 200 +++
 .../records/impl/pb/AppCollectorsMapPBImpl.java | 152 --
 .../api/records/impl/pb/package-info.java   |  19 ++
 .../yarn_server_common_service_protos.proto |  14 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java|   6 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  22 +-
 .../hadoop/yarn/server/nodemanager/Context.java |  14 +-
 .../yarn/server/nodemanager/NodeManager.java|  23 +--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  59 +++---
 .../collectormanager/NMCollectorService.java|  29 ++-
 .../application/ApplicationImpl.java|  33 ++-
 .../amrmproxy/BaseAMRMProxyTest.java|   8 +-
 .../ApplicationMasterService.java   |  10 +-
 .../resourcemanager/ResourceTrackerService.java |  62 +++---
 .../server/resourcemanager/rmapp/RMApp.java |  30 +--
 .../rmapp/RMAppCollectorUpdateEvent.java|  40 
 .../resourcemanager/rmapp/RMAppEventType.java   |   3 -
 .../server/resourcemanager/rmapp/RMAppImpl.java |  51 +
 .../TestResourceTrackerService.java |  32 ++-
 .../applicationsmanager/MockAsm.java|  11 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |  11 +-
 29 files changed, 621 insertions(+), 507 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c186314/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index c795e55..f238f79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.util.Records;
@@ -47,7 +48,7 @@ public abstract class NodeHeartbeatRequest {
   public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
   MasterKey lastKnownContainerTokenMasterKey,
   MasterKey lastKnownNMTokenMasterKey, Set nodeLabels,
-  Map registeredCollectors) {
+  Map registeringCollectors) {
 NodeHeartbeatRequest nodeHeartbeatRequest =
 Records.newRecord(NodeHeartbeatRequest.class);
 nodeHeartbeatRequest.setNodeStatus(nodeStatus);
@@ -56,7 +57,7 @@ public abstract class NodeHeartbeatRequest {
 nodeHeartbeatRequest
 .setLastKnownNMTokenMasterKey(lastKnownNMTokenMasterKey);
 nodeHeartbeatRequest.setNodeLabels(nodeLabels);
-nodeHeartbeatRequest.setRegisteredCollectors(registeredCollectors);
+nodeHeartbeatRequest.setRegisteringCollectors(registeringCollectors);
 return nodeHeartbeatRequest;
   }
 
@@ -79,7 +80,9 @@ public abstract class NodeHeartbeatRequest {
   List logAggregationReportsForApps);
 
   // This tells RM register

[1/2] hadoop git commit: YARN-5638. Introduce a collector timestamp to uniquely identify collectors creation order in collector discovery. Contributed by Li Lu.

2016-10-14 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 5d7ad396d -> 0c1863144


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c186314/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 98cbd92..c405a8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -22,6 +22,8 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ipc.CallerContext;
 import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -36,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
@@ -177,27 +180,16 @@ public interface RMApp extends EventHandler {
   String getTrackingUrl();
 
   /**
-   * The collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
+   * The timeline collector information for the application. It should be used
+   * only if the timeline service v.2 is enabled.
*
-   * @return the address for the application's collector, or null if the
-   * timeline service v.2 is not enabled.
+   * @return the data for the application's collector, including collector
+   * address, collector ID. Return null if the timeline service v.2 is not
+   * enabled.
*/
-  String getCollectorAddr();
-
-  /**
-   * Set collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
-   *
-   * @param collectorAddr the address of collector
-   */
-  void setCollectorAddr(String collectorAddr);
-
-  /**
-   * Remove collector address when application is finished or killed. It should
-   * be used only if the timeline service v.2 is enabled.
-   */
-  void removeCollectorAddr();
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  AppCollectorData getCollectorData();
 
   /**
* The original tracking url for the application master.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c186314/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
deleted file mode 100644
index 9642911..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific langu

[2/2] hadoop git commit: YARN-5638. Introduce a collector timestamp to uniquely identify collectors creation order in collector discovery. Contributed by Li Lu.

2016-10-14 Thread sjlee
YARN-5638. Introduce a collector timestamp to uniquely identify collectors 
creation order in collector discovery. Contributed by Li Lu.

(cherry picked from commit 0c1863144649ea265da65ce25158707cc3a3fb4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fb2c111
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fb2c111
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fb2c111

Branch: refs/heads/YARN-5355-branch-2
Commit: 1fb2c111f12152f5756ed1d3c11a030b9540dc1d
Parents: d6b3743
Author: Sangjin Lee 
Authored: Fri Oct 14 14:40:05 2016 -0700
Committer: Sangjin Lee 
Committed: Fri Oct 14 14:50:42 2016 -0700

--
 .../protocolrecords/NodeHeartbeatRequest.java   |  13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   6 +-
 .../ReportNewCollectorInfoRequest.java  |  10 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  47 +++--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  37 ++--
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |  36 ++--
 .../server/api/records/AppCollectorData.java| 104 ++
 .../server/api/records/AppCollectorsMap.java|  46 -
 .../records/impl/pb/AppCollectorDataPBImpl.java | 200 +++
 .../records/impl/pb/AppCollectorsMapPBImpl.java | 152 --
 .../api/records/impl/pb/package-info.java   |  19 ++
 .../yarn_server_common_service_protos.proto |  14 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java|   6 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  22 +-
 .../hadoop/yarn/server/nodemanager/Context.java |  14 +-
 .../yarn/server/nodemanager/NodeManager.java|  23 +--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  61 +++---
 .../collectormanager/NMCollectorService.java|  29 ++-
 .../application/ApplicationImpl.java|  33 ++-
 .../amrmproxy/BaseAMRMProxyTest.java|   8 +-
 .../ApplicationMasterService.java   |  10 +-
 .../resourcemanager/ResourceTrackerService.java |  62 +++---
 .../server/resourcemanager/rmapp/RMApp.java |  30 +--
 .../rmapp/RMAppCollectorUpdateEvent.java|  40 
 .../resourcemanager/rmapp/RMAppEventType.java   |   3 -
 .../server/resourcemanager/rmapp/RMAppImpl.java |  51 +
 .../TestResourceTrackerService.java |  32 ++-
 .../applicationsmanager/MockAsm.java|  11 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |  11 +-
 29 files changed, 622 insertions(+), 508 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb2c111/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index c795e55..f238f79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.util.Records;
@@ -47,7 +48,7 @@ public abstract class NodeHeartbeatRequest {
   public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
   MasterKey lastKnownContainerTokenMasterKey,
   MasterKey lastKnownNMTokenMasterKey, Set nodeLabels,
-  Map registeredCollectors) {
+  Map registeringCollectors) {
 NodeHeartbeatRequest nodeHeartbeatRequest =
 Records.newRecord(NodeHeartbeatRequest.class);
 nodeHeartbeatRequest.setNodeStatus(nodeStatus);
@@ -56,7 +57,7 @@ public abstract class NodeHeartbeatRequest {
 nodeHeartbeatRequest
 .setLastKnownNMTokenMasterKey(lastKnownNMTokenMasterKey);
 nodeHeartbeatRequest.setNodeLabels(nodeLabels);
-nodeHeartbeatRequest.setRegisteredCollectors(registeredCollectors);
+nodeHeartbeatRequest.setRegisteringCollectors(registeringCollectors);
 return nodeHeartbeatRequest;
   }
 
@@ -79,7 +80,9 @@ public abstract class NodeHeartbeatRequ

[1/2] hadoop git commit: YARN-5638. Introduce a collector timestamp to uniquely identify collectors creation order in collector discovery. Contributed by Li Lu.

2016-10-14 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 d6b374360 -> 1fb2c111f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb2c111/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 98cbd92..c405a8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -22,6 +22,8 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ipc.CallerContext;
 import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -36,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
@@ -177,27 +180,16 @@ public interface RMApp extends EventHandler {
   String getTrackingUrl();
 
   /**
-   * The collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
+   * The timeline collector information for the application. It should be used
+   * only if the timeline service v.2 is enabled.
*
-   * @return the address for the application's collector, or null if the
-   * timeline service v.2 is not enabled.
+   * @return the data for the application's collector, including collector
+   * address, collector ID. Return null if the timeline service v.2 is not
+   * enabled.
*/
-  String getCollectorAddr();
-
-  /**
-   * Set collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
-   *
-   * @param collectorAddr the address of collector
-   */
-  void setCollectorAddr(String collectorAddr);
-
-  /**
-   * Remove collector address when application is finished or killed. It should
-   * be used only if the timeline service v.2 is enabled.
-   */
-  void removeCollectorAddr();
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  AppCollectorData getCollectorData();
 
   /**
* The original tracking url for the application master.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb2c111/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
deleted file mode 100644
index 9642911..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the speci

hadoop git commit: YARN-5699. Retrospect yarn entity fields which are publishing in events info fields. Contributed by Rohith Sharma K S.

2016-10-15 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5f4ae85bd -> 1f304b0c7


YARN-5699. Retrospect yarn entity fields which are publishing in events info 
fields. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f304b0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f304b0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f304b0c

Branch: refs/heads/trunk
Commit: 1f304b0c7f261369dd68839507bb609a949965ad
Parents: 5f4ae85
Author: Sangjin Lee 
Authored: Sat Oct 15 13:54:40 2016 -0700
Committer: Sangjin Lee 
Committed: Sat Oct 15 13:54:40 2016 -0700

--
 ...pplicationHistoryManagerOnTimelineStore.java |  69 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  38 +++
 .../metrics/AppAttemptMetricsConstants.java |  16 +--
 .../metrics/ContainerMetricsConstants.java  |  21 ++--
 .../timelineservice/NMTimelinePublisher.java|  34 ---
 .../metrics/TimelineServiceV1Publisher.java |  44 
 .../metrics/TimelineServiceV2Publisher.java | 101 +--
 .../metrics/TestSystemMetricsPublisher.java |  40 
 8 files changed, 186 insertions(+), 177 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f304b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index feeafdd..6e6576a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -463,21 +463,21 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (eventInfo == null) {
 continue;
   }
-  if 
(eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
+  if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_INFO)) {
 host =
-eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
+eventInfo.get(AppAttemptMetricsConstants.HOST_INFO)
 .toString();
   }
   if (eventInfo
-  .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.RPC_PORT_INFO)) {
 rpcPort = (Integer) eventInfo.get(
-AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
+AppAttemptMetricsConstants.RPC_PORT_INFO);
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)) {
 amContainerId =
 ContainerId.fromString(eventInfo.get(
-AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)
 .toString());
   }
 } else if (event.getEventType().equals(
@@ -487,39 +487,40 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 continue;
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.TRACKING_URL_INFO)) {
 trackingUrl =
 eventInfo.get(
-AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
+AppAttemptMetricsConstants.TRACKING_URL_INFO)
 .toString();
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
+  .containsKey(
+  AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_INFO)) {
 originalTrackingUrl =
 eventInfo
 .get(
-

hadoop git commit: YARN-5699. Retrospect yarn entity fields which are publishing in events info fields. Contributed by Rohith Sharma K S.

2016-10-15 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 0c1863144 -> 787d59420


YARN-5699. Retrospect yarn entity fields which are publishing in events info 
fields. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/787d5942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/787d5942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/787d5942

Branch: refs/heads/YARN-5355
Commit: 787d5942003a5441ba455a79fa08da28f60d5777
Parents: 0c18631
Author: Sangjin Lee 
Authored: Sat Oct 15 13:58:40 2016 -0700
Committer: Sangjin Lee 
Committed: Sat Oct 15 13:58:40 2016 -0700

--
 ...pplicationHistoryManagerOnTimelineStore.java |  69 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  38 +++
 .../metrics/AppAttemptMetricsConstants.java |  16 +--
 .../metrics/ContainerMetricsConstants.java  |  21 ++--
 .../timelineservice/NMTimelinePublisher.java|  32 +++---
 .../metrics/TimelineServiceV1Publisher.java |  44 
 .../metrics/TimelineServiceV2Publisher.java | 101 +--
 .../metrics/TestSystemMetricsPublisher.java |  40 
 8 files changed, 186 insertions(+), 175 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/787d5942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 84d4543..b5b2362 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -455,21 +455,21 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (eventInfo == null) {
 continue;
   }
-  if 
(eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
+  if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_INFO)) {
 host =
-eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
+eventInfo.get(AppAttemptMetricsConstants.HOST_INFO)
 .toString();
   }
   if (eventInfo
-  .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.RPC_PORT_INFO)) {
 rpcPort = (Integer) eventInfo.get(
-AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
+AppAttemptMetricsConstants.RPC_PORT_INFO);
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)) {
 amContainerId =
 ContainerId.fromString(eventInfo.get(
-AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)
 .toString());
   }
 } else if (event.getEventType().equals(
@@ -479,39 +479,40 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 continue;
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.TRACKING_URL_INFO)) {
 trackingUrl =
 eventInfo.get(
-AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
+AppAttemptMetricsConstants.TRACKING_URL_INFO)
 .toString();
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
+  .containsKey(
+  AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_INFO)) {
 originalTrackingUrl =
 eventInfo
 .get(
- 

hadoop git commit: YARN-5699. Retrospect yarn entity fields which are publishing in events info fields. Contributed by Rohith Sharma K S.

2016-10-15 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 1fb2c111f -> 059fac44e


YARN-5699. Retrospect yarn entity fields which are publishing in events info 
fields. Contributed by Rohith Sharma K S.

(cherry picked from commit 787d5942003a5441ba455a79fa08da28f60d5777)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/059fac44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/059fac44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/059fac44

Branch: refs/heads/YARN-5355-branch-2
Commit: 059fac44e8c90d2ed30f12340cab7625ddd3e999
Parents: 1fb2c11
Author: Sangjin Lee 
Authored: Sat Oct 15 13:58:40 2016 -0700
Committer: Sangjin Lee 
Committed: Sat Oct 15 14:12:24 2016 -0700

--
 ...pplicationHistoryManagerOnTimelineStore.java |  69 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  38 +++
 .../metrics/AppAttemptMetricsConstants.java |  16 +--
 .../metrics/ContainerMetricsConstants.java  |  21 ++--
 .../timelineservice/NMTimelinePublisher.java|  32 +++---
 .../metrics/TimelineServiceV1Publisher.java |  44 
 .../metrics/TimelineServiceV2Publisher.java | 101 +--
 .../metrics/TestSystemMetricsPublisher.java |  40 
 8 files changed, 186 insertions(+), 175 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/059fac44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 84d4543..b5b2362 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -455,21 +455,21 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (eventInfo == null) {
 continue;
   }
-  if 
(eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
+  if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_INFO)) {
 host =
-eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
+eventInfo.get(AppAttemptMetricsConstants.HOST_INFO)
 .toString();
   }
   if (eventInfo
-  .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.RPC_PORT_INFO)) {
 rpcPort = (Integer) eventInfo.get(
-AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
+AppAttemptMetricsConstants.RPC_PORT_INFO);
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)) {
 amContainerId =
 ContainerId.fromString(eventInfo.get(
-AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)
 .toString());
   }
 } else if (event.getEventType().equals(
@@ -479,39 +479,40 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 continue;
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.TRACKING_URL_INFO)) {
 trackingUrl =
 eventInfo.get(
-AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
+AppAttemptMetricsConstants.TRACKING_URL_INFO)
 .toString();
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
+  .containsKey(
+  AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_INFO)) {
 or

[2/2] hadoop git commit: YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.

2017-01-13 Thread sjlee
YARN-5980. Update documentation for single node hbase deploy. Contributed by 
Vrushali C.

(cherry picked from commit e1bdba77888723b435a235a96c8659029afd25d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d1e4140
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d1e4140
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d1e4140

Branch: refs/heads/YARN-5355-branch-2
Commit: 8d1e41407b093435b5a2c9e52d1102522e1fdaf7
Parents: cf7f9e9
Author: Sangjin Lee 
Authored: Fri Jan 13 09:12:48 2017 -0800
Committer: Sangjin Lee 
Committed: Fri Jan 13 11:46:31 2017 -0800

--
 .../src/site/markdown/TimelineServiceV2.md  | 63 +---
 1 file changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d1e4140/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 182a5fe..7a0c477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -165,18 +165,64 @@ New configuration parameters that are introduced with v.2 
are marked bold.
 ### Enabling Timeline Service v.2
 
  Preparing Apache HBase cluster for storage
+There are a few steps to be done for preparing the storage for Timeline 
Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+#  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. 
The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. 
The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
+Timeline Service.
+
+HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
+mode that is suitable for your setup.
+(http://hbase.apache.org/book.html#standalone_dist)
+
+# Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy 
mode.
+
+This is a useful variation on the standalone HBase setup and has all HBase 
daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an 
HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across 
node
+comings and goings. To configure this standalone variant, edit your 
`hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
+
+```
+
+  
+hbase.rootdir
+hdfs://namenode.example.org:8020/hbase
+  
+  
+hbase.cluster.distributed
+false
+  
+
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following 
steps.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform 
the following steps.
+#  Step 2) Enable the coprocessor
 
-First, add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
+Step 2.1) Add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
 is needed for the coprocessor as well as the schema creator. For example,
 
 cp hadoop-yarn-server-timelineservice-3.0.0-alpha1-SNAPSHOT.jar 
/usr/hbase/lib/
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add 
the following entry in
+Step 2.2) Enable the coprocessor that handles the aggregation. To enable it, 
add the following entry in
 region servers' `hbase-site.xml` file (generally located in the `conf` 
directory) as follows:
 
 ```
@@ -186,10 +232,11 @@ region servers' `hbase-site.xml` file (generally located 
in the `conf` directory
 
 ```
 
-Restart the region servers and the master to pick up the timeline service jar 
as well as the config
-change. In this version, the coprocessor is loaded s

[1/2] hadoop git commit: YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.

2017-01-13 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 8df6f98e5 -> e1bdba778
  refs/heads/YARN-5355-branch-2 cf7f9e91f -> 8d1e41407


YARN-5980. Update documentation for single node hbase deploy. Contributed by 
Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1bdba77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1bdba77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1bdba77

Branch: refs/heads/YARN-5355
Commit: e1bdba77888723b435a235a96c8659029afd25d5
Parents: 8df6f98
Author: Sangjin Lee 
Authored: Fri Jan 13 09:12:48 2017 -0800
Committer: Sangjin Lee 
Committed: Fri Jan 13 09:12:48 2017 -0800

--
 .../src/site/markdown/TimelineServiceV2.md  | 63 +---
 1 file changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1bdba77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 9a06b47..0d77f2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -165,18 +165,64 @@ New configuration parameters that are introduced with v.2 
are marked bold.
 ### Enabling Timeline Service v.2
 
  Preparing Apache HBase cluster for storage
+There are a few steps to be done for preparing the storage for Timeline 
Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+#  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. 
The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. 
The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
+Timeline Service.
+
+HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
+mode that is suitable for your setup.
+(http://hbase.apache.org/book.html#standalone_dist)
+
+# Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy 
mode.
+
+This is a useful variation on the standalone HBase setup and has all HBase 
daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an 
HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across 
node
+comings and goings. To configure this standalone variant, edit your 
`hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
+
+```
+
+  
+hbase.rootdir
+hdfs://namenode.example.org:8020/hbase
+  
+  
+hbase.cluster.distributed
+false
+  
+
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following 
steps.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform 
the following steps.
+#  Step 2) Enable the coprocessor
 
-First, add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
+Step 2.1) Add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
 is needed for the coprocessor as well as the schema creator. For example,
 
 cp hadoop-yarn-server-timelineservice-3.0.0-alpha1-SNAPSHOT.jar 
/usr/hbase/lib/
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add 
the following entry in
+Step 2.2) Enable the coprocessor that handles the aggregation. To enable it, 
add the following entry in
 region servers' `hbase-site.xml` file (generally located in the `conf` 
directory) as follows:
 
 ```
@@ -186,10 +232,11 @@ region servers' `hbase-site.xml` file (generally located 
in the `conf` directory
 
 ```
 
-Restart the region servers and the master to pick up the timeline service jar 
as well as the

[2/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.

(cherry picked from commit 0327a79d79a4d56d9c7cb6889886afd2272b07d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0177c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0177c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0177c95

Branch: refs/heads/YARN-5355-branch-2
Commit: e0177c952c3b64c8dcf0408562faa98f725280e0
Parents: c11078f
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:55:55 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 659b5eb..e1ecaf9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2058,6 +2058,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 416d705..a21ee2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2272,6 +2272,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-time

[1/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 c9246f619 -> 0327a79d7
  refs/heads/YARN-5355-branch-2 c11078fd2 -> e0177c952


YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0327a79d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0327a79d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0327a79d

Branch: refs/heads/YARN-5355
Commit: 0327a79d79a4d56d9c7cb6889886afd2272b07d3
Parents: c9246f6
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:52:47 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8752e5d..e45bfe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2047,6 +2047,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6f62fd8..16954a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2243,6 +2243,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-p

[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[07/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static  Filter createHBaseColQualPrefixFilter(
-  ColumnPrefix colPrefix, TimelinePrefixFilter filter) {
-return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-new BinaryPrefixComparator(
-colPrefix.getColumnPrefixBytes(filter.getPrefix(;
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param  Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 

[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static AggregationOperat

[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static AggregationOperat

[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha2 18f64065d -> 9a925cb8e
  refs/heads/trunk 60865c8ea -> b01514f65


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-

[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * generate a timestamp that is unique per row in a region this is per 
region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a 
new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * org.apache.hadoop.hbase.client.Durabilit

[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 

[15/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-assertEquals(CLUSTER, rowKey.getClusterId());
-assertEquals(USER, rowKey.getUserId());
-assertEquals(FLOW_NAME, rowKey.getFlo

[06/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table as column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metriciD1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable {
-  /** application prefix. */
-  private static final String PREFIX =
-  YarnConfi

[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**
+

[12/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param  refers to the table.
+ */
+public class ColumnHelper {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[20/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table as column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metriciD1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-  YarnConfi

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa

[24/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = 
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[14/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b01514f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b01514f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b01514f6

Branch: refs/heads/trunk
Commit: b01514f65bc6090a50a583f67d1ecb5d74b6d276
Parents: 60865c8
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 20:52:55 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.java  |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 ..

[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
-  new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-  KeyConverter<EntityRowKey> {
-
-private final AppIdKeyConverter appIDKeyConverter = new 
AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType 
and
- * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
- * is represented as 12 bytes

[10/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = 
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b01514f65bc6090a50a583f67d1ecb5d74b6d276)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a925cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a925cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a925cb8

Branch: refs/heads/branch-3.0.0-alpha2
Commit: 9a925cb8e8f438d29934043ceabb4c0066279cd5
Parents: 18f6406
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:00:24 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.ja

[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 

[21/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static  Filter createHBaseColQualPrefixFilter(
-  ColumnPrefix colPrefix, TimelinePrefixFilter filter) {
-return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-new BinaryPrefixComparator(
-colPrefix.getColumnPrefixBytes(filter.getPrefix(;
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param  Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 

[22/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**
+

[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  FilterLi

[18/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter entityRowKeyConverter =
-  new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-  KeyConverter {
-
-private final AppIdKeyConverter appIDKeyConverter = new 
AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType 
and
- * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
- * is represented as 12 bytes

[16/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  FilterLi

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * generate a timestamp that is unique per row in a region this is per 
region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a 
new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * org.apache.hadoop.hbase.client.Durabilit

[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application 
end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.s

[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones 
the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cas

[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 

[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the 
size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KI

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  /**
+   * Creates a scanner with no incoming Scan; delegates to the main
+   * constructor with a null Scan.
+   *
+   * @param env region coprocessor environment this scanner runs in.
+   * @param internalScanner underlying scanner whose cells are processed.
+   * @param action the flow scanner operation applied to the scanned cells.
+   */
  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  /**
-   * Creates a column whose values are encoded/decoded with the default
-   * {@link GenericConverter}.
-   *
-   * @param columnFamily the family this column belongs to.
-   * @param columnQualifier the column name within the family.
-   */
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  /**
-   * Creates a column with an explicit value converter.
-   *
-   * @param columnFamily the family this column belongs to.
-   * @param columnQualifier the column name within the family.
-   * @param converter converter used to encode/decode stored values.
-   */
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * Returns the plain-text column qualifier for this column.
-   *
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  /**
-   * Stores a value for this column in the given row, delegating to the
-   * underlying {@link ColumnHelper} with this column's qualifier bytes.
-   *
-   * @param rowKey identifies the row to write to.
-   * @param tableMutator buffered mutator used to send the mutation.
-   * @param timestamp version timestamp; semantics defined by ColumnHelper.
-   * @param inputValue the value to write.
-   * @param attributes attributes to set on the underlying HBase Put.
-   * @throws IOException if the underlying store operation fails.
-   */
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  /**
-   * Reads this column's latest value from a query {@link Result}.
-   *
-   * @param result the row returned by an HBase Get/Scan.
-   * @return the decoded value for this column, as produced by ColumnHelper.
-   * @throws IOException if decoding the stored value fails.
-   */
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String columnQuali

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47ec7f92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47ec7f92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47ec7f92

Branch: refs/heads/YARN-5355-branch-2
Commit: 47ec7f927e8b1b1eeb8a2287ae2a7795cab131dd
Parents: e0177c9
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:38:38 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 542 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOpe

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param  refers to the table.
+ */
+public class ColumnHelper {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  /**
+   * Creates a helper that encodes/decodes values with the default
+   * {@link GenericConverter}.
+   *
+   * @param columnFamily the family this helper writes to and reads from.
+   */
+  public ColumnHelper(ColumnFamily columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
/**
 * Creates a helper for the given family using the supplied converter.
 *
 * @param columnFamily the family this helper writes to and reads from.
 * @param converter converter used to encode/decode values; when null the
 *          default {@link GenericConverter} is used instead.
 */
public ColumnHelper(ColumnFamily columnFamily, ValueConverter converter) {
  this.columnFamily = columnFamily;
  // Cache the byte form once so it is not re-cloned on every mutation.
  this.columnFamilyBytes = columnFamily.getBytes();
  // Fall back to the generic pass-through converter when none is supplied.
  this.converter =
      (converter == null) ? GenericConverter.getInstance() : converter;
}
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa

[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 0327a79d7 -> b92089c0e
  refs/heads/YARN-5355-branch-2 e0177c952 -> 47ec7f927


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new StringKeyConvert

[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  /**
-   * @param ctxt the reader context this storage reader operates on.
-   */
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  /**
-   * @return the timeline reader context this reader was constructed with.
-   */
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws IOE

[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones 
the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cas

[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the 
size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KI

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application 
end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.s

<    2   3   4   5   6   7   8   9   10   11   >