hadoop git commit: HDFS-11268. Correctly reconstruct erasure coding file from FSImage. Contributed by Sammi Chen.

2017-01-19 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 63320d1da -> a8f1c7f54


HDFS-11268. Correctly reconstruct erasure coding file from FSImage. Contributed by Sammi Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8f1c7f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8f1c7f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8f1c7f5

Branch: refs/heads/trunk
Commit: a8f1c7f542963f66849bcb2a06893c6a99cbe235
Parents: 63320d1
Author: Andrew Wang 
Authored: Thu Jan 19 00:24:11 2017 -0800
Committer: Andrew Wang 
Committed: Thu Jan 19 00:24:11 2017 -0800

--
 .../server/namenode/FSImageFormatPBINode.java   |  4 +-
 .../hdfs/server/namenode/TestFSImage.java   | 82 
 2 files changed, 71 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f1c7f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index f85978b..fcc7012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -332,7 +332,9 @@ public final class FSImageFormatPBINode {
       short replication = (short) f.getReplication();
       BlockType blockType = PBHelperClient.convert(f.getBlockType());
       LoaderContext state = parent.getLoaderContext();
-      ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      ErasureCodingPolicy ecPolicy = (blockType == BlockType.STRIPED) ?
+          ErasureCodingPolicyManager.getPolicyByPolicyID((byte) replication) :
+          null;
 
       BlockInfo[] blocks = new BlockInfo[bp.size()];
       for (int i = 0; i < bp.size(); ++i) {
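
The substance of the fix: for striped files the INodeFile replication field doubles as the erasure coding policy ID, so the loader must branch on block type instead of always assuming the system default policy. A minimal sketch of that convention (illustrative, not the committed code):

```java
// Sketch of the loading convention this patch restores: for STRIPED blocks
// the on-image "replication" short carries the EC policy ID, while
// contiguous files carry no EC policy at all.
ErasureCodingPolicy resolveEcPolicy(BlockType blockType, short replication) {
  if (blockType == BlockType.STRIPED) {
    // Interpret the replication field as a policy ID, e.g. RS-10-4.
    return ErasureCodingPolicyManager.getPolicyByPolicyID((byte) replication);
  }
  return null; // contiguous file: no erasure coding policy
}
```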

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f1c7f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 4045320..339c591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -31,6 +33,7 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -73,8 +76,9 @@ public class TestFSImage {
 
   private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
   "image-with-zero-block-size.tar.gz";
-  private static final ErasureCodingPolicy testECPolicy
-  = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private static final ErasureCodingPolicy testECPolicy =
+  ErasureCodingPolicyManager.getPolicyByPolicyID(
+  HdfsConstants.RS_10_4_POLICY_ID);
 
   @Test
   public void testPersist() throws IOException {
@@ -446,8 +450,8 @@ public class TestFSImage {
   /**
* Ensure that FSImage supports BlockGroup.
*/
-  @Test
-  public void testSupportBlockGroup() throws IOException {
+  @Test(timeout = 60000)
+  public void testSupportBlockGroup() throws Exception {
 final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() +
 testECPolicy.getNumParityUnits());
 final int BLOCK_SIZE = 8 * 1024 * 1024;
@@ -459,32 +463,82 @@ public class TestFSImage {
   .build();
   cluster.waitActive();
   DistributedFileSystem fs = cluster.getFileSystem();
-  fs.getClient().getNamenode().setErasureCodingPolicy("/", testECPolicy);
-  Path file = new Path("/striped");
-  FSDataOutputStream out = fs.create(file);
-  byte[] bytes = DFSTestUtil.generateSequentialBytes(
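
The archived diff is truncated here. Judging from the assertNotNull/assertNull imports added above, the reworked test presumably round-trips the namespace through an FSImage and checks that the policy is reconstructed; a hypothetical sketch of that flow (names beyond those visible in the diff are assumptions):

```java
// Hypothetical continuation of the truncated test: persist an FSImage,
// restart the NameNode, and verify the striped file's EC policy survives.
fs.getClient().getNamenode().setErasureCodingPolicy("/", testECPolicy);
DFSTestUtil.createFile(fs, new Path("/striped"), BLOCK_SIZE, (short) 1, 0L);
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
assertNotNull(fs.getErasureCodingPolicy(new Path("/striped")));
```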

hadoop git commit: HDFS-11259. Update fsck to display maintenance state info. (Manoj Govindassamy via lei)

2017-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 541efe18c -> 1cc5f460e


HDFS-11259. Update fsck to display maintenance state info. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cc5f460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cc5f460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cc5f460

Branch: refs/heads/branch-2
Commit: 1cc5f460edf68e04dd1972f7c1d00077e8bdd5de
Parents: 541efe1
Author: Lei Xu 
Authored: Thu Jan 19 16:24:58 2017 +0800
Committer: Lei Xu 
Committed: Thu Jan 19 16:24:58 2017 +0800

--
 .../hdfs/server/namenode/NamenodeFsck.java  |  58 +++-
 .../org/apache/hadoop/hdfs/tools/DFSck.java |   9 +-
 .../src/site/markdown/HDFSCommands.md   |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 271 ---
 4 files changed, 293 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cc5f460/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 1a2deb0..7c9913a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -116,6 +116,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   public static final String HEALTHY_STATUS = "is HEALTHY";
   public static final String DECOMMISSIONING_STATUS = "is DECOMMISSIONING";
   public static final String DECOMMISSIONED_STATUS = "is DECOMMISSIONED";
+  public static final String ENTERING_MAINTENANCE_STATUS =
+  "is ENTERING MAINTENANCE";
+  public static final String IN_MAINTENANCE_STATUS = "is IN MAINTENANCE";
   public static final String NONEXISTENT_STATUS = "does not exist";
   public static final String FAILURE_STATUS = "FAILED";
   public static final String UNDEFINED = "undefined";
@@ -138,6 +141,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 
   private boolean showReplicaDetails = false;
   private boolean showUpgradeDomains = false;
+  private boolean showMaintenanceState = false;
   private long staleInterval;
   private Tracer tracer;
 
@@ -220,6 +224,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 this.showReplicaDetails = true;
   } else if (key.equals("upgradedomains")) {
 this.showUpgradeDomains = true;
+  } else if (key.equals("maintenance")) {
+this.showMaintenanceState = true;
   } else if (key.equals("storagepolicies")) {
 this.showStoragePolcies = true;
   } else if (key.equals("openforwrite")) {
@@ -271,6 +277,12 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   + numberReplicas.decommissioned());
   out.println("No. of decommissioning Replica: "
   + numberReplicas.decommissioning());
+  if (this.showMaintenanceState) {
+out.println("No. of entering maintenance Replica: "
++ numberReplicas.liveEnteringMaintenanceReplicas());
+out.println("No. of in maintenance Replica: "
++ numberReplicas.maintenanceNotForReadReplicas());
+  }
   out.println("No. of corrupted Replica: " +
   numberReplicas.corruptReplicas());
   //record datanodes that have corrupted block replica
@@ -291,6 +303,10 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   out.print(DECOMMISSIONED_STATUS);
 } else if (dn.isDecommissionInProgress()) {
   out.print(DECOMMISSIONING_STATUS);
+} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+  out.print(ENTERING_MAINTENANCE_STATUS);
+} else if (this.showMaintenanceState && dn.isInMaintenance()) {
+  out.print(IN_MAINTENANCE_STATUS);
 } else {
   out.print(HEALTHY_STATUS);
 }
@@ -567,13 +583,21 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   NumberReplicas numberReplicas = bm.countNodes(storedBlock);
   int decommissionedReplicas = numberReplicas.decommissioned();;
   int decommissioningReplicas = numberReplicas.decommissioning();
+  int enteringMaintenanceReplicas =
+  numberReplicas.liveEnteringMaintenanceReplicas();
+  int inMaintenanceReplicas =
+  numberReplicas.maintenanceNotForReadReplicas();
   res.decommissionedReplicas +=  decommissionedReplicas;
   res.decommissioni

hadoop git commit: HDFS-11259. Update fsck to display maintenance state info. (Manoj Govindassamy via lei)

2017-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk a8f1c7f54 -> 78ae2aed8


HDFS-11259. Update fsck to display maintenance state info. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78ae2aed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78ae2aed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78ae2aed

Branch: refs/heads/trunk
Commit: 78ae2aed8f84d2d3983f81a5219e8b1f1ec59dca
Parents: a8f1c7f
Author: Lei Xu 
Authored: Thu Jan 19 16:05:56 2017 +0800
Committer: Lei Xu 
Committed: Thu Jan 19 16:28:31 2017 +0800

--
 .../hdfs/server/namenode/NamenodeFsck.java  |  68 +++-
 .../org/apache/hadoop/hdfs/tools/DFSck.java |  10 +-
 .../src/site/markdown/HDFSCommands.md   |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 381 +--
 4 files changed, 418 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ae2aed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index d8441f8..1ae75f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -117,6 +117,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   public static final String HEALTHY_STATUS = "is HEALTHY";
   public static final String DECOMMISSIONING_STATUS = "is DECOMMISSIONING";
   public static final String DECOMMISSIONED_STATUS = "is DECOMMISSIONED";
+  public static final String ENTERING_MAINTENANCE_STATUS =
+  "is ENTERING MAINTENANCE";
+  public static final String IN_MAINTENANCE_STATUS = "is IN MAINTENANCE";
   public static final String NONEXISTENT_STATUS = "does not exist";
   public static final String FAILURE_STATUS = "FAILED";
   public static final String UNDEFINED = "undefined";
@@ -144,6 +147,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 
   private boolean showReplicaDetails = false;
   private boolean showUpgradeDomains = false;
+  private boolean showMaintenanceState = false;
   private long staleInterval;
   private Tracer tracer;
 
@@ -227,6 +231,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 this.showReplicaDetails = true;
   } else if (key.equals("upgradedomains")) {
 this.showUpgradeDomains = true;
+  } else if (key.equals("maintenance")) {
+this.showMaintenanceState = true;
   } else if (key.equals("storagepolicies")) {
 this.showStoragePolcies = true;
   } else if (key.equals("showprogress")) {
@@ -280,6 +286,12 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   + numberReplicas.decommissioned());
   out.println("No. of decommissioning Replica: "
   + numberReplicas.decommissioning());
+  if (this.showMaintenanceState) {
+out.println("No. of entering maintenance Replica: "
++ numberReplicas.liveEnteringMaintenanceReplicas());
+out.println("No. of in maintenance Replica: "
++ numberReplicas.maintenanceNotForReadReplicas());
+  }
   out.println("No. of corrupted Replica: " +
   numberReplicas.corruptReplicas());
   //record datanodes that have corrupted block replica
@@ -300,6 +312,10 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   out.print(DECOMMISSIONED_STATUS);
 } else if (dn.isDecommissionInProgress()) {
   out.print(DECOMMISSIONING_STATUS);
+} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+  out.print(ENTERING_MAINTENANCE_STATUS);
+} else if (this.showMaintenanceState && dn.isInMaintenance()) {
+  out.print(IN_MAINTENANCE_STATUS);
 } else {
   out.print(HEALTHY_STATUS);
 }
@@ -598,6 +614,12 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   sb.append("DECOMMISSIONED)");
 } else if (dnDesc.isDecommissionInProgress()) {
   sb.append("DECOMMISSIONING)");
+} else if (this.showMaintenanceState &&
+dnDesc.isEnteringMaintenance()) {
+  sb.append("ENTERING MAINTENANCE)");
+} else if (this.showMaintenanceState &&
+dnDesc.isInMaintenance()) {
+  sb.append("IN MAINTENANCE)");
 } else if (corruptReplicas != null
 && corruptReplicas.contain
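
The net effect of the hunks above: maintenance states are reported only when the operator asks for them (judging from the option parsing, via a new fsck -maintenance flag wired through the DFSck change in the diffstat), and decommission states take precedence. The per-datanode status selection boils down to roughly:

```java
// Paraphrase of the status precedence in this patch (not a verbatim copy):
// decommission states win, and maintenance states are shown only on request.
String nodeStatus(DatanodeDescriptor dn, boolean showMaintenanceState) {
  if (dn.isDecommissioned())         return DECOMMISSIONED_STATUS;
  if (dn.isDecommissionInProgress()) return DECOMMISSIONING_STATUS;
  if (showMaintenanceState && dn.isEnteringMaintenance())
    return ENTERING_MAINTENANCE_STATUS;
  if (showMaintenanceState && dn.isInMaintenance())
    return IN_MAINTENANCE_STATUS;
  return HEALTHY_STATUS;
}
```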

hadoop git commit: HADOOP-14001. Improve delegation token validity checking.

2017-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 78ae2aed8 -> 176346721


HADOOP-14001. Improve delegation token validity checking.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17634672
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17634672
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17634672

Branch: refs/heads/trunk
Commit: 176346721006a03f41d028560e9e29b5931d5be2
Parents: 78ae2ae
Author: Akira Ajisaka 
Authored: Thu Jan 19 17:56:39 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 19 17:56:39 2017 +0900

--
 .../token/delegation/AbstractDelegationTokenSecretManager.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17634672/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 21d3dd6..4b14059 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.security.token.delegation;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Arrays;
+import java.security.MessageDigest;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -467,7 +467,7 @@ extends AbstractDelegationTokenIdentifier>
   public synchronized void verifyToken(TokenIdent identifier, byte[] password)
   throws InvalidToken {
 byte[] storedPassword = retrievePassword(identifier);
-if (!Arrays.equals(password, storedPassword)) {
+if (!MessageDigest.isEqual(password, storedPassword)) {
   throw new InvalidToken("token " + formatTokenId(identifier)
   + " is invalid, password doesn't match");
 }
@@ -516,7 +516,7 @@ extends AbstractDelegationTokenIdentifier>
   + id.getSequenceNumber());
 }
 byte[] password = createPassword(token.getIdentifier(), key.getKey());
-if (!Arrays.equals(password, token.getPassword())) {
+if (!MessageDigest.isEqual(password, token.getPassword())) {
   throw new AccessControlException(renewer
   + " is trying to renew a token "
   + formatTokenId(id) + " with wrong password");
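
The substance of this one-liner: java.util.Arrays.equals returns as soon as two arrays differ, so the time taken to reject a forged token password leaks how long a prefix the caller guessed correctly; java.security.MessageDigest.isEqual (constant-time on modern JDKs) examines every byte, making verification time independent of the secret. A self-contained sketch of the difference:

```java
import java.security.MessageDigest;
import java.util.Arrays;

// Both calls return the same boolean; the difference is that isEqual's
// running time does not depend on where the first mismatching byte sits,
// closing the timing side channel that Arrays.equals leaves open.
public class TokenPasswordCompare {
  public static void main(String[] args) {
    byte[] stored = {42, 7, 13, 99};   // secret password bytes
    byte[] guess  = {42, 0, 0, 0};     // attacker-controlled probe
    System.out.println(Arrays.equals(stored, guess));         // false, early exit
    System.out.println(MessageDigest.isEqual(stored, guess)); // false, constant time
  }
}
```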





hadoop git commit: HADOOP-14001. Improve delegation token validity checking.

2017-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1cc5f460e -> c6c29d008


HADOOP-14001. Improve delegation token validity checking.

(cherry picked from commit 176346721006a03f41d028560e9e29b5931d5be2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6c29d00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6c29d00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6c29d00

Branch: refs/heads/branch-2
Commit: c6c29d0080964e55ff84246af1e4149d1ef3162a
Parents: 1cc5f46
Author: Akira Ajisaka 
Authored: Thu Jan 19 17:56:39 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 19 17:58:44 2017 +0900

--
 .../token/delegation/AbstractDelegationTokenSecretManager.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6c29d00/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 21d3dd6..4b14059 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.security.token.delegation;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Arrays;
+import java.security.MessageDigest;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -467,7 +467,7 @@ extends AbstractDelegationTokenIdentifier>
   public synchronized void verifyToken(TokenIdent identifier, byte[] password)
   throws InvalidToken {
 byte[] storedPassword = retrievePassword(identifier);
-if (!Arrays.equals(password, storedPassword)) {
+if (!MessageDigest.isEqual(password, storedPassword)) {
   throw new InvalidToken("token " + formatTokenId(identifier)
   + " is invalid, password doesn't match");
 }
@@ -516,7 +516,7 @@ extends AbstractDelegationTokenIdentifier>
   + id.getSequenceNumber());
 }
 byte[] password = createPassword(token.getIdentifier(), key.getKey());
-if (!Arrays.equals(password, token.getPassword())) {
+if (!MessageDigest.isEqual(password, token.getPassword())) {
   throw new AccessControlException(renewer
   + " is trying to renew a token "
   + formatTokenId(id) + " with wrong password");





hadoop git commit: HADOOP-14001. Improve delegation token validity checking.

2017-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e7b4f88ac -> b8b8b9a32


HADOOP-14001. Improve delegation token validity checking.

(cherry picked from commit 176346721006a03f41d028560e9e29b5931d5be2)
(cherry picked from commit c6c29d0080964e55ff84246af1e4149d1ef3162a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8b8b9a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8b8b9a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8b8b9a3

Branch: refs/heads/branch-2.8
Commit: b8b8b9a32b0b12ea3f6356f0317729197871eac4
Parents: e7b4f88
Author: Akira Ajisaka 
Authored: Thu Jan 19 17:56:39 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 19 17:59:18 2017 +0900

--
 .../token/delegation/AbstractDelegationTokenSecretManager.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b8b9a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 21d3dd6..4b14059 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.security.token.delegation;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Arrays;
+import java.security.MessageDigest;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -467,7 +467,7 @@ extends AbstractDelegationTokenIdentifier>
   public synchronized void verifyToken(TokenIdent identifier, byte[] password)
   throws InvalidToken {
 byte[] storedPassword = retrievePassword(identifier);
-if (!Arrays.equals(password, storedPassword)) {
+if (!MessageDigest.isEqual(password, storedPassword)) {
   throw new InvalidToken("token " + formatTokenId(identifier)
   + " is invalid, password doesn't match");
 }
@@ -516,7 +516,7 @@ extends AbstractDelegationTokenIdentifier>
   + id.getSequenceNumber());
 }
 byte[] password = createPassword(token.getIdentifier(), key.getKey());
-if (!Arrays.equals(password, token.getPassword())) {
+if (!MessageDigest.isEqual(password, token.getPassword())) {
   throw new AccessControlException(renewer
   + " is trying to renew a token "
   + formatTokenId(id) + " with wrong password");





hadoop git commit: HADOOP-14001. Improve delegation token validity checking.

2017-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 544e30170 -> deb0b10e8


HADOOP-14001. Improve delegation token validity checking.

(cherry picked from commit 176346721006a03f41d028560e9e29b5931d5be2)
(cherry picked from commit c6c29d0080964e55ff84246af1e4149d1ef3162a)
(cherry picked from commit b8b8b9a32b0b12ea3f6356f0317729197871eac4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/deb0b10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/deb0b10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/deb0b10e

Branch: refs/heads/branch-2.8.0
Commit: deb0b10e8f8864b6506680bf9c3bea70f135da4a
Parents: 544e3017
Author: Akira Ajisaka 
Authored: Thu Jan 19 17:56:39 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 19 18:02:02 2017 +0900

--
 .../token/delegation/AbstractDelegationTokenSecretManager.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/deb0b10e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 21d3dd6..4b14059 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.security.token.delegation;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Arrays;
+import java.security.MessageDigest;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -467,7 +467,7 @@ extends AbstractDelegationTokenIdentifier>
   public synchronized void verifyToken(TokenIdent identifier, byte[] password)
   throws InvalidToken {
 byte[] storedPassword = retrievePassword(identifier);
-if (!Arrays.equals(password, storedPassword)) {
+if (!MessageDigest.isEqual(password, storedPassword)) {
   throw new InvalidToken("token " + formatTokenId(identifier)
   + " is invalid, password doesn't match");
 }
@@ -516,7 +516,7 @@ extends AbstractDelegationTokenIdentifier>
   + id.getSequenceNumber());
 }
 byte[] password = createPassword(token.getIdentifier(), key.getKey());
-if (!Arrays.equals(password, token.getPassword())) {
+if (!MessageDigest.isEqual(password, token.getPassword())) {
   throw new AccessControlException(renewer
   + " is trying to renew a token "
   + formatTokenId(id) + " with wrong password");





hadoop git commit: HADOOP-14001. Improve delegation token validity checking.

2017-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a6625e69a -> 1cf20b37e


HADOOP-14001. Improve delegation token validity checking.

(cherry picked from commit 176346721006a03f41d028560e9e29b5931d5be2)
(cherry picked from commit c6c29d0080964e55ff84246af1e4149d1ef3162a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cf20b37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cf20b37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cf20b37

Branch: refs/heads/branch-2.7
Commit: 1cf20b37ed55eb94e63216b13db33da40563c7f4
Parents: a6625e6
Author: Akira Ajisaka 
Authored: Thu Jan 19 17:56:39 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 19 18:07:54 2017 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 2 ++
 .../token/delegation/AbstractDelegationTokenSecretManager.java | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cf20b37/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1e65a6d..e16ac7d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -110,6 +110,8 @@ Release 2.7.4 - UNRELEASED
 HADOOP-13839. Fix outdated tracing documentation.
 (Elek, Marton via iwasakims)
 
+HADOOP-14001. Improve delegation token validity checking. (aajisaka)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cf20b37/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 16de391..02b3fea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.security.token.delegation;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Arrays;
+import java.security.MessageDigest;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -446,7 +446,7 @@ extends AbstractDelegationTokenIdentifier>
   public synchronized void verifyToken(TokenIdent identifier, byte[] password)
   throws InvalidToken {
 byte[] storedPassword = retrievePassword(identifier);
-if (!Arrays.equals(password, storedPassword)) {
+if (!MessageDigest.isEqual(password, storedPassword)) {
   throw new InvalidToken("token (" + identifier
   + ") is invalid, password doesn't match");
 }
@@ -489,7 +489,7 @@ extends AbstractDelegationTokenIdentifier>
   + " with sequenceNumber=" + id.getSequenceNumber());
 }
 byte[] password = createPassword(token.getIdentifier(), key.getKey());
-if (!Arrays.equals(password, token.getPassword())) {
+if (!MessageDigest.isEqual(password, token.getPassword())) {
   throw new AccessControlException(renewer +
   " is trying to renew a token with wrong password");
 }





[3/3] hadoop git commit: HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu

2017-01-19 Thread stevel
HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu

(cherry picked from commit 77401bd5fcca5127c9908156971eeec468371f47)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed33ce11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed33ce11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed33ce11

Branch: refs/heads/trunk
Commit: ed33ce11dd8de36fb79e103d8491d077cd4aaf77
Parents: 1763467
Author: Steve Loughran 
Authored: Thu Jan 19 11:24:58 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 11:25:40 2017 +

--
 .../apache/hadoop/tools/mapred/RetriableFileCopyCommand.java  | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed33ce11/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 82b3b62..d1cdfdd 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -189,9 +189,10 @@ public class RetriableFileCopyCommand extends RetriableCommand {
       throws IOException {
     final Path sourcePath = source.getPath();
     FileSystem fs = sourcePath.getFileSystem(configuration);
-    if (fs.getFileStatus(sourcePath).getLen() != targetLen)
-      throw new IOException("Mismatch in length of source:" + sourcePath
-          + " and target:" + target);
+    long srcLen = fs.getFileStatus(sourcePath).getLen();
+    if (srcLen != targetLen)
+      throw new IOException("Mismatch in length of source:" + sourcePath + " (" + srcLen +
+          ") and target:" + target + " (" + targetLen + ")");
   }
 
   private void compareCheckSums(FileSystem sourceFS, Path source,





[1/3] hadoop git commit: HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu

2017-01-19 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c6c29d008 -> b2704702f
  refs/heads/branch-2.8 b8b8b9a32 -> 77401bd5f
  refs/heads/trunk 176346721 -> ed33ce11d


HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77401bd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77401bd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77401bd5

Branch: refs/heads/branch-2.8
Commit: 77401bd5fcca5127c9908156971eeec468371f47
Parents: b8b8b9a
Author: Steve Loughran 
Authored: Thu Jan 19 11:24:58 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 11:24:58 2017 +

--
 .../apache/hadoop/tools/mapred/RetriableFileCopyCommand.java  | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77401bd5/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index acb30ee..d1c8286 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -189,9 +189,10 @@ public class RetriableFileCopyCommand extends RetriableCommand {
       throws IOException {
     final Path sourcePath = sourceFileStatus.getPath();
     FileSystem fs = sourcePath.getFileSystem(configuration);
-    if (fs.getFileStatus(sourcePath).getLen() != targetLen)
-      throw new IOException("Mismatch in length of source:" + sourcePath
-          + " and target:" + target);
+    long srcLen = fs.getFileStatus(sourcePath).getLen();
+    if (srcLen != targetLen)
+      throw new IOException("Mismatch in length of source:" + sourcePath + " (" + srcLen +
+          ") and target:" + target + " (" + targetLen + ")");
   }
 
   private void compareCheckSums(FileSystem sourceFS, Path source,





[2/3] hadoop git commit: HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu

2017-01-19 Thread stevel
HADOOP-13496. Include file lengths in Mismatch in length error for distcp. Contributed by Ted Yu

(cherry picked from commit 77401bd5fcca5127c9908156971eeec468371f47)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2704702
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2704702
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2704702

Branch: refs/heads/branch-2
Commit: b2704702f9784034114c67d64800363d5e6378e4
Parents: c6c29d0
Author: Steve Loughran 
Authored: Thu Jan 19 11:24:58 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 11:25:20 2017 +

--
 .../apache/hadoop/tools/mapred/RetriableFileCopyCommand.java  | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2704702/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index ba2e0af..bed9162 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -189,9 +189,10 @@ public class RetriableFileCopyCommand extends RetriableCommand {
       throws IOException {
     final Path sourcePath = sourceFileStatus.getPath();
     FileSystem fs = sourcePath.getFileSystem(configuration);
-    if (fs.getFileStatus(sourcePath).getLen() != targetLen)
-      throw new IOException("Mismatch in length of source:" + sourcePath
-          + " and target:" + target);
+    long srcLen = fs.getFileStatus(sourcePath).getLen();
+    if (srcLen != targetLen)
+      throw new IOException("Mismatch in length of source:" + sourcePath + " (" + srcLen +
+          ") and target:" + target + " (" + targetLen + ")");
   }
 
   private void compareCheckSums(FileSystem sourceFS, Path source,





hadoop git commit: HADOOP-13999 Add -DskipShade maven profile to disable jar shading to reduce compile time. Contributed by Arun Suresh

2017-01-19 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 07487b459 -> b3cd1a2b4


HADOOP-13999 Add -DskipShade maven profile to disable jar shading to reduce compile time. Contributed by Arun Suresh

(cherry picked from commit 85e4961f60b7f8cd1343b6f2b9f4c8bb1a5de6ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3cd1a2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3cd1a2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3cd1a2b

Branch: refs/heads/HADOOP-13345
Commit: b3cd1a2b41d98139d6850e1989c62f945c32f321
Parents: 07487b4
Author: Steve Loughran 
Authored: Thu Jan 19 11:49:40 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 11:51:15 2017 +

--
 hadoop-client-modules/hadoop-client-api/pom.xml | 318 +++--
 .../hadoop-client-integration-tests/pom.xml | 173 ---
 .../hadoop-client-minicluster/pom.xml   | 426 ++
 .../hadoop-client-runtime/pom.xml   | 446 ++-
 4 files changed, 741 insertions(+), 622 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3cd1a2b/hadoop-client-modules/hadoop-client-api/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 9342d53..de34a9a 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -68,149 +68,179 @@
   
 
   
-  
-
-  
-  
-org.apache.maven.plugins
-maven-source-plugin
-  
-true
-  
-  
-  
-org.apache.maven.plugins
-maven-shade-plugin
-
-  
-org.apache.hadoop
-hadoop-maven-plugins
-${project.version}
-  
-
-
-  
-package
-
-  shade
-
-
-  
-
-  org.apache.hadoop:*
-
-  
-  
-
-
-  org.apache.hadoop:hadoop-yarn-common
-  
-
org/apache/hadoop/yarn/factories/package-info.class
-
org/apache/hadoop/yarn/util/package-info.class
-
org/apache/hadoop/yarn/factory/providers/package-info.class
-
org/apache/hadoop/yarn/client/api/impl/package-info.class
-
org/apache/hadoop/yarn/client/api/package-info.class
-  
-
-  
-  
-
-  org/
-  
${shaded.dependency.prefix}.org.
-  
-org/apache/hadoop/*
-org/apache/hadoop/**/*
-
-org/apache/htrace/*
-org/apache/htrace/**/*
-org/slf4j/*
-org/slf4j/**/*
-org/apache/commons/logging/*
-org/apache/commons/logging/**/*
-org/apache/log4j/*
-org/apache/log4j/**/*
-**/pom.xml
-
-org/ietf/jgss/*
-org/omg/**/*
-org/w3c/dom/*
-org/w3c/dom/**/*
-org/xml/sax/*
-org/xml/sax/**/*
-  
-
-
-  com/
-  
${shaded.dependency.prefix}.com.
-  
-**/pom.xml
-
-com/sun/tools/*
-com/sun/javadoc/*
-com/sun/security/*
-com/sun/jndi/*
-com/sun/management/*
-com/sun/tools/**/*
-com/sun/javadoc/**/*
-com/sun/security/**/*
-com/sun/jndi/**/*
-com/sun/management/**/*
-  
-
-
-  io/
-  
${shaded.dependency.prefix}.io.
-  
-**/pom.xml
-
-io/compression/*
-io/compression/**/*
-io/mapfile/*
-io/mapfile/**/*
-io/map/index/*
-io/seqfile/*
-io/seqfile/**/*
-io/file/buffer/size
-io/skip/checksum/errors
-io/sort/*
-io/serializations
-  
-
-
-  
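
Usage note (inferred from the commit summary rather than the XML diff above, which the archive has mangled and truncated): with the new profile in place, a developer iterating on the client modules can skip the expensive shaded-jar assembly with `mvn install -DskipShade` and re-enable shading simply by omitting the property.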

hadoop git commit: HADOOP-13999 Add -DskipShade maven profile to disable jar shading to reduce compile time. Contributed by Arun Suresh

2017-01-19 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed33ce11d -> 85e4961f6


HADOOP-13999 Add -DskipShade maven profile to disable jar shading to reduce compile time. Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85e4961f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85e4961f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85e4961f

Branch: refs/heads/trunk
Commit: 85e4961f60b7f8cd1343b6f2b9f4c8bb1a5de6ac
Parents: ed33ce1
Author: Steve Loughran 
Authored: Thu Jan 19 11:49:40 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 11:49:40 2017 +

--
 hadoop-client-modules/hadoop-client-api/pom.xml | 318 +++--
 .../hadoop-client-integration-tests/pom.xml | 173 ---
 .../hadoop-client-minicluster/pom.xml   | 426 ++
 .../hadoop-client-runtime/pom.xml   | 446 ++-
 4 files changed, 741 insertions(+), 622 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85e4961f/hadoop-client-modules/hadoop-client-api/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 9342d53..de34a9a 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -68,149 +68,179 @@
   
 
   
-  
-
-  
-  
-org.apache.maven.plugins
-maven-source-plugin
-  
-true
-  
-  
-  
-org.apache.maven.plugins
-maven-shade-plugin
-
-  
-org.apache.hadoop
-hadoop-maven-plugins
-${project.version}
-  
-
-
-  
-package
-
-  shade
-
-
-  
-
-  org.apache.hadoop:*
-
-  
-  
-
-
-  org.apache.hadoop:hadoop-yarn-common
-  
-
org/apache/hadoop/yarn/factories/package-info.class
-
org/apache/hadoop/yarn/util/package-info.class
-
org/apache/hadoop/yarn/factory/providers/package-info.class
-
org/apache/hadoop/yarn/client/api/impl/package-info.class
-
org/apache/hadoop/yarn/client/api/package-info.class
-  
-
-  
-  
-
-  org/
-  
${shaded.dependency.prefix}.org.
-  
-org/apache/hadoop/*
-org/apache/hadoop/**/*
-
-org/apache/htrace/*
-org/apache/htrace/**/*
-org/slf4j/*
-org/slf4j/**/*
-org/apache/commons/logging/*
-org/apache/commons/logging/**/*
-org/apache/log4j/*
-org/apache/log4j/**/*
-**/pom.xml
-
-org/ietf/jgss/*
-org/omg/**/*
-org/w3c/dom/*
-org/w3c/dom/**/*
-org/xml/sax/*
-org/xml/sax/**/*
-  
-
-
-  com/
-  
${shaded.dependency.prefix}.com.
-  
-**/pom.xml
-
-com/sun/tools/*
-com/sun/javadoc/*
-com/sun/security/*
-com/sun/jndi/*
-com/sun/management/*
-com/sun/tools/**/*
-com/sun/javadoc/**/*
-com/sun/security/**/*
-com/sun/jndi/**/*
-com/sun/management/**/*
-  
-
-
-  io/
-  
${shaded.dependency.prefix}.io.
-  
-**/pom.xml
-
-io/compression/*
-io/compression/**/*
-io/mapfile/*
-io/mapfile/**/*
-io/map/index/*
-io/seqfile/*
-io/seqfile/**/*
-io/file/buffer/size
-io/skip/checksum/errors
-io/sort/*
-io/serializations
-  
-
-
-  javax/servlet/
-  
${shaded.dependency.prefix}.javax.servlet.
- 

hadoop git commit: YARN-6110. Fix opportunistic containers documentation. (Akira Ajisaka via asuresh)

2017-01-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 85e4961f6 -> 7e8d32147


YARN-6110. Fix opportunistic containers documentation. (Akira Ajisaka via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e8d3214
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e8d3214
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e8d3214

Branch: refs/heads/trunk
Commit: 7e8d32147c701d5207feeb43cb3ae728e357d9dc
Parents: 85e4961
Author: Arun Suresh 
Authored: Thu Jan 19 06:14:28 2017 -0800
Committer: Arun Suresh 
Committed: Thu Jan 19 06:14:28 2017 -0800

--
 .../src/site/markdown/OpportunisticContainers.md   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e8d3214/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
index 223930e..496777a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
@@ -19,8 +19,8 @@ Opportunistic Containers
 * [Quick Guide](#Quick_Guide)
 * [Main Goal](#Main_Goal)
 * [Enabling Opportunistic Containers](#Enabling_Opportunistic_Containers)
-* [Running a Sample Job](Running_a_Sample_Job)
-* [Opportunistic Containers in Web UI](Opportunistic_Containers_in_Web_UI)
+* [Running a Sample Job](#Running_a_Sample_Job)
+* [Opportunistic Containers in Web UI](#Opportunistic_Containers_in_Web_UI)
 * [Overview](#Overview)
 * [Container Execution Types](#Container_Execution_Types)
 * [Execution of Opportunistic Containers](#Execution_of_Opportunistic_Containers)
@@ -71,6 +71,7 @@ By default, allocation of opportunistic containers is performed centrally throug
 ###Running a Sample Job
 
 The following command can be used to run a sample pi map-reduce job, executing 40% of mappers using opportunistic containers (substitute `3.0.0-alpha2-SNAPSHOT` below with the version of Hadoop you are using):
+
 ```
 $ hadoop jar hadoop-3.0.0-alpha2-SNAPSHOT/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.0-alpha2-SNAPSHOT.jar pi -Dmapreduce.job.num-opportunistic-maps-percent="40" 50 100
 ```
@@ -81,6 +82,7 @@ By changing the value of `mapreduce.job.num-opportunistic-maps-percent` in the a
 ###Opportunistic Containers in Web UI
 
 When opportunistic container allocation is enabled, the following new columns can be observed in the Nodes page of the Web UI (`rm-address:8088/cluster/nodes`):
+
 * Running Containers (O): number of running opportunistic containers on each node;
 * Mem Used (O): Total memory used by opportunistic containers on each node;
 * VCores Used (O): Total CPU virtual cores used by opportunistic containers on each node;
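
For reference, the reason for the one-character fix: without the leading `#`, Markdown renders `[Running a Sample Job](Running_a_Sample_Job)` as a relative link to a file of that name, whereas `(#Running_a_Sample_Job)` is an in-page anchor, which is what this table of contents needs.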





hadoop git commit: YARN-6110. Fix opportunistic containers documentation. (Akira Ajisaka via asuresh)

2017-01-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b2704702f -> 3c6ec2095


YARN-6110. Fix opportunistic containers documentation. (Akira Ajisaka via asuresh)

(cherry picked from commit 7e8d32147c701d5207feeb43cb3ae728e357d9dc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c6ec209
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c6ec209
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c6ec209

Branch: refs/heads/branch-2
Commit: 3c6ec20955fc18549d3519e5ea3bcd46a09a13c9
Parents: b270470
Author: Arun Suresh 
Authored: Thu Jan 19 06:14:28 2017 -0800
Committer: Arun Suresh 
Committed: Thu Jan 19 06:16:30 2017 -0800

--
 .../src/site/markdown/OpportunisticContainers.md   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c6ec209/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
index 223930e..496777a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/OpportunisticContainers.md
@@ -19,8 +19,8 @@ Opportunistic Containers
 * [Quick Guide](#Quick_Guide)
 * [Main Goal](#Main_Goal)
 * [Enabling Opportunistic Containers](#Enabling_Opportunistic_Containers)
-* [Running a Sample Job](Running_a_Sample_Job)
-* [Opportunistic Containers in Web UI](Opportunistic_Containers_in_Web_UI)
+* [Running a Sample Job](#Running_a_Sample_Job)
+* [Opportunistic Containers in Web UI](#Opportunistic_Containers_in_Web_UI)
 * [Overview](#Overview)
 * [Container Execution Types](#Container_Execution_Types)
 * [Execution of Opportunistic Containers](#Execution_of_Opportunistic_Containers)
@@ -71,6 +71,7 @@ By default, allocation of opportunistic containers is performed centrally throug
 ###Running a Sample Job
 
 The following command can be used to run a sample pi map-reduce job, executing 40% of mappers using opportunistic containers (substitute `3.0.0-alpha2-SNAPSHOT` below with the version of Hadoop you are using):
+
 ```
 $ hadoop jar hadoop-3.0.0-alpha2-SNAPSHOT/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.0-alpha2-SNAPSHOT.jar pi -Dmapreduce.job.num-opportunistic-maps-percent="40" 50 100
 ```
@@ -81,6 +82,7 @@ By changing the value of `mapreduce.job.num-opportunistic-maps-percent` in the a
 ###Opportunistic Containers in Web UI
 
 When opportunistic container allocation is enabled, the following new columns can be observed in the Nodes page of the Web UI (`rm-address:8088/cluster/nodes`):
+
 * Running Containers (O): number of running opportunistic containers on each node;
 * Mem Used (O): Total memory used by opportunistic containers on each node;
 * VCores Used (O): Total CPU virtual cores used by opportunistic containers on each node;





hadoop git commit: HADOOP-13877 S3Guard: fix TestDynamoDBMetadataStore when fs.s3a.s3guard.ddb.table is set

2017-01-19 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 b3cd1a2b4 -> 31cee3528


HADOOP-13877 S3Guard: fix TestDynamoDBMetadataStore when fs.s3a.s3guard.ddb.table is set


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31cee352
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31cee352
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31cee352

Branch: refs/heads/HADOOP-13345
Commit: 31cee352801344f6a10b7cce76ac0e1586616734
Parents: b3cd1a2
Author: Steve Loughran 
Authored: Thu Jan 19 13:58:28 2017 +
Committer: Steve Loughran 
Committed: Thu Jan 19 13:58:28 2017 +

--
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |   7 ++
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  | 102 ++-
 2 files changed, 63 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31cee352/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java
--
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java
index c2cdfa2..ebf95d7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java
@@ -82,6 +82,13 @@ public abstract class MetadataStoreTestBase extends Assert {
 
   private MetadataStore ms;
 
+  /**
+   * @return reference to the test contract.
+   */
+  protected AbstractMSContract getContract() {
+return contract;
+  }
+
   @Before
   public void setUp() throws Exception {
 LOG.debug("== Setup. ==");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31cee352/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java
--
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java
index fe38c12..af23dfd 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDynamoDBMetadataStore.java
@@ -185,6 +185,14 @@ public class TestDynamoDBMetadataStore extends MetadataStoreTestBase {
 : new S3AFileStatus(size, getModTime(), path, BLOCK_SIZE, owner);
   }
 
+  private DynamoDBMetadataStore getDynamoMetadataStore() throws IOException {
+return (DynamoDBMetadataStore) getContract().getMetadataStore();
+  }
+
+  private S3AFileSystem getFileSystem() {
+return (S3AFileSystem) getContract().getFileSystem();
+  }
+
   /**
* This tests that after initialize() using an S3AFileSystem object, the
* instance should have been initialized successfully, and tables are ACTIVE.
@@ -192,7 +200,7 @@ public class TestDynamoDBMetadataStore extends MetadataStoreTestBase {
   @Test
   public void testInitialize() throws IOException {
 final String tableName = "testInitializeWithFileSystem";
-final S3AFileSystem s3afs = createContract().getFileSystem();
+final S3AFileSystem s3afs = getFileSystem();
 final Configuration conf = s3afs.getConf();
 conf.set(Constants.S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
@@ -213,7 +221,9 @@ public class TestDynamoDBMetadataStore extends MetadataStoreTestBase {
   @Test
   public void testInitializeWithConfiguration() throws IOException {
 final String tableName = "testInitializeWithConfiguration";
-final Configuration conf = new Configuration();
+final Configuration conf = getFileSystem().getConf();
+conf.unset(Constants.S3GUARD_DDB_TABLE_NAME_KEY);
+conf.unset(Constants.S3GUARD_DDB_ENDPOINT_KEY);
 try {
   DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore();
   ddbms.initialize(conf);
@@ -270,54 +280,54 @@ public class TestDynamoDBMetadataStore extends MetadataStoreTestBase {
 final Path newDir = new Path(root, "newDir");
 LOG.info("doTestBatchWrite: oldDir={}, newDir={}", oldDir, newDir);
 
-try (DynamoDBMetadataStore ms = createContract().getMetadataStore()) {
-  ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true)));
-  ms.put(new PathMetadata(basicFileStatus(newDir, 0, true)));
+DynamoDBMetadataStore ms = getDynamoMetadataStore();
+ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true)));
+ms.put(new PathMetadata(basic
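
The gist of the refactor, as far as the truncated diff shows: rather than each test case building its own contract (and with it a fresh DynamoDB client and table), the cases now reuse the shared store and filesystem through the new getContract()/getDynamoMetadataStore() accessors, and derive their Configuration from the test filesystem, unsetting fs.s3a.s3guard.ddb.table and the endpoint key where a test needs defaults. That way a table name preconfigured by the user no longer breaks the suite.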

[1/2] hadoop git commit: YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf path is absolute. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3c6ec2095 -> 5251de00f


YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf path is absolute. Contributed by Allen Wittenauer.

(cherry picked from commit b913677365ad77ca7daa5741c04c14df1a0313cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5251de00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5251de00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5251de00

Branch: refs/heads/branch-2
Commit: 5251de00fa5ad09371fa05e8bc6b70eea4b01f56
Parents: 198bd84
Author: Chris Nauroth 
Authored: Tue Aug 2 22:24:34 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:48:27 2017 +0530

--
 .../src/CMakeLists.txt  |  13 ++
 .../src/config.h.cmake  |  12 ++
 .../container-executor/impl/configuration.h |   4 +
 .../impl/container-executor.c   |  26 +++-
 .../impl/container-executor.h   |   8 +-
 .../container-executor/impl/get_executable.c| 118 ---
 .../main/native/container-executor/impl/main.c  |   6 +-
 .../test/test-container-executor.c  |   4 +
 8 files changed, 167 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5251de00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 204b3ca..fbc794c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -19,10 +19,20 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
 list(APPEND CMAKE_MODULE_PATH 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
 
+# determine if container-executor.conf.dir is an absolute
+# path in case the OS we're compiling on doesn't have
+# a hook in get_executable. We'll use this define
+# later in the code to potentially throw a compile error
+string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
+string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
+
 # Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS}")
 
+include(CheckIncludeFiles)
+check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
+
 include(CheckFunctionExists)
 check_function_exists(canonicalize_file_name HAVE_CANONICALIZE_FILE_NAME)
 check_function_exists(fcloseall HAVE_FCLOSEALL)
@@ -32,6 +42,9 @@ check_function_exists(fstatat HAVE_FSTATAT)
 check_function_exists(openat HAVE_OPENAT)
 check_function_exists(unlinkat HAVE_UNLINKAT)
 
+include(CheckSymbolExists)
+check_symbol_exists(sysctl "sys/types.h;sys/sysctl.h" HAVE_SYSCTL)
+
 if(APPLE)
   include_directories( /System/Library/Frameworks )
   find_library(COCOA_LIBRARY Cocoa)
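
The new CMake fragment records whether HADOOP_CONF_DIR starts with "/" so
that native code which cannot locate its own executable (no get_executable
hook for the platform) can refuse to build with a relative conf dir. Purely
as a cross-language illustration of that fail-fast invariant (the patch
itself enforces it at compile time in C/CMake, not in Java):

import java.nio.file.Path;
import java.nio.file.Paths;

public class ConfDirGuard {
  // Illustrative stand-in: reject a relative conf dir whenever there is
  // no reliable way to resolve it against the running executable's path.
  static void requireAbsoluteConfDir(String confDir, boolean canLocateSelf) {
    Path path = Paths.get(confDir);
    if (!canLocateSelf && !path.isAbsolute()) {
      throw new IllegalStateException(
          "conf dir must be an absolute path on this platform: " + confDir);
    }
  }

  public static void main(String[] args) {
    requireAbsoluteConfDir("/etc/hadoop/conf", false);   // accepted
    requireAbsoluteConfDir("../etc/hadoop", false);      // throws
  }
}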

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5251de00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
index 0ab7bbd..d8e710f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
@@ -18,14 +18,26 @@
 #ifndef CONFIG_H
 #define CONFIG_H
 
+/* custom configs */
+
 #cmakedefine HADOOP_CONF_DIR "@HADOOP_CONF_DIR@"
 
+#cmakedefine HADOOP_CONF_DIR_IS_ABS "@HADOOP_CONF_DIR_IS_ABS@"
+
+/* specific functions */
+
 #cmakedefine HAVE_CANONICALIZE_FILE_NAME @HAVE_CANONICALIZE_FILE_NAME@
 #cmakedefine HAVE_FCHMODAT @HAVE_FCHMODAT@
 #cmakedefine HAVE_FCLOSEALL @HAVE_FCLOSEALL@
 #cmakedefine HAVE_FDOPENDIR @HAVE_FDOPENDIR@
 #cmakedefine HAVE_FSTATAT @HAVE_FSTATAT@
 #cmakedefine HAVE_OPENAT @HAVE_OPENAT@
+#cmakedefine HAVE_SYSCTL @HAVE_SYSCTL@
 #cmakedefine HAVE_UNLINKAT @HAVE_UNLINKAT@
 
+
+/* specific headers */
+
+#cmakedefine HAVE_SYS_SYSCTL_H @HAVE_SYS_SYSCTL_H@
+
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5251de00/hadoop-yarn-project/hadoop-yarn/

[2/2] hadoop git commit: YARN-5121. fix some container-executor portability issues. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
YARN-5121. fix some container-executor portability issues. Contributed by Allen 
Wittenauer.

(cherry picked from commit ef501b1a0b4c34a2cc43eb082d1c2364684cd7f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/198bd84b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/198bd84b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/198bd84b

Branch: refs/heads/branch-2
Commit: 198bd84b3307cab6dcaa7d1e306e1ee6f0f324d2
Parents: 3c6ec20
Author: Chris Nauroth 
Authored: Sat Jul 30 08:26:00 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:48:27 2017 +0530

--
 LICENSE.txt |  32 +
 .../hadoop-yarn-server-nodemanager/pom.xml  |  12 ++
 .../src/CMakeLists.txt  |  16 ++-
 .../src/config.h.cmake  |   8 +-
 .../container-executor/impl/compat/fchmodat.h   |  56 
 .../container-executor/impl/compat/fdopendir.h  |  52 
 .../container-executor/impl/compat/fstatat.h|  67 ++
 .../container-executor/impl/compat/openat.h |  74 +++
 .../container-executor/impl/compat/unlinkat.h   |  67 ++
 .../container-executor/impl/configuration.c |  15 ++-
 .../container-executor/impl/configuration.h |   6 +-
 .../impl/container-executor.c   | 112 
 .../impl/container-executor.h   |   2 +-
 .../container-executor/impl/get_executable.c| 127 +++
 .../main/native/container-executor/impl/main.c  |  38 --
 .../test/test-container-executor.c  | 103 +++
 16 files changed, 691 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/198bd84b/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 0e4b492..61ebbd6 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -345,6 +345,38 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+For 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h:
+
+Copyright (c) 2012 The FreeBSD Foundation
+All rights reserved.
+
+This software was developed by Pawel Jakub Dawidek under sponsorship from
+the FreeBSD Foundation.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+=
+
 The binary distribution of this product bundles binaries of leveldb
 (http://code.google.com/p/leveldb/), which is available under the following
 license:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/198bd84b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index dd14309..83b217a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -274,6 +274,18 @@
   
 
   
+org.apache.rat
+apache-rat-plugin
+
+  
+
src/main/native/container-executor/impl/compat/fstatat.h
+
src/main/native/container-executor/impl/compat/

[2/2] hadoop git commit: YARN-5121. fix some container-executor portability issues. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
YARN-5121. fix some container-executor portability issues. Contributed by Allen 
Wittenauer.

(cherry picked from commit ef501b1a0b4c34a2cc43eb082d1c2364684cd7f1)
(cherry picked from commit 384803d09ac45886e74a0501f4b419a2b756c20c)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3668da2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3668da2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3668da2c

Branch: refs/heads/branch-2.8
Commit: 3668da2c18e85a4df1896fff145b11ee15e5d9e7
Parents: 77401bd
Author: Chris Nauroth 
Authored: Sat Jul 30 08:26:00 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:51:38 2017 +0530

--
 LICENSE.txt |  32 +
 .../hadoop-yarn-server-nodemanager/pom.xml  |  12 ++
 .../src/CMakeLists.txt  |  16 ++-
 .../src/config.h.cmake  |   8 +-
 .../container-executor/impl/compat/fchmodat.h   |  56 
 .../container-executor/impl/compat/fdopendir.h  |  52 
 .../container-executor/impl/compat/fstatat.h|  67 ++
 .../container-executor/impl/compat/openat.h |  74 +++
 .../container-executor/impl/compat/unlinkat.h   |  67 ++
 .../container-executor/impl/configuration.c |  15 ++-
 .../container-executor/impl/configuration.h |   6 +-
 .../impl/container-executor.c   | 112 
 .../impl/container-executor.h   |   2 +-
 .../container-executor/impl/get_executable.c| 127 +++
 .../main/native/container-executor/impl/main.c  |  38 --
 .../test/test-container-executor.c  | 104 +++
 16 files changed, 692 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3668da2c/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 0e4b492..61ebbd6 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -345,6 +345,38 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+For 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h:
+
+Copyright (c) 2012 The FreeBSD Foundation
+All rights reserved.
+
+This software was developed by Pawel Jakub Dawidek under sponsorship from
+the FreeBSD Foundation.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+=
+
 The binary distribution of this product bundles binaries of leveldb
 (http://code.google.com/p/leveldb/), which is available under the following
 license:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3668da2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 447271f..54a3d7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -

[1/2] hadoop git commit: YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf path is absolute. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 deb0b10e8 -> 6e5876724


YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf 
path is absolute. Contributed by Allen Wittenauer.

(cherry picked from commit b913677365ad77ca7daa5741c04c14df1a0313cd)
(cherry picked from commit c5203e170ddd68fc8532792878c37d4762068472)
(cherry picked from commit 30b3232a71afbfc177493510ea97adea5cb8b418)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e587672
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e587672
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e587672

Branch: refs/heads/branch-2.8.0
Commit: 6e5876724d19dc0f840342fee7a5f7d90ea56a8f
Parents: 35afd6d
Author: Chris Nauroth 
Authored: Tue Aug 2 22:24:34 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:54:19 2017 +0530

--
 .../src/CMakeLists.txt  |  13 ++
 .../src/config.h.cmake  |  12 ++
 .../container-executor/impl/configuration.h |   4 +
 .../impl/container-executor.c   |  26 +++-
 .../impl/container-executor.h   |   8 +-
 .../container-executor/impl/get_executable.c| 118 ---
 .../main/native/container-executor/impl/main.c  |   6 +-
 .../test/test-container-executor.c  |   4 +
 8 files changed, 167 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e587672/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 204b3ca..fbc794c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -19,10 +19,20 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
 list(APPEND CMAKE_MODULE_PATH 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
 
+# determine if container-executor.conf.dir is an absolute
+# path in case the OS we're compiling on doesn't have
+# a hook in get_executable. We'll use this define
+# later in the code to potentially throw a compile error
+string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
+string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
+
 # Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS}")
 
+include(CheckIncludeFiles)
+check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
+
 include(CheckFunctionExists)
 check_function_exists(canonicalize_file_name HAVE_CANONICALIZE_FILE_NAME)
 check_function_exists(fcloseall HAVE_FCLOSEALL)
@@ -32,6 +42,9 @@ check_function_exists(fstatat HAVE_FSTATAT)
 check_function_exists(openat HAVE_OPENAT)
 check_function_exists(unlinkat HAVE_UNLINKAT)
 
+include(CheckSymbolExists)
+check_symbol_exists(sysctl "sys/types.h;sys/sysctl.h" HAVE_SYSCTL)
+
 if(APPLE)
   include_directories( /System/Library/Frameworks )
   find_library(COCOA_LIBRARY Cocoa)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e587672/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
index 0ab7bbd..d8e710f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
@@ -18,14 +18,26 @@
 #ifndef CONFIG_H
 #define CONFIG_H
 
+/* custom configs */
+
 #cmakedefine HADOOP_CONF_DIR "@HADOOP_CONF_DIR@"
 
+#cmakedefine HADOOP_CONF_DIR_IS_ABS "@HADOOP_CONF_DIR_IS_ABS@"
+
+/* specific functions */
+
 #cmakedefine HAVE_CANONICALIZE_FILE_NAME @HAVE_CANONICALIZE_FILE_NAME@
 #cmakedefine HAVE_FCHMODAT @HAVE_FCHMODAT@
 #cmakedefine HAVE_FCLOSEALL @HAVE_FCLOSEALL@
 #cmakedefine HAVE_FDOPENDIR @HAVE_FDOPENDIR@
 #cmakedefine HAVE_FSTATAT @HAVE_FSTATAT@
 #cmakedefine HAVE_OPENAT @HAVE_OPENAT@
+#cmakedefine HAVE_SYSCTL @HAVE_SYSCTL@
 #cmakedefine HAVE_UNLINKAT @HAVE_UNLINKAT@
 
+
+/* specific headers */
+
+#cmakede

[2/2] hadoop git commit: YARN-5121. fix some container-executor portability issues. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
YARN-5121. fix some container-executor portability issues. Contributed by Allen 
Wittenauer.

(cherry picked from commit ef501b1a0b4c34a2cc43eb082d1c2364684cd7f1)
(cherry picked from commit 384803d09ac45886e74a0501f4b419a2b756c20c)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c

(cherry picked from commit cc5965b54138aa51f5309d5683b3879f126d5d3b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35afd6d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35afd6d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35afd6d7

Branch: refs/heads/branch-2.8.0
Commit: 35afd6d781a1c04594900026aed91c26c9a01648
Parents: deb0b10
Author: Chris Nauroth 
Authored: Sat Jul 30 08:26:00 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:54:19 2017 +0530

--
 LICENSE.txt |  32 +
 .../hadoop-yarn-server-nodemanager/pom.xml  |  12 ++
 .../src/CMakeLists.txt  |  16 ++-
 .../src/config.h.cmake  |   8 +-
 .../container-executor/impl/compat/fchmodat.h   |  56 
 .../container-executor/impl/compat/fdopendir.h  |  52 
 .../container-executor/impl/compat/fstatat.h|  67 ++
 .../container-executor/impl/compat/openat.h |  74 +++
 .../container-executor/impl/compat/unlinkat.h   |  67 ++
 .../container-executor/impl/configuration.c |  15 ++-
 .../container-executor/impl/configuration.h |   6 +-
 .../impl/container-executor.c   | 112 
 .../impl/container-executor.h   |   2 +-
 .../container-executor/impl/get_executable.c| 127 +++
 .../main/native/container-executor/impl/main.c  |  38 --
 .../test/test-container-executor.c  | 104 +++
 16 files changed, 692 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35afd6d7/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 0e4b492..61ebbd6 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -345,6 +345,38 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+For 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h:
+
+Copyright (c) 2012 The FreeBSD Foundation
+All rights reserved.
+
+This software was developed by Pawel Jakub Dawidek under sponsorship from
+the FreeBSD Foundation.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+=
+
 The binary distribution of this product bundles binaries of leveldb
 (http://code.google.com/p/leveldb/), which is available under the following
 license:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35afd6d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 1768d5c..1d76c5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/ha

[1/2] hadoop git commit: YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf path is absolute. Contributed by Allen Wittenauer.

2017-01-19 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 77401bd5f -> cf6c143c0


YARN-5456. container-executor support for FreeBSD, NetBSD, and others if conf 
path is absolute. Contributed by Allen Wittenauer.

(cherry picked from commit b913677365ad77ca7daa5741c04c14df1a0313cd)
(cherry picked from commit c5203e170ddd68fc8532792878c37d4762068472)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf6c143c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf6c143c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf6c143c

Branch: refs/heads/branch-2.8
Commit: cf6c143c0f409b9a1fa19d48a8737db7ab88e75e
Parents: 3668da2
Author: Chris Nauroth 
Authored: Tue Aug 2 22:24:34 2016 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 19 20:51:38 2017 +0530

--
 .../src/CMakeLists.txt  |  13 ++
 .../src/config.h.cmake  |  12 ++
 .../container-executor/impl/configuration.h |   4 +
 .../impl/container-executor.c   |  26 +++-
 .../impl/container-executor.h   |   8 +-
 .../container-executor/impl/get_executable.c| 118 ---
 .../main/native/container-executor/impl/main.c  |   6 +-
 .../test/test-container-executor.c  |   4 +
 8 files changed, 167 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf6c143c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 204b3ca..fbc794c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -19,10 +19,20 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
 list(APPEND CMAKE_MODULE_PATH 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
 
+# determine if container-executor.conf.dir is an absolute
+# path in case the OS we're compiling on doesn't have
+# a hook in get_executable. We'll use this define
+# later in the code to potentially throw a compile error
+string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
+string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
+
 # Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS}")
 
+include(CheckIncludeFiles)
+check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
+
 include(CheckFunctionExists)
 check_function_exists(canonicalize_file_name HAVE_CANONICALIZE_FILE_NAME)
 check_function_exists(fcloseall HAVE_FCLOSEALL)
@@ -32,6 +42,9 @@ check_function_exists(fstatat HAVE_FSTATAT)
 check_function_exists(openat HAVE_OPENAT)
 check_function_exists(unlinkat HAVE_UNLINKAT)
 
+include(CheckSymbolExists)
+check_symbol_exists(sysctl "sys/types.h;sys/sysctl.h" HAVE_SYSCTL)
+
 if(APPLE)
   include_directories( /System/Library/Frameworks )
   find_library(COCOA_LIBRARY Cocoa)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf6c143c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
index 0ab7bbd..d8e710f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
@@ -18,14 +18,26 @@
 #ifndef CONFIG_H
 #define CONFIG_H
 
+/* custom configs */
+
 #cmakedefine HADOOP_CONF_DIR "@HADOOP_CONF_DIR@"
 
+#cmakedefine HADOOP_CONF_DIR_IS_ABS "@HADOOP_CONF_DIR_IS_ABS@"
+
+/* specific functions */
+
 #cmakedefine HAVE_CANONICALIZE_FILE_NAME @HAVE_CANONICALIZE_FILE_NAME@
 #cmakedefine HAVE_FCHMODAT @HAVE_FCHMODAT@
 #cmakedefine HAVE_FCLOSEALL @HAVE_FCLOSEALL@
 #cmakedefine HAVE_FDOPENDIR @HAVE_FDOPENDIR@
 #cmakedefine HAVE_FSTATAT @HAVE_FSTATAT@
 #cmakedefine HAVE_OPENAT @HAVE_OPENAT@
+#cmakedefine HAVE_SYSCTL @HAVE_SYSCTL@
 #cmakedefine HAVE_UNLINKAT @HAVE_UNLINKAT@
 
+
+/* specific headers */
+
+#cmakedefine HAVE_SYS_SYSCTL_H @HAVE_SYS_SYSCTL_H@
+
 #endif

http://git-wip-us.a

hadoop git commit: YARN-6104. RegistrySecurity overrides zookeeper sasl system properties. Contributed by Billie Rinaldi

2017-01-19 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7e8d32147 -> efc8faa1b


YARN-6104. RegistrySecurity overrides zookeeper sasl system properties. 
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efc8faa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efc8faa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efc8faa1

Branch: refs/heads/trunk
Commit: efc8faa1bae79c17047e920beeb8af983db08e93
Parents: 7e8d321
Author: Jian He 
Authored: Thu Jan 19 10:18:59 2017 -0800
Committer: Jian He 
Committed: Thu Jan 19 10:18:59 2017 -0800

--
 .../client/impl/zk/RegistrySecurity.java| 11 --
 .../registry/secure/TestSecureRegistry.java | 22 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efc8faa1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index a3ec77a..bdb79be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -749,8 +749,15 @@ public class RegistrySecurity extends AbstractService {
   String context)  {
 RegistrySecurity.validateContext(context);
 enableZookeeperClientSASL();
-System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, username);
-System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+setSystemPropertyIfUnset(PROP_ZK_SASL_CLIENT_USERNAME, username);
+setSystemPropertyIfUnset(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+  }
+
+  private static void setSystemPropertyIfUnset(String name, String value) {
+String existingValue = System.getProperty(name);
+if (existingValue == null || existingValue.isEmpty()) {
+  System.setProperty(name, value);
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efc8faa1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
index 083f7f9..9d5848e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
@@ -37,6 +37,8 @@ import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_CONTEXT;
+import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_USERNAME;
 
 /**
  * Verify that the Mini ZK service can be started up securely
@@ -138,6 +140,26 @@ public class TestSecureRegistry extends 
AbstractSecureRegistryTest {
 }
   }
 
+  @Test
+  public void testSystemPropertyOverwrite() {
+System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, "");
+System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, "");
+RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+ZOOKEEPER_CLIENT_CONTEXT);
+assertEquals(ZOOKEEPER, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+assertEquals(ZOOKEEPER_CLIENT_CONTEXT,
+System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+
+String userName = "user1";
+String context = "context1";
+System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, userName);
+System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+ZOOKEEPER_CLIENT_CONTEXT);
+assertEquals(userName, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+assertEquals(context, System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+  }
+
   /**
* Start a curator service instance
* @param name name
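
The fix replaces unconditional System.setProperty calls with a guard that
only fills in a value when the property is currently null or empty, so
operator-supplied ZooKeeper SASL settings survive. A self-contained sketch
of the idiom; the literal property names below are illustrative stand-ins
for the PROP_ZK_SASL_CLIENT_USERNAME and PROP_ZK_SASL_CLIENT_CONTEXT
constants:

public class SetIfUnsetDemo {
  // Same guard as the patch: only set when nothing meaningful is there.
  static void setSystemPropertyIfUnset(String name, String value) {
    String existing = System.getProperty(name);
    if (existing == null || existing.isEmpty()) {
      System.setProperty(name, value);
    }
  }

  public static void main(String[] args) {
    // An operator already chose a username; the guard must preserve it.
    System.setProperty("zookeeper.sasl.client.username", "user1");
    setSystemPropertyIfUnset("zookeeper.sasl.client.username", "zookeeper");
    setSystemPropertyIfUnset("zookeeper.sasl.clientconfig", "Client");

    // Prints "user1" (preserved) and "Client" (filled in).
    System.out.println(System.getProperty("zookeeper.sasl.client.username"));
    System.out.println(System.getProperty("zookeeper.sasl.clientconfig"));
  }
}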



hadoop git commit: YARN-6104. RegistrySecurity overrides zookeeper sasl system properties. Contributed by Billie Rinaldi

2017-01-19 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5251de00f -> d37408767


YARN-6104. RegistrySecurity overrides zookeeper sasl system properties. 
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3740876
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3740876
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3740876

Branch: refs/heads/branch-2
Commit: d374087670d39adf81567055b4684d905adc865d
Parents: 5251de0
Author: Jian He 
Authored: Thu Jan 19 10:18:59 2017 -0800
Committer: Jian He 
Committed: Thu Jan 19 10:19:24 2017 -0800

--
 .../client/impl/zk/RegistrySecurity.java| 11 --
 .../registry/secure/TestSecureRegistry.java | 22 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3740876/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index 3d3e811..bf6e5ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -737,8 +737,15 @@ public class RegistrySecurity extends AbstractService {
   String context)  {
 RegistrySecurity.validateContext(context);
 enableZookeeperClientSASL();
-System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, username);
-System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+setSystemPropertyIfUnset(PROP_ZK_SASL_CLIENT_USERNAME, username);
+setSystemPropertyIfUnset(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+  }
+
+  private static void setSystemPropertyIfUnset(String name, String value) {
+String existingValue = System.getProperty(name);
+if (existingValue == null || existingValue.isEmpty()) {
+  System.setProperty(name, value);
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3740876/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
index 083f7f9..9d5848e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
@@ -37,6 +37,8 @@ import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
+import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_CONTEXT;
+import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_USERNAME;
 
 /**
  * Verify that the Mini ZK service can be started up securely
@@ -138,6 +140,26 @@ public class TestSecureRegistry extends 
AbstractSecureRegistryTest {
 }
   }
 
+  @Test
+  public void testSystemPropertyOverwrite() {
+System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, "");
+System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, "");
+RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+ZOOKEEPER_CLIENT_CONTEXT);
+assertEquals(ZOOKEEPER, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+assertEquals(ZOOKEEPER_CLIENT_CONTEXT,
+System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+
+String userName = "user1";
+String context = "context1";
+System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, userName);
+System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
+RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+ZOOKEEPER_CLIENT_CONTEXT);
+assertEquals(userName, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
+assertEquals(context, System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
+  }
+
   /**
* Start a curator service instance
* @param name name



hadoop git commit: Update release version in pom.xml.

2017-01-19 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 6e5876724 -> 94152e171


Update release version in pom.xml.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94152e17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94152e17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94152e17

Branch: refs/heads/branch-2.8.0
Commit: 94152e171178d34864ddf6362239f3c2dda0965f
Parents: 6e58767
Author: Junping Du 
Authored: Thu Jan 19 11:36:17 2017 -0800
Committer: Junping Du 
Committed: Thu Jan 19 11:36:17 2017 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client/pom.xml| 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml| 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-ant/pom.xml  | 4 ++--
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml  | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml| 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 4 ++--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-com

hadoop git commit: YARN-6028. Add document for container metrics (Contributed by Weiwei Yang via Daniel Templeton)

2017-01-19 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk efc8faa1b -> f5839fd6d


YARN-6028. Add document for container metrics (Contributed by Weiwei Yang via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5839fd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5839fd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5839fd6

Branch: refs/heads/trunk
Commit: f5839fd6df77786353160b41eb4b5ceea9380017
Parents: efc8faa
Author: Daniel Templeton 
Authored: Thu Jan 19 12:45:17 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Jan 19 12:45:17 2017 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md  | 53 +++-
 1 file changed, 51 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5839fd6/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 9a19a9b..32d081a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -446,6 +446,57 @@ NodeManagerMetrics shows the statistics of the containers 
in the node. Each metr
 | `goodLocalDirsDiskUtilizationPerc` | Current disk utilization percentage 
across all good local directories |
 | `goodLogDirsDiskUtilizationPerc` | Current disk utilization percentage 
across all good log directories |
 
+ContainerMetrics
+--
+
+ContainerMetrics shows the resource utilization statistics of a container. 
Each metrics record contains tags such as ContainerPid and Hostname as 
additional information along with metrics.
+
+| Name | Description |
+|:---- |:---- |
+| `pMemLimitMBs` | Physical memory limit of the container in MB |
+| `vMemLimitMBs` | Virtual memory limit of the container in MB |
+| `vCoreLimit` | CPU limit of the container in number of vcores |
+| `launchDurationMs` | Container launch duration in msec  |
+| `localizationDurationMs` | Container localization duration in msec |
+| `StartTime` | Time in msec when container starts |
+| `FinishTime` | Time in msec when container finishes |
+| `ExitCode` | Container's exit code |
+| `PMemUsageMBsNumUsage` | Total number of physical memory used metrics |
+| `PMemUsageMBsAvgMBs` | Average physical memory used in MB |
+| `PMemUsageMBsStdevMBs` | Standard deviation of the physical memory used in 
MB |
+| `PMemUsageMBsMinMBs` | Minimum physical memory used in MB |
+| `PMemUsageMBsMaxMBs` | Maximum physical memory used in MB |
+| `PMemUsageMBsIMinMBs` | Minimum physical memory used in MB of current 
_interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PMemUsageMBsIMaxMBs` | Maximum physical memory used in MB of current 
_interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PMemUsageMBsINumUsage` | Total number of physical memory used metrics in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentNumUsage` | Total number of physical CPU cores percent used 
metrics |
+| `PCpuUsagePercentAvgPercents` | Average physical CPU cores percent used |
+| `PCpuUsagePercentStdevPercents` | Standard deviation of physical CPU cores 
percent used |
+| `PCpuUsagePercentMinPercents` | Minimum physical CPU cores percent used|
+| `PCpuUsagePercentMaxPercents` | Maximum physical CPU cores percent used |
+| `PCpuUsagePercentIMinPercents` | Minimum physical CPU cores percent used in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentIMaxPercents` | Maximum physical CPU cores percent used in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentINumUsage` | Total number of physical CPU cores used 
metrics in current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `MilliVcoreUsageNumUsage` | Total number of vcores used metrics |
+| `MilliVcoreUsageAvgMilliVcores` | 1000 times the average vcores used |
+| `MilliVcoreUsageStdevMilliVcores` | 1000 times the standard deviation of 
vcores used |
+| `MilliVcoreUsageMinMilliVcores` | 1000 times the minimum vcores used |
+| `MilliVcoreUsageMaxMilliVcores` | 1000 times the maximum vcores used |
+| `MilliVcoreUsageIMinMilliVcores` | 1000 times the average vcores used in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `MilliVcoreUsageIMaxM
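
These ContainerMetrics records are published through the Hadoop metrics2
system and are typically visible over JMX on the NodeManager. A small
sketch of browsing them with the standard javax.management API; the object
name pattern below is an assumption for illustration and should be checked
against a live NodeManager's JMX view:

import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ListContainerMetrics {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Hypothetical pattern; run inside (or attached to) the NodeManager JVM.
    ObjectName pattern =
        new ObjectName("Hadoop:service=NodeManager,name=ContainerResource*");
    Set<ObjectName> names = mbs.queryNames(pattern, null);
    for (ObjectName name : names) {
      System.out.println(name);
      for (MBeanAttributeInfo attr : mbs.getMBeanInfo(name).getAttributes()) {
        try {
          System.out.printf("  %s = %s%n", attr.getName(),
              mbs.getAttribute(name, attr.getName()));
        } catch (Exception e) {
          // Some attributes may not be readable; skip them in this sketch.
        }
      }
    }
  }
}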

hadoop git commit: YARN-6028. Add document for container metrics (Contributed by Weiwei Yang via Daniel Templeton)

2017-01-19 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/HEAD [created] d85fb264a


YARN-6028. Add document for container metrics (Contributed by Weiwei Yang via 
Daniel Templeton)

(cherry picked from commit f5839fd6df77786353160b41eb4b5ceea9380017)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d85fb264
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d85fb264
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d85fb264

Branch: refs/heads/HEAD
Commit: d85fb264a017d9db052b5ad08dd0028589d39611
Parents: d374087
Author: Daniel Templeton 
Authored: Thu Jan 19 12:45:17 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Jan 19 12:46:53 2017 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md  | 53 +++-
 1 file changed, 51 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d85fb264/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 53d1095..e52a6a7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -443,6 +443,57 @@ NodeManagerMetrics shows the statistics of the containers 
in the node. Each metr
 | `goodLocalDirsDiskUtilizationPerc` | Current disk utilization percentage 
across all good local directories |
 | `goodLogDirsDiskUtilizationPerc` | Current disk utilization percentage 
across all good log directories |
 
+ContainerMetrics
+--
+
+ContainerMetrics shows the resource utilization statistics of a container. 
Each metrics record contains tags such as ContainerPid and Hostname as 
additional information along with metrics.
+
+| Name | Description |
+|:---- |:---- |
+| `pMemLimitMBs` | Physical memory limit of the container in MB |
+| `vMemLimitMBs` | Virtual memory limit of the container in MB |
+| `vCoreLimit` | CPU limit of the container in number of vcores |
+| `launchDurationMs` | Container launch duration in msec  |
+| `localizationDurationMs` | Container localization duration in msec |
+| `StartTime` | Time in msec when container starts |
+| `FinishTime` | Time in msec when container finishes |
+| `ExitCode` | Container's exit code |
+| `PMemUsageMBsNumUsage` | Total number of physical memory used metrics |
+| `PMemUsageMBsAvgMBs` | Average physical memory used in MB |
+| `PMemUsageMBsStdevMBs` | Standard deviation of the physical memory used in 
MB |
+| `PMemUsageMBsMinMBs` | Minimum physical memory used in MB |
+| `PMemUsageMBsMaxMBs` | Maximum physical memory used in MB |
+| `PMemUsageMBsIMinMBs` | Minimum physical memory used in MB of current 
_interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PMemUsageMBsIMaxMBs` | Maximum physical memory used in MB of current 
_interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PMemUsageMBsINumUsage` | Total number of physical memory used metrics in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentNumUsage` | Total number of physical CPU cores percent used 
metrics |
+| `PCpuUsagePercentAvgPercents` | Average physical CPU cores percent used |
+| `PCpuUsagePercentStdevPercents` | Standard deviation of physical CPU cores 
percent used |
+| `PCpuUsagePercentMinPercents` | Minimum physical CPU cores percent used|
+| `PCpuUsagePercentMaxPercents` | Maximum physical CPU cores percent used |
+| `PCpuUsagePercentIMinPercents` | Minimum physical CPU cores percent used in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentIMaxPercents` | Maximum physical CPU cores percent used in 
current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `PCpuUsagePercentINumUsage` | Total number of physical CPU cores used 
metrics in current _interval_ (the time of _interval_ is specified by 
yarn.nodemanager.container-metrics.period-ms) |
+| `MilliVcoreUsageNumUsage` | Total number of vcores used metrics |
+| `MilliVcoreUsageAvgMilliVcores` | 1000 times the average vcores used |
+| `MilliVcoreUsageStdevMilliVcores` | 1000 times the standard deviation of 
vcores used |
+| `MilliVcoreUsageMinMilliVcores` | 1000 times the minimum vcores used |
+| `MilliVcoreUsageMaxMilliVcores` | 1000 times the maximum vcores used |
+| `MilliVcoreUsageIMinMilliVcores` | 1000 times the average vcores used in 
current _interval_ (the time of _interval_ is specified by 
yarn.no

[2/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.

(cherry picked from commit 0327a79d79a4d56d9c7cb6889886afd2272b07d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0177c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0177c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0177c95

Branch: refs/heads/YARN-5355-branch-2
Commit: e0177c952c3b64c8dcf0408562faa98f725280e0
Parents: c11078f
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:55:55 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 659b5eb..e1ecaf9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2058,6 +2058,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 416d705..a21ee2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2272,6 +2272,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-time

[1/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 c9246f619 -> 0327a79d7
  refs/heads/YARN-5355-branch-2 c11078fd2 -> e0177c952


YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0327a79d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0327a79d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0327a79d

Branch: refs/heads/YARN-5355
Commit: 0327a79d79a4d56d9c7cb6889886afd2272b07d3
Parents: c9246f6
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:52:47 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8752e5d..e45bfe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2047,6 +2047,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
+  /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+      TIMELINE_SERVICE_PREFIX
+      + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+      "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
   /**
    * The name for setting that points to an optional HBase configuration
    * (hbase-site.xml file) with settings that will override the ones found on
    * the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6f62fd8..16954a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2243,6 +2243,15 @@
 
   <property>
     <description>
+    The default hdfs location for flowrun coprocessor jar.
+    </description>
+    <name>yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+    </name>
+    <value>/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar</value>
+  </property>
+
+  <property>
+    <description>
     The value of this parameter sets the prefix for all tables that are part of
     timeline service in the hbase storage schema. It can be set to "dev."
     or "staging." if it is to be used for development or staging instances.

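Taken together, the two hunks above make the flow run coprocessor jar location configurable. As context, a minimal sketch of how a table descriptor can attach a coprocessor loaded dynamically from that hdfs location; the table name and the FlowRunCoprocessor class literal are illustrative assumptions here, not the committed FlowRunTable code:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class CoprocessorJarSketch {
  private CoprocessorJarSketch() {
  }

  /** Builds a flow run table descriptor with a dynamically loaded coprocessor. */
  public static HTableDescriptor flowRunDescriptor(Configuration conf)
      throws IOException {
    // Illustrative table name; the real name is decided by FlowRunTable.
    HTableDescriptor desc =
        new HTableDescriptor(TableName.valueOf("timelineservice.flowrun"));
    // Resolve the jar location from the new setting, falling back to the
    // shipped default added in this commit.
    String jarPath = conf.get(
        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
        YarnConfiguration.DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR);
    // Attach the coprocessor class from the jar on hdfs instead of requiring
    // the class on every region server's classpath.
    desc.addCoprocessor(
        "org.apache.hadoop.yarn.server.timelineservice.storage.flow"
            + ".FlowRunCoprocessor",
        new Path(jarPath), Coprocessor.PRIORITY_USER, null);
    return desc;
  }
}

The practical upshot is that the coprocessor jar can be replaced on hdfs without redeploying it to every region server.
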
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-p

hadoop git commit: Set maven version to 3.0.0-alpha2 for release branch.

2017-01-19 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha2 [created] 18f64065d


Set maven version to 3.0.0-alpha2 for release branch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f64065
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f64065
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f64065

Branch: refs/heads/branch-3.0.0-alpha2
Commit: 18f64065d5db6208daf50b02c1b5ed4ee3ce547a
Parents: f5839fd
Author: Andrew Wang 
Authored: Thu Jan 19 15:49:22 2017 -0800
Committer: Andrew Wang 
Committed: Thu Jan 19 15:49:22 2017 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xm

hadoop git commit: Preparing for 3.0.0-alpha3 development

2017-01-19 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk f5839fd6d -> 5d8b80ea9


Preparing for 3.0.0-alpha3 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d8b80ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d8b80ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d8b80ea

Branch: refs/heads/trunk
Commit: 5d8b80ea9bc95adf64ff4f43b00898b8ae38556f
Parents: f5839fd
Author: Andrew Wang 
Authored: Thu Jan 19 15:50:07 2017 -0800
Committer: Andrew Wang 
Committed: Thu Jan 19 15:50:07 2017 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xml   

hadoop git commit: HADOOP-14002. Document -DskipShade property in BUILDING.txt. Contributed by Hanisha Koneru.

2017-01-19 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5d8b80ea9 -> 60865c8ea


HADOOP-14002. Document -DskipShade property in BUILDING.txt. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60865c8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60865c8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60865c8e

Branch: refs/heads/trunk
Commit: 60865c8ea08053f3d6ac23f81c3376a3de3ca996
Parents: 5d8b80e
Author: Arpit Agarwal 
Authored: Thu Jan 19 17:32:25 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Jan 19 17:32:25 2017 -0800

--
 BUILDING.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60865c8e/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index a1721ba..5d331d4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -133,6 +133,8 @@ Maven build goals:
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires 
Internet connectivity)
   * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
+  * Use -DskipShade to disable client jar shading to speed up build times (in
+development environments only, not to build release artifacts)
 
  Snappy build options:
 

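As an illustrative invocation (assumed here, not part of the committed text), a development build that skips both tests and client jar shading might be run as:

  $ mvn clean install -DskipTests -DskipShade

Since the shaded client jars only matter for release artifacts, skipping shading in day-to-day development saves a significant amount of build time.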




[hadoop] Git Push Summary

2017-01-19 Thread junping_du
Repository: hadoop
Updated Tags:  refs/tags/release-2.8.0-RC1 [created] 63ce23a44




[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 0000000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

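A minimal sketch, assuming an open HBase Admin handle and an illustrative table name and column family, of how the username splits above are typically applied at table-creation time:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;

public final class PreSplitSketch {
  private PreSplitSketch() {
  }

  /** Creates a table pre-split on common username prefixes. */
  public static void createPreSplit(Admin admin) throws IOException {
    HTableDescriptor desc =
        new HTableDescriptor(TableName.valueOf("timelineservice.entity"));
    desc.addFamily(new HColumnDescriptor("i"));
    // Passing the split keys up front spreads writes for different users
    // across regions from the moment the table exists.
    admin.createTable(desc, TimelineHBaseSchemaConstants.getUsernameSplits());
  }
}
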
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 0000000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[07/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static  Filter createHBaseColQualPrefixFilter(
-  ColumnPrefix colPrefix, TimelinePrefixFilter filter) {
-return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-new BinaryPrefixComparator(
-colPrefix.getColumnPrefixBytes(filter.getPrefix(;
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param  Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 

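For reference, a minimal sketch of the kind of HBase filter these utilities assemble (the prefix string is an illustrative assumption): a MUST_PASS_ALL filter list that keeps only columns whose qualifier starts with a given prefix.

import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class QualifierPrefixSketch {
  private QualifierPrefixSketch() {
  }

  /** Keeps only columns whose qualifier starts with the given prefix. */
  public static FilterList prefixOnly(String prefix) {
    FilterList list = new FilterList(Operator.MUST_PASS_ALL);
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes(prefix))));
    return list;
  }
}
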
[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static AggregationOperat

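A small usage sketch of the combination logic above, assuming the SUM aggregation operation (an assumption for illustration; any AggregationOperation value works the same way):

import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;

public final class CombineAttributesSketch {
  private CombineAttributesSketch() {
  }

  /** Appends the SUM operation's attribute to an existing attribute array. */
  public static Attribute[] withSum(Attribute[] existing) {
    // Per the javadoc above, the result is the original attributes followed
    // by the attribute carried by the aggregation operation, so the array
    // grows by exactly one slot.
    return HBaseTimelineStorageUtils.combineAttributes(
        existing, AggregationOperation.SUM);
  }
}
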
[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static AggregationOperat

[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha2 18f64065d -> 9a925cb8e
  refs/heads/trunk 60865c8ea -> b01514f65


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-
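
The static block in this test relies on the row key layout storing inverted values. A small self-contained sketch of why (illustrative numbers, plain Java, not the test's exact arithmetic):

public final class InvertedLongSketch {
  private InvertedLongSketch() {
  }

  public static void main(String[] args) {
    // Flow run ids are written as (Long.MAX_VALUE - id) so that a larger
    // (more recent) run id produces a smaller stored value and therefore
    // sorts first in HBase's ascending byte order.
    long olderRun = 100L;
    long newerRun = 200L;
    long olderStored = Long.MAX_VALUE - olderRun;
    long newerStored = Long.MAX_VALUE - newerRun;
    System.out.println(newerStored < olderStored); // true: newer sorts first
  }
}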

[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 0000000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 0000000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * generate a timestamp that is unique per row in a region this is per 
region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a 
new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * org.apache.hadoop.hbase.client.Durabilit
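
For orientation, a skeletal sketch of the hook this relocated class overrides: the HBase 1.x prePut signature is shown with an illustrative no-op body rather than the removed tag-rewriting logic.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class PrePutSketch extends BaseRegionObserver {
  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
      WALEdit edit, Durability durability) throws IOException {
    // The real FlowRunCoprocessor rewrites cell tags and timestamps here;
    // this sketch simply lets the Put proceed unchanged.
  }
}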

[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 0000000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 
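
A minimal sketch of the storage pattern this enum encodes, assuming the "i" info column family and the "e!" event prefix shown in the table javadocs (the helper below is illustrative, not part of the commit):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class PrefixedColumnSketch {
  private PrefixedColumnSketch() {
  }

  /** Writes one event value under a prefixed column qualifier. */
  public static Put eventPut(byte[] rowKey, String eventKey, String value) {
    Put put = new Put(rowKey);
    // "i" is the info column family; "e!" stands in for the EVENT prefix
    // plus the qualifier separator handled by the real Separator utility.
    put.addColumn(Bytes.toBytes("i"), Bytes.toBytes("e!" + eventKey),
        Bytes.toBytes(value));
    return put;
  }
}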

[15/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-assertEquals(CLUSTER, rowKey.getClusterId());
-assertEquals(USER, rowKey.getUserId());
-assertEquals(FLOW_NAME, rowKey.getFlo

[06/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table has column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metricId1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-  YarnConfi
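
For orientation, the row key described in the javadoc above is produced and parsed by ApplicationRowKey, the same API exercised in the TestRowKeys diff earlier in this thread. A minimal sketch of that round trip (all values hypothetical):

import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;

public class ApplicationRowKeySketch {
  public static void main(String[] args) {
    // Row key layout per the javadoc: clusterId!userName!flowName!flowRunId!AppId.
    byte[] rowKey = new ApplicationRowKey("cluster1", "alice", "daily_etl",
        1002937L, "application_1484892339654_0001").getRowKey();
    // Decoding hands back the typed components.
    ApplicationRowKey parsed = ApplicationRowKey.parseRowKey(rowKey);
    System.out.println(parsed.getClusterId() + " / " + parsed.getUserId());
  }
}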

[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for the flow run
+ * table. Looks through the list of cells per row, checks their tags, and
+ * operates on those cells as per the cell tags. Transforms reads of the
+ * stored metrics into calculated sums for each column. Also finds the min and
+ * max for start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id. This is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action
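
The class above carries the aggregation logic described in its javadoc. The real FlowScanner also handles cell tags, min/max operations and final-value retention; purely as an illustration of the summing idea, here is a sketch using the HBase types imported above (not the coprocessor's actual code):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class SumCellsSketch {
  // Sum the values of cells that all belong to one column, the way a read of
  // a SUM-tagged flow run metric is collapsed into a single number.
  static long sum(List<Cell> cellsForOneColumn) {
    long total = 0;
    for (Cell cell : cellsForOneColumn) {
      total += Bytes.toLong(CellUtil.cloneValue(cell));
    }
    return total;
  }
}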

[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**
+
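
The filter imports above hint at how this reader restricts a Get or Scan to the requested fields. A generic sketch of composing such HBase filters (not GenericEntityReader's exact construction; the family bytes are hypothetical):

import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.util.Bytes;

public class FieldFilterSketch {
  // Accept cells only from one column family; a reader can OR several of
  // these together when multiple fields were requested.
  static FilterList oneFamilyOnly(String familyBytes) {
    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
    list.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes(familyBytes))));
    return list;
  }
}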

[12/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not to be
+ * written to directly by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T extends BaseTable<T>> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null, the current timestamp multiplied by
+   *  TimestampGenerator.TS_MULTIPLIER, plus the last 3 digits of the
+   *  app id, will be used.
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during the store operation
+   *  (sending the mutation to the table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa
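
The null-tolerant contract documented above means callers can pass optional values straight through without their own null checks. A minimal sketch of a caller (the method, qualifier, and values are placeholders; only the store() signature is taken from this hunk):

import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;

public class StoreSketch {
  // Writes one value; a null rowKey, qualifier, or value is silently skipped
  // by store(), so optional fields need no guarding at the call site.
  static void writeCreatedTime(ColumnHelper<EntityTable> helper,
      TypedBufferedMutator<EntityTable> mutator, byte[] rowKey)
      throws IOException {
    helper.store(rowKey, mutator, Bytes.toBytes("created_time"),
        null /* let the helper derive the cell timestamp */, 1392993084018L);
  }
}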

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for the flow run
+ * table. Looks through the list of cells per row, checks their tags, and
+ * operates on those cells as per the cell tags. Transforms reads of the
+ * stored metrics into calculated sums for each column. Also finds the min and
+ * max for start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id. This is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[20/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table has column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metricId1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-  YarnConfi

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not to be
+ * written to directly by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T extends BaseTable<T>> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null, the current timestamp multiplied by
+   *  TimestampGenerator.TS_MULTIPLIER, plus the last 3 digits of the
+   *  app id, will be used.
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during the store operation
+   *  (sending the mutation to the table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa

[24/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the-day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form: clusterId!dayTimestamp!user!flowName. dayTimestamp (top of the day
+   * timestamp) is a long and the rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+  KeyConverter<FlowActivityRowKey> {
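
A minimal sketch of the round trip this class provides, using only the constructor and methods shown above (values hypothetical):

import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;

public class FlowActivityRowKeySketch {
  public static void main(String[] args) {
    // The public constructor rounds the timestamp down to the top of the day.
    byte[] rowKey = new FlowActivityRowKey("cluster1",
        System.currentTimeMillis(), "alice", "daily_etl").getRowKey();
    FlowActivityRowKey parsed = FlowActivityRowKey.parseRowKey(rowKey);
    System.out.println(parsed.getFlowName() + " @ " + parsed.getDayTimestamp());
  }
}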

[14/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b01514f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b01514f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b01514f6

Branch: refs/heads/trunk
Commit: b01514f65bc6090a50a583f67d1ecb5d74b6d276
Parents: 60865c8
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 20:52:55 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.java  |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 ..

[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
-  new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form:
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * the rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-  KeyConverter<EntityRowKey> {
-
-private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId with each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType and
- * entityId are strings. flowRunId is a long hence 8 bytes in size. app id
- * is represented as 12 bytes
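
This deletion is the move half of the patch; the class reappears unchanged under the new hadoop-yarn-server-timelineservice-hbase module. For reference, a minimal round-trip sketch against the API shown above (values hypothetical):

import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;

public class EntityRowKeySketch {
  public static void main(String[] args) {
    // userName!clusterId!flowName!flowRunId!appId!entityType!entityId
    byte[] rowKey = new EntityRowKey("cluster1", "alice", "daily_etl",
        1002937L, "application_1484892339654_0001", "YARN_CONTAINER",
        "container_1484892339654_0001_01_000001").getRowKey();
    EntityRowKey parsed = EntityRowKey.parseRowKey(rowKey);
    System.out.println(parsed.getEntityType() + "/" + parsed.getEntityId());
  }
}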

[10/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the-day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form: clusterId!dayTimestamp!user!flowName. dayTimestamp (top of the day
+   * timestamp) is a long and the rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+  KeyConverter<FlowActivityRowKey> {
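
getTopOfTheDayTimestamp lives in HBaseTimelineStorageUtils, which is not part of this hunk. Assuming it does the rounding its name and the javadoc here imply (drop the milliseconds elapsed since midnight UTC), the day bucket can be sketched as follows; this is an assumption, not the utility's verified source:

public class TopOfDaySketch {
  private static final long MILLIS_PER_DAY = 24L * 60 * 60 * 1000;

  // Assumed behavior: round a millisecond timestamp down to 00:00 UTC of its
  // day, so all activity within one day shares one flow activity row key.
  static long topOfTheDay(long ts) {
    return ts - (ts % MILLIS_PER_DAY);
  }
}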

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b01514f65bc6090a50a583f67d1ecb5d74b6d276)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a925cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a925cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a925cb8

Branch: refs/heads/branch-3.0.0-alpha2
Commit: 9a925cb8e8f438d29934043ceabb4c0066279cd5
Parents: 18f6406
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:00:24 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.ja

[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix<ApplicationTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 

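For orientation (the message is cut off above): each constant in this enum pairs a short prefix string ("s", "r", "i", "e") with a column family, and the stored column qualifier is that prefix joined to the remainder of the column name. A minimal sketch of the composition idea, not part of the commit; the "!" separator and class name are illustrative, since the module derives the real separator from its Separator enum:

import org.apache.hadoop.hbase.util.Bytes;

public final class PrefixedQualifierSketch {
  // Hypothetical helper: joins a column prefix ("e", "i", ...) to the rest
  // of the qualifier. The "!" separator is illustrative only; the module's
  // Separator enum performs the real encoding.
  static byte[] qualifier(String prefix, String remainder) {
    return Bytes.add(Bytes.toBytes(prefix), Bytes.toBytes("!" + remainder));
  }

  public static void main(String[] args) {
    // EVENT cells land in the INFO family under "e!<event column name>"
    System.out.println(Bytes.toString(qualifier("e", "eventId")));
  }
}
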
[21/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static <T> Filter createHBaseColQualPrefixFilter(
-      ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
-    return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-        new BinaryPrefixComparator(
-            colPrefix.getColumnPrefixBytes(filter.getPrefix())));
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 

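The deleted utility above maps timeline filter-list operators and compare ops onto HBase filter primitives. A minimal sketch of those same primitives assembled by hand, under the assumption of "e!" and "i!" qualifier prefixes for event and info columns:

import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FilterMappingSketch {
  public static void main(String[] args) {
    // AND list (MUST_PASS_ALL, the mapping of TimelineFilterList.Operator.AND)
    // of two qualifier-prefix matches, as createHBaseColQualPrefixFilter builds.
    FilterList list = new FilterList(Operator.MUST_PASS_ALL);
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("e!"))));
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("i!"))));
    System.out.println(list);
  }
}
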
[22/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**
+

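The message truncates before the reader's body, but the field above shows its main dependency: entity reads first resolve an application's flow context through the app-to-flow table. A sketch of that lookup with the plain HBase client; the table name "timelineservice.app_flow" and the pre-encoded row key are assumptions:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public final class FlowContextLookupSketch {
  // Assumed table name; the real one is configured through AppToFlowTable.
  static Result lookup(Connection conn, byte[] appToFlowRowKey)
      throws IOException {
    try (Table t =
        conn.getTable(TableName.valueOf("timelineservice.app_flow"))) {
      return t.get(new Get(appToFlowRowKey));
    }
  }
}
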
[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  FilterLi

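Since this reader returns null from both filter-list builders, its scans are driven purely by row-key prefixes plus a page limit. A sketch of that scan shape; the "clusterId!" prefix bytes and the limit are illustrative stand-ins for FlowActivityRowKeyPrefix and the entity filters:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class ActivityScanSketch {
  // Prefix scan over flow-activity rows, capped by a PageFilter; the real
  // reader derives the prefix from FlowActivityRowKeyPrefix and the limit
  // from TimelineEntityFilters.
  static Scan activityScan(String clusterId, long limit) {
    Scan scan = new Scan();
    scan.setRowPrefixFilter(Bytes.toBytes(clusterId + "!"));
    scan.setFilter(new PageFilter(limit));
    return scan;
  }
}
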
[18/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
-      new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!appId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-      KeyConverter<EntityRowKey> {
-
-private final AppIdKeyConverter appIDKeyConverter = new 
AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType 
and
- * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
- * is represented as 12 bytes

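The row-key layout described above is easy to see in miniature. A sketch that joins the string segments with '!' and writes flowRunId as a raw 8-byte long; the 12-byte AppIdKeyConverter encoding and any sort-order inversion of the long are deliberately omitted:

import org.apache.hadoop.hbase.util.Bytes;

public final class EntityRowKeySketch {
  // userName!clusterId!flowName!flowRunId!appId!entityType!entityId, with
  // flowRunId written as 8 raw bytes; appId stays a plain string here even
  // though the real converter packs it into 12 bytes.
  static byte[] encode(String user, String cluster, String flow,
      long flowRunId, String appId, String type, String id) {
    return Bytes.add(
        Bytes.toBytes(user + "!" + cluster + "!" + flow + "!"),
        Bytes.toBytes(flowRunId),
        Bytes.toBytes("!" + appId + "!" + type + "!" + id));
  }
}
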
[16/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  FilterLi

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * Generates a timestamp that is unique per row in a region; the generator
-   * is kept per region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells, and each such cell gets a
-   * new unique timestamp generated by {@link TimestampGenerator}.
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * org.apache.hadoop.hbase.client.Durabilit

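The prePut hook quoted above is the heart of this coprocessor. A minimal sketch of its shape against the HBase 1.x BaseRegionObserver API; the body is only a comment, since the actual tag and timestamp rewriting lives in the real class:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class PrePutHookSketch extends BaseRegionObserver {
  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Put put, WALEdit edit, Durability durability) throws IOException {
    // The real coprocessor walks put.getFamilyCellMap() here, attaches the
    // per-Put tags to every cell, and replaces non-metric cell timestamps
    // with values from TimestampGenerator.
  }
}
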
[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column<FlowRunTable> {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application 
end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.s

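Reading one of these aggregated columns back is a single getValue call. A sketch, assuming the INFO column family's short name is "i" and the LongConverter encoding shown above for MIN_START_TIME:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlowRunReadSketch {
  // Returns the aggregated minimum start time for a flow run row, or null
  // when the cell is absent. Family name "i" is an assumption.
  static Long minStartTime(Result r) {
    byte[] v = r.getValue(Bytes.toBytes("i"), Bytes.toBytes("min_start_time"));
    return (v == null) ? null : Bytes.toLong(v);
  }
}
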
[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

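The split keys above only matter at table-creation time. A sketch of how getUsernameSplits() would typically be handed to the HBase admin API so regions are pre-divided along common username prefixes; the table and family names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class PresplitSketch {
  // Creates a pre-split table; each byte[] in usernameSplits becomes a
  // region boundary, so rows for common username prefixes spread out.
  static void create(Admin admin, byte[][] usernameSplits) throws IOException {
    HTableDescriptor desc =
        new HTableDescriptor(TableName.valueOf("timelineservice.entity"));
    desc.addFamily(new HColumnDescriptor("i"));
    admin.createTable(desc, usernameSplits);
  }
}
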
[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for the flow run
+ * table. Looks through the list of cells per row, checks their tags, and
+ * operates on those cells as per the cell tags. Transforms reads of the stored
+ * metrics into calculated sums for each column. Also finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Uses a special application id to represent the flow id; this is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

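The scanner's SUM handling boils down to folding several stored long cells for one column into a single value. A sketch of that fold, with the tag checks and min/max bookkeeping omitted:

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public final class SumCellsSketch {
  // Collapses the stored long-valued cells of one column into a single sum,
  // the core of the SUM FlowScannerOperation during reads and compactions.
  static long sum(List<Cell> cellsForOneColumn) {
    long total = 0L;
    for (Cell c : cellsForOneColumn) {
      total += Bytes.toLong(CellUtil.cloneValue(c));
    }
    return total;
  }
}
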
[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix<T> {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones 
the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cas

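The store/read contract above is easiest to see at a call site. A fragment rather than a runnable program, since rowKey and the mutator come from this module's row-key classes and BaseTable; the readResult(Result, String) accessor is assumed from the truncated javadoc:

// Writes one INFO cell for an application entity; a null timestamp lets the
// server assign one, as the javadoc above specifies.
ApplicationColumnPrefix.INFO.store(rowKey, applicationTableMutator,
    "startTime", null, 1484800000000L);

// Reads it back from a Get/Scan Result; the value is cloned from the cell.
Object started = ApplicationColumnPrefix.INFO.readResult(result, "startTime");
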
[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 
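
To make the reader's job concrete, here is a minimal sketch of the kind of prefix scan a reader like this issues against the flow run table. The "!"-joined row-key layout is an assumption for illustration only; the real encoding lives in FlowRunRowKeyPrefix.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlowRunScanSketch {
  // Build a scan over all runs of one flow. The separator-joined layout below
  // is illustrative; the real row key is encoded by FlowRunRowKeyPrefix.
  public static Scan prefixScan(String cluster, String user, String flow,
      long pageSize) {
    byte[] prefix = Bytes.toBytes(cluster + "!" + user + "!" + flow + "!");
    Scan scan = new Scan();
    scan.setRowPrefixFilter(prefix);          // restrict to this flow's runs
    scan.setFilter(new PageFilter(pageSize)); // cap the rows fetched per page
    return scan;
  }
}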

[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}
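
As a hedged usage sketch (the row key, qualifier, and value below are made up for illustration), a write that lands in the single-letter info family declared above would look roughly like this:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class ApplicationInfoWriteSketch {
  // Build a Put targeting the "i" (INFO) column family from the enum above.
  static Put infoCell(byte[] rowKey, String qualifier, String value) {
    Put put = new Put(rowKey);
    put.addColumn(Bytes.toBytes("i"), Bytes.toBytes(qualifier),
        Bytes.toBytes(value));
    return put;
  }
}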

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KI

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for the flow run
+ * table. Looks through the list of cells per row, checks their tags, and
+ * operates on those cells as the tags dictate. Transforms reads of the stored
+ * metrics into calculated sums for each column. Also finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id. This is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action
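
A toy rendering of the transform the class comment describes, assuming plain long cell values (the real scanner decodes cells through a NumericValueConverter and reads the operation from cell tags):

import java.util.List;

final class FlowAggregationSketch {
  // Collapse the per-application metric cells of one column into a single sum.
  static long sum(List<Long> cellValues) {
    long total = 0;
    for (long v : cellValues) {
      total += v;
    }
    return total;
  }

  // Track the smallest known application start time (GLOBAL_MIN-style).
  static long globalMin(List<Long> startTimes) {
    long min = Long.MAX_VALUE;
    for (long t : startTimes) {
      min = Math.min(min, t);
    }
    return min;
  }
}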

[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String columnQuali
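
The truncated columnFor above follows a simple scan-the-enum pattern; a self-contained sketch of the same contract, written against a cut-down enum rather than the real EntityColumn:

enum SimpleColumn {
  ID("id"), TYPE("type");

  private final String qualifier;

  SimpleColumn(String qualifier) {
    this.qualifier = qualifier;
  }

  // Return the matching column, or null; columnFor(x) == columnFor(y)
  // exactly when x.equals(y), as the javadoc above promises.
  static SimpleColumn columnFor(String name) {
    for (SimpleColumn column : values()) {
      if (column.qualifier.equals(name)) {
        return column;
      }
    }
    return null;
  }
}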

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47ec7f92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47ec7f92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47ec7f92

Branch: refs/heads/YARN-5355-branch-2
Commit: 47ec7f927e8b1b1eeb8a2287ae2a7795cab131dd
Parents: e0177c9
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:38:38 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 542 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOpe

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly by
+ * clients to write.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null, the current timestamp multiplied with
+   *  TimestampGenerator.TS_MULTIPLIER plus the last 3 digits of the
+   *  app id will be used.
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during the store operation
+   *  (sending the mutation to the table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa
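
The getPutTimestamp call above folds the app id into the cell version; here is a rough sketch of the arithmetic the store javadoc describes, with the multiplier value assumed for illustration rather than taken from TimestampGenerator:

final class SupplementedTimestampSketch {
  // Assumed value; the real constant is TimestampGenerator.TS_MULTIPLIER.
  static final long TS_MULTIPLIER = 1_000_000L;

  // Spread the wall-clock time out and fold in the last three digits of the
  // app id, so writes for different apps in the same millisecond get distinct
  // cell versions.
  static long supplement(long wallClockMillis, long appIdSequence) {
    return wallClockMillis * TS_MULTIPLIER + (appIdSequence % 1000);
  }
}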

[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 0327a79d7 -> b92089c0e
  refs/heads/YARN-5355-branch-2 e0177c952 -> 47ec7f927


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert string key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new StringKeyConvert

[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws IOE

[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation (sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation (sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cas
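
The interface comment's "partially qualified column" idea, reduced to bytes: the stored qualifier is the prefix plus the caller-supplied remainder. The "!" separator below is an assumption; the real code routes qualifiers through Separator.

import org.apache.hadoop.hbase.util.Bytes;

final class PrefixedQualifierSketch {
  // Compose the full column qualifier from a fixed prefix and a remainder
  // that is only known at store time.
  static byte[] qualifier(byte[] prefixBytes, String remainder) {
    return Bytes.add(prefixBytes, Bytes.toBytes("!"), Bytes.toBytes(remainder));
  }
}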

[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for row keys where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file
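
How splits like these are normally consumed, sketched against the stock HBase admin API; the table and family names are placeholders, not the values the schema creator actually uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class PreSplitTableSketch {
  // Create a table with one region per username split range.
  static void create(byte[][] usernameSplits) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      HTableDescriptor desc =
          new HTableDescriptor(TableName.valueOf("timelineservice.entity"));
      desc.addFamily(new HColumnDescriptor("i"));
      admin.createTable(desc, usernameSplits);
    }
  }
}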

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KI

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.s

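For context, a hedged sketch of how a writer might record one application's start time through the MIN_START_TIME column above. The store(...) signature mirrors the EntityColumn.store shown in part [18/28] of this series (the FlowRunColumn version is truncated above), and the FlowRunRowKey constructor arguments are an assumption, since that file appears only in the diffstat. Under GLOBAL_MIN the flow run coprocessor keeps the smallest value seen across applications rather than the latest write.

import java.io.IOException;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;

final class FlowRunWriteSketch {
  // Record one application's start time; the GLOBAL_MIN aggregation keeps the
  // minimum across all applications of the flow run.
  static void storeStartTime(TypedBufferedMutator mutator, long startTimeMs)
      throws IOException {
    byte[] rowKey =
        new FlowRunRowKey("cluster1", "user1", "flowA", 1002345L).getRowKey();
    // Null timestamp: the column helper derives a cell timestamp internally.
    FlowRunColumn.MIN_START_TIME.store(rowKey, mutator, null, startTimeMs);
  }
}
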
[14/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b92089c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b92089c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b92089c0

Branch: refs/heads/YARN-5355
Commit: b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a
Parents: 0327a79
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:21:48 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 547 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.java  |  2

[20/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index da62fdf..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final KeyConverter appRowKeyConverter =
-  new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!AppId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An ApplicationRowKey object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-return new ApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
-   * strings.
-   * 
-   */
-  final private static class ApplicationRowKeyConverter implements
-  KeyConverter {
-
-private final KeyConverter appIDKeyConverter =
-new AppIdKeyConverter();
-
-/**
- * Intended for use in ApplicationRowKey only.
- */
-private ApplicationRowKeyConverter() {
-}
-
-/**
- * Application row key is of the form
- * clusterId!userName!flowName!flowRunId!appId with each segment separated
- * by !. The sizes below indicate sizes of each one of these segments in
- * sequence. clusterId, userName and flowName are strings. flowRunId is a
- * long hence 8 bytes in size. app id is represented as 12 bytes with
- * cluster timestamp part of appid takes 8 bytes(long) and seq id takes 4
- * bytes(int). Strings are variable in size (i.e. end whenever separator is
- * encountered). This is used while decoding and helps in determining where
- * to split.
- 

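Since the encode/decode pair above is symmetric, a row key survives a round trip. A minimal sketch of that, assuming only the API visible in this hunk; the values are illustrative, and the appId must parse as a YARN ApplicationId.

import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;

final class ApplicationRowKeySketch {
  public static void main(String[] args) {
    ApplicationRowKey key = new ApplicationRowKey("cluster1", "user1", "flowA",
        1002345L, "application_1484700000000_0001");
    // Serialized as clusterId!userName!flowName!flowRunId!appId.
    byte[] rowKey = key.getRowKey();
    ApplicationRowKey parsed = ApplicationRowKey.parseRowKey(rowKey);
    assert parsed.getFlowRunId().equals(key.getFlowRunId());
    assert parsed.getAppId().equals(key.getAppId());
  }
}
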
[24/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form: clusterId!dayTimestamp!user!flowName. dayTimestamp (top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

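The day bucketing above is what lets a single row aggregate all activity for a flow on a given day. A minimal sketch of the rounding, assuming getTopOfTheDayTimestamp truncates a millisecond timestamp to the start of its UTC day (the utility class itself is not in this hunk):

final class TopOfDaySketch {
  private static final long MILLIS_ONE_DAY = 86_400_000L;

  static long topOfTheDay(long timestampMs) {
    return timestampMs - (timestampMs % MILLIS_ONE_DAY);
  }

  public static void main(String[] args) {
    // 2017-01-19T21:21:48Z rounds down to 2017-01-19T00:00:00Z.
    System.out.println(topOfTheDay(1484860908000L)); // prints 1484784000000
  }
}
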
[22/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 

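The imports above hint at how this reader narrows its scans: HBase filters combined into a FilterList. A hedged sketch of that composition, restricting a Scan to one column family and capping the page size. The "i" family name is an assumption (FlowRunColumnFamily is not shown in this hunk); the filter classes are the stock HBase 1.x API imported above.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

final class FlowRunScanSketch {
  static Scan infoFamilyScan(long pageLimit) {
    FilterList filters = new FilterList(Operator.MUST_PASS_ALL);
    // Keep only cells from the (assumed) "i" info column family.
    filters.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("i"))));
    // Stop each region scan after pageLimit rows.
    filters.addFilter(new PageFilter(pageLimit));
    Scan scan = new Scan();
    scan.setFilter(filters);
    return scan;
  }
}
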
[15/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new StringKeyConverter();
-
-  public GenericEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetri

[06/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index da62fdf..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final KeyConverter appRowKeyConverter =
-  new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!AppId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An ApplicationRowKey object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-return new ApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
-   * strings.
-   * 
-   */
-  final private static class ApplicationRowKeyConverter implements
-  KeyConverter {
-
-private final KeyConverter appIDKeyConverter =
-new AppIdKeyConverter();
-
-/**
- * Intended for use in ApplicationRowKey only.
- */
-private ApplicationRowKeyConverter() {
-}
-
-/**
- * Application row key is of the form
- * clusterId!userName!flowName!flowRunId!appId with each segment separated
- * by !. The sizes below indicate sizes of each one of these segments in
- * sequence. clusterId, userName and flowName are strings. flowRunId is a
- * long hence 8 bytes in size. app id is represented as 12 bytes with
- * cluster timestamp part of appid takes 8 bytes(long) and seq id takes 4
- * bytes(int). Strings are variable in size (i.e. end whenever separator is
- * encountered). This is used while decoding and helps in determining where
- * to split.
- 

[18/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String columnQuali

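The columnFor contract above makes the enum usable as a reverse index from qualifier names. A hedged usage sketch; the method body is truncated in this hunk, and case handling is assumed to follow the sibling Column enums in this module.

import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;

final class EntityColumnLookupSketch {
  public static void main(String[] args) {
    // Equal names map to the same enum constant, per the contract above.
    assert EntityColumn.columnFor("created_time") == EntityColumn.CREATED_TIME;
    // Unknown qualifiers return null rather than throwing.
    assert EntityColumn.columnFor("no_such_column") == null;
  }
}
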
[10/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form: clusterId!dayTimestamp!user!flowName. dayTimestamp (top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[07/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 000..7d37206
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+  ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   *
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+for (Separator separator : Separator.values()) {
+  testEncodeDecode(separator, "");
+  testEncodeDecode(separator, " ");
+  testEncodeDecode(separator, "!");
+  testEncodeDecode(separator, "?");
+  testEncodeDecode(separator, "&");
+  testEncodeDecode(separator, "+");
+  testEncodeDecode(separator, "\t");
+  testEncodeDecode(separator, "Dr.");
+  testEncodeDecode(separator, "Heinz");
+  testEncodeDecode(separator, "Doofenshmirtz");
+  testEncodeDecode(separator, villain);
+  testEncodeDecode(separator, special);
+
+  assertNull(separator.encode(null));
+
+}
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+String encoded = separator.encode(token);
+String decoded = separator.decode(encoded);
+String msg = "token:" + token + " separator:" + separator + ".";
+assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+testEncodeDecode("Dr.", Separator.QUALIFIERS);
+testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+Separator.QUALIFIERS);
+testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
+testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+testEncodeDecode("Platypus...", (Separator) null);
+testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+"= no problem!",
+Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+for (Separator separator : Separator.values()) {
+  String str1 = "cl" + separator.getValue() + "us";
+  String str2 = separator.getValue() + "rst";
+  byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+  byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+  sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+  byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+  sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+  byte[] arr = separator.join(
+  Bytes.toBytes(s

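What these tests pin down is that encode/decode is a lossless round trip, so a separator character may appear inside user data without corrupting row keys. A minimal sketch of the contract; the concrete separator and escape strings (for example "!" escaped as something like "%0$" for QUALIFIERS) are assumptions, since Separator.java itself is not part of this hunk.

import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;

final class SeparatorRoundTripSketch {
  public static void main(String[] args) {
    String token = "flow!name";            // contains the separator itself
    String encoded = Separator.QUALIFIERS.encode(token);
    // The escaped form can be joined safely into a row key; decoding
    // restores the original token exactly.
    assert token.equals(Separator.QUALIFIERS.decode(encoded));
  }
}
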
[16/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws IOE

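A hedged sketch of how a subclass could use the lookup above to complete a partially specified reader context. Several details are assumptions: the AppToFlowRowKey constructor, the abstract members of the base class (their declarations fall in the truncated portion), the FlowContext accessors (taken to mirror its constructor), and the TimelineReaderContext setter.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;

final class FlowContextLookupSketch extends AbstractTimelineStorageReader {
  FlowContextLookupSketch(TimelineReaderContext ctxt) {
    super(ctxt);
  }

  // Assumed abstract members of the base class; not visible in this hunk.
  @Override
  protected void validateParams() {
  }

  @Override
  protected void augmentParams(Configuration hbaseConf, Connection conn)
      throws IOException {
    // Fill in the flow run id for a query that supplied only cluster + app id.
    AppToFlowRowKey rowKey = new AppToFlowRowKey(getContext().getAppId());
    FlowContext flow =
        lookupFlowContext(rowKey, getContext().getClusterId(), hbaseConf, conn);
    getContext().setFlowRunId(flow.getFlowRunId());
  }
}
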
[21/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 000..7d37206
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+  ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   *
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+for (Separator separator : Separator.values()) {
+  testEncodeDecode(separator, "");
+  testEncodeDecode(separator, " ");
+  testEncodeDecode(separator, "!");
+  testEncodeDecode(separator, "?");
+  testEncodeDecode(separator, "&");
+  testEncodeDecode(separator, "+");
+  testEncodeDecode(separator, "\t");
+  testEncodeDecode(separator, "Dr.");
+  testEncodeDecode(separator, "Heinz");
+  testEncodeDecode(separator, "Doofenshmirtz");
+  testEncodeDecode(separator, villain);
+  testEncodeDecode(separator, special);
+
+  assertNull(separator.encode(null));
+
+}
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+String encoded = separator.encode(token);
+String decoded = separator.decode(encoded);
+String msg = "token:" + token + " separator:" + separator + ".";
+assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+testEncodeDecode("Dr.", Separator.QUALIFIERS);
+testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+Separator.QUALIFIERS);
+testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
+testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+testEncodeDecode("Platypus...", (Separator) null);
+testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+"= no problem!",
+Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+for (Separator separator : Separator.values()) {
+  String str1 = "cl" + separator.getValue() + "us";
+  String str2 = separator.getValue() + "rst";
+  byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+  byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+  sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+  byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+  sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+  byte[] arr = separator.join(
+  Bytes.toBytes(s

[12/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+    this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    columnFamilyBytes = columnFamily.getBytes();
+    if (converter == null) {
+      this.converter = GenericConverter.getInstance();
+    } else {
+      this.converter = converter;
+    }
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *          identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *          used to modify the underlying HBase table
+   * @param columnQualifier
+   *          column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *          version timestamp. When null the current timestamp multiplied
+   *          with TimestampGenerator.TS_MULTIPLIER and added with last 3
+   *          digits of app id will be used
+   * @param inputValue
+   *          the value to write to the rowKey and column qualifier. Nothing
+   *          gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation (sending
+   *           mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
+      byte[] columnQualifier, Long timestamp, Object inputValue,
+      Attribute... attributes) throws IOException {
+    if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+      return;
+    }
+    Put p = new Put(rowKey);
+    timestamp = getPutTimestamp(timestamp, attributes);
+    p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+        converter.encodeValue(inputValue));
+    if ((attributes != null) && (attributes.length > 0)) {
+      for (Attribute attribute : attributes) {
+        p.setAttribute(attribute.getName(), attribute.getVa
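
The timestamp rule in the store() javadoc above deserves a worked example: when the caller passes a null timestamp, the cell version is synthesized from wall-clock time and the application id. A hedged sketch of that arithmetic follows; the multiplier value is an assumption for illustration, not read from the real TimestampGenerator.

// Hypothetical illustration of the "supplemented" cell timestamp described in
// the store() javadoc: wall-clock millis scaled up, with the last three digits
// of the app id folded in so two apps writing the same cell in the same
// millisecond get distinct HBase cell versions.
public final class SupplementedTimestampDemo {
  private static final long TS_MULTIPLIER = 1_000_000L; // assumed value

  static long supplement(long wallClockMillis, long appIdSequenceNum) {
    return wallClockMillis * TS_MULTIPLIER + (appIdSequenceNum % 1000);
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    System.out.println("app 42:   " + supplement(now, 42L));
    // Only the last three digits matter, so app 1042 would collide with app 42
    // in the same millisecond; this sketch shows the shape of the computation,
    // not its full collision handling.
    System.out.println("app 1042: " + supplement(now, 1042L));
  }
}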

[2/2] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk

2017-01-19 Thread mingma
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdf72029
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdf72029
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdf72029

Branch: refs/heads/trunk
Commit: fdf72029929bd31f3cb996525621c8d2cdfd6326
Parents: f3fb94b b01514f
Author: Ming Ma 
Authored: Thu Jan 19 22:31:43 2017 -0800
Committer: Ming Ma 
Committed: Thu Jan 19 22:31:43 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.java  |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 .../storage/reader/ApplicationEntityReader.java | 481 +++

[1/2] hadoop git commit: HDFS-11296. Maintenance state expiry should be an epoch time and not jvm monotonic. (Manoj Govindassamy via mingma)

2017-01-19 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/trunk b01514f65 -> fdf720299


HDFS-11296. Maintenance state expiry should be an epoch time and not jvm monotonic. (Manoj Govindassamy via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3fb94be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3fb94be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3fb94be

Branch: refs/heads/trunk
Commit: f3fb94be05a61a4c4c06ab279897e5de2b181b0e
Parents: 60865c8
Author: Ming Ma 
Authored: Thu Jan 19 22:31:15 2017 -0800
Committer: Ming Ma 
Committed: Thu Jan 19 22:31:15 2017 -0800

--
 .../org/apache/hadoop/hdfs/protocol/DatanodeInfo.java   |  2 +-
 .../org/apache/hadoop/hdfs/TestMaintenanceState.java| 12 ++--
 .../hadoop/hdfs/server/namenode/TestNameNodeMXBean.java |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
--
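
Worth spelling out why the change matters: Time.monotonicNow() readings are only comparable to other readings taken in the same JVM, while an admin-supplied maintenance expiry is an epoch timestamp that must stay meaningful across NameNode restarts. A JDK-only sketch of the distinction, mirroring the pattern the hunks below apply in the real code:

// Epoch vs. monotonic time, JDK only. An admin-supplied expiry is an epoch
// timestamp, so it must be compared against wall-clock time; nanoTime() is
// meaningful only relative to other nanoTime() readings in the same process.
public final class ClockDemo {
  static boolean notExpired(long expiryEpochMillis) {
    // Wall-clock comparison: stable across JVM restarts.
    return System.currentTimeMillis() < expiryEpochMillis;
  }

  public static void main(String[] args) {
    long expiry = System.currentTimeMillis() + 60_000; // one minute from now
    System.out.println("not expired? " + notExpired(expiry));
    // The pattern the patch removes: a monotonic reading has an arbitrary
    // origin, so comparing it to an epoch timestamp is meaningless.
    System.out.println("monotonic ns (arbitrary origin): " + System.nanoTime());
  }
}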


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fb94be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 8f9f3d5..41735b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -511,7 +511,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   public static boolean maintenanceNotExpired(long maintenanceExpireTimeInMS) {
-    return Time.monotonicNow() < maintenanceExpireTimeInMS;
+    return Time.now() < maintenanceExpireTimeInMS;
   }
   /**
    * Returns true if the node is entering_maintenance

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fb94be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index c125f45..9cc130b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -114,7 +114,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }
@@ -133,8 +133,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     final FileSystem fileSys = getCluster().getFileSystem(0);
     writeFile(fileSys, file, replicas, 1);
 
-    // expiration has to be greater than Time.monotonicNow().
-    takeNodeOutofService(0, null, Time.monotonicNow(), null,
+    // expiration has to be greater than Time.now().
+    takeNodeOutofService(0, null, Time.now(), null,
         AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
@@ -203,7 +203,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     // no change
     assertEquals(deadInMaintenance, ns.getNumInMaintenanceDeadDataNodes());
@@ -257,7 +257,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }
@@ -398,7 +398,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fb94be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
---

hadoop git commit: HDFS-11296. Maintenance state expiry should be an epoch time and not jvm monotonic. (Manoj Govindassamy via mingma)

2017-01-19 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d37408767 -> bed700e98


HDFS-11296. Maintenance state expiry should be an epoch time and not jvm monotonic. (Manoj Govindassamy via mingma)

(cherry picked from commit f3fb94be05a61a4c4c06ab279897e5de2b181b0e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bed700e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bed700e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bed700e9

Branch: refs/heads/branch-2
Commit: bed700e98f08c37db7cd1a42d458add97b2b3409
Parents: d374087
Author: Ming Ma 
Authored: Thu Jan 19 22:31:15 2017 -0800
Committer: Ming Ma 
Committed: Thu Jan 19 22:33:43 2017 -0800

--
 .../org/apache/hadoop/hdfs/protocol/DatanodeInfo.java   |  2 +-
 .../org/apache/hadoop/hdfs/TestMaintenanceState.java| 12 ++--
 .../hadoop/hdfs/server/namenode/TestNameNodeMXBean.java |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bed700e9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index db30075..c6a69ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -509,7 +509,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   public static boolean maintenanceNotExpired(long maintenanceExpireTimeInMS) {
-    return Time.monotonicNow() < maintenanceExpireTimeInMS;
+    return Time.now() < maintenanceExpireTimeInMS;
   }
   /**
    * Returns true if the node is entering_maintenance

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bed700e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index c125f45..9cc130b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -114,7 +114,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }
@@ -133,8 +133,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     final FileSystem fileSys = getCluster().getFileSystem(0);
     writeFile(fileSys, file, replicas, 1);
 
-    // expiration has to be greater than Time.monotonicNow().
-    takeNodeOutofService(0, null, Time.monotonicNow(), null,
+    // expiration has to be greater than Time.now().
+    takeNodeOutofService(0, null, Time.now(), null,
         AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
@@ -203,7 +203,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     // no change
     assertEquals(deadInMaintenance, ns.getNumInMaintenanceDeadDataNodes());
@@ -257,7 +257,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }
@@ -398,7 +398,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Adjust the expiration.
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
-        Time.monotonicNow() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
+        Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
 
     cleanupFile(fileSys, file);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bed700e9/hadoop-hdfs-project/hadoop-hdfs/src/test/jav

hadoop git commit: HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via lei)

2017-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk fdf720299 -> e015b5631


HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e015b563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e015b563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e015b563

Branch: refs/heads/trunk
Commit: e015b563197a475e354bf84fd27e7bbcc67e00a4
Parents: fdf7202
Author: Lei Xu 
Authored: Fri Jan 20 14:34:02 2017 +0800
Committer: Lei Xu 
Committed: Fri Jan 20 14:48:04 2017 +0800

--
 .../src/site/markdown/CredentialProviderAPI.md  |   1 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  37 -
 .../src/site/markdown/index.md  |  44 ++
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 152 +++
 4 files changed, 226 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e015b563/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index a40bf2b..30dfdd8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -102,6 +102,7 @@ In summary, first, provision the credentials into a provider then configure the
 |YARN |WebAppUtils uptakes the use of the credential provider API through the new method on Configuration called getPassword. This provides an alternative to storing the passwords in clear text within the ssl-server.xml file while maintaining backward compatibility.|TODO|
 |AWS  S3/S3A |Uses Configuration.getPassword to get the S3 credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[AWS S3/S3A Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
 |Azure  WASB |Uses Configuration.getPassword to get the WASB credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure WASB Usage](../../hadoop-azure/index.html)|
+|Azure  ADLS |Uses Configuration.getPassword to get the ADLS credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure ADLS Usage](../../hadoop-azure-datalake/index.html)|
 |Apache  Accumulo|The trace.password property is used by the Tracer to authenticate with Accumulo and persist the traces in the trace table. The credential provider API is used to acquire the trace.password from a provider or from configuration for backward compatibility.|TODO|
 |Apache  Slider  |A capability has been added to Slider to prompt the user for needed passwords and store them using CredentialProvider so they can be retrieved by an app later.|TODO|
 |Apache  Hive|Protection of the metastore password, SSL related passwords and JDO string password has been added through the use of the Credential Provider API|TODO|
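
The pattern this patch adopts for ADLS is the same one the table lists for S3A and WASB: resolve secrets through Configuration.getPassword, which consults any configured credential providers before falling back to clear-text configuration. A hedged sketch of such a lookup follows; the provider path is illustrative, and the key name is an example that may not match the exact AdlConfKeys constant.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

// Sketch of a credential-provider-backed secret lookup (illustrative values).
public final class CredentialLookupDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Secrets provisioned into this store (e.g. with "hadoop credential
    // create") take precedence over clear-text values in the config files.
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/tmp/adls.jceks");
    char[] secret = conf.getPassword("fs.adl.oauth2.credential");
    System.out.println(secret == null
        ? "credential not found in any provider or in the config"
        : "resolved credential of length " + secret.length);
  }
}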

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e015b563/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index bd43c52..3d41025 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -58,10 +58,12 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.VersionInfo;
+
 import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 
 /**
@@ -224,8 +226,10 @@ public class AdlFileSystem extends FileSystem {
     return azureTokenProvider;
   }
 
-  private AccessTokenProvider getAccessTokenProvider(Configuration conf)
+  private AccessTokenProvider getAccessTokenProvider(Configuration config)
       throws IOException {
+    Configuration conf = ProviderUtils.excludeIncompatibleCrede