hadoop git commit: MAPREDUCE-6881. Fix warnings from Spotbugs in hadoop-mapreduce. Contributed by Weiwei Yang.

2017-04-26 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28eb2aabe -> 3ed3062fe


MAPREDUCE-6881. Fix warnings from Spotbugs in hadoop-mapreduce. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ed3062f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ed3062f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ed3062f

Branch: refs/heads/trunk
Commit: 3ed3062fe3979ff55a411b730a8eee2b2c96d6b3
Parents: 28eb2aa
Author: Akira Ajisaka 
Authored: Thu Apr 27 15:45:33 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 27 15:45:33 2017 +0900

--
 .../hadoop/mapred/LocalContainerLauncher.java   | 46 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 14 +++---
 .../java/org/apache/hadoop/mapred/JVMId.java|  2 +-
 .../org/apache/hadoop/mapred/Operation.java | 14 --
 .../mapreduce/v2/hs/HistoryFileManager.java |  5 ---
 .../org/apache/hadoop/examples/pi/Parser.java   |  8 +++-
 6 files changed, 53 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ed3062f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 190d988..0b942b0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -27,6 +27,8 @@ import java.lang.management.ThreadMXBean;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
+import java.util.Collections;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
@@ -81,7 +83,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private static final Log LOG = 
LogFactory.getLog(LocalContainerLauncher.class);
 
   private FileContext curFC = null;
-  private final HashSet localizedFiles;
+  private Set localizedFiles = new HashSet();
   private final AppContext context;
   private final TaskUmbilicalProtocol umbilical;
   private final ClassLoader jobClassLoader;
@@ -121,9 +123,12 @@ public class LocalContainerLauncher extends 
AbstractService implements
 // users who do that get what they deserve (and will have to disable
 // uberization in order to run correctly).
 File[] curLocalFiles = curDir.listFiles();
-localizedFiles = new HashSet(curLocalFiles.length);
-for (int j = 0; j < curLocalFiles.length; ++j) {
-  localizedFiles.add(curLocalFiles[j]);
+if (curLocalFiles != null) {
+  HashSet lf = new HashSet(curLocalFiles.length);
+  for (int j = 0; j < curLocalFiles.length; ++j) {
+lf.add(curLocalFiles[j]);
+  }
+  localizedFiles = Collections.unmodifiableSet(lf);
 }
 
 // Relocalization note/future FIXME (per chrisdo, 20110315):  At moment,
@@ -521,26 +526,29 @@ public class LocalContainerLauncher extends 
AbstractService implements
  */
 private void relocalize() {
   File[] curLocalFiles = curDir.listFiles();
-  for (int j = 0; j < curLocalFiles.length; ++j) {
-if (!localizedFiles.contains(curLocalFiles[j])) {
-  // found one that wasn't there before:  delete it
-  boolean deleted = false;
-  try {
-if (curFC != null) {
-  // this is recursive, unlike File delete():
-  deleted = curFC.delete(new 
Path(curLocalFiles[j].getName()),true);
+  if (curLocalFiles != null) {
+for (int j = 0; j < curLocalFiles.length; ++j) {
+  if (!localizedFiles.contains(curLocalFiles[j])) {
+// found one that wasn't there before:  delete it
+boolean deleted = false;
+try {
+  if (curFC != null) {
+// this is recursive, unlike File delete():
+deleted =
+curFC.delete(new Path(curLocalFiles[j].getName()), true);
+  }
+} catch (IOException e) {
+  deleted = false;
+}
+if (!deleted) {
+  LOG.warn("Unable to delete unexpected local file/dir "
+  + curLocalFiles[j].getName()
+  + ":

hadoop git commit: HDFS-11384. Balancer disperses getBlocks calls to avoid NameNode's rpc queue saturation. Contributed by Konstantin V Shvachko.

2017-04-26 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e84588eb0 -> 4cbf5c5c4


HDFS-11384. Balancer disperses getBlocks calls to avoid NameNode's rpc queue 
saturation. Contributed by Konstantin V Shvachko.

(cherry picked from commit 28eb2aabebd15c15a357d86e23ca407d3c85211c)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cbf5c5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cbf5c5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cbf5c5c

Branch: refs/heads/branch-2
Commit: 4cbf5c5c4138329747d5cd04f24712795e7aba0a
Parents: e84588e
Author: Konstantin V Shvachko 
Authored: Wed Apr 26 17:28:49 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Wed Apr 26 19:02:48 2017 -0700

--
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 41 +++-
 .../hdfs/server/balancer/TestBalancer.java  | 98 ++--
 .../server/balancer/TestBalancerRPCDelay.java   | 32 +++
 .../blockmanagement/BlockManagerTestUtil.java   |  5 +
 .../hdfs/server/namenode/NameNodeAdapter.java   | 31 ++-
 5 files changed, 195 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cbf5c5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 40036e5..fd09474 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -40,6 +40,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -806,8 +807,11 @@ public class Dispatcher {
  * namenode for more blocks. It terminates when it has dispatch enough 
block
  * move tasks or it has received enough blocks from the namenode, or the
  * elapsed time of the iteration has exceeded the max time limit.
+ *
+ * @param delay - time to sleep before sending getBlocks. Intended to
+ * disperse Balancer RPCs to NameNode for large clusters. See HDFS-11384.
  */
-private void dispatchBlocks() {
+private void dispatchBlocks(long delay) {
   this.blocksToReceive = 2 * getScheduledSize();
   long previousMoveTimestamp = Time.monotonicNow();
   while (getScheduledSize() > 0 && !isIterationOver()
@@ -832,15 +836,25 @@ public class Dispatcher {
 if (shouldFetchMoreBlocks()) {
   // fetch new blocks
   try {
+if(delay > 0) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Sleeping " + delay + "  msec.");
+  }
+  Thread.sleep(delay);
+}
 final long received = getBlockList();
 if (received == 0) {
   return;
 }
 blocksToReceive -= received;
 continue;
+  } catch (InterruptedException ignored) {
+// nothing to do
   } catch (IOException e) {
 LOG.warn("Exception while getting block list", e);
 return;
+  } finally {
+delay = 0L;
   }
 } else {
   // jump out of while-loop after the configured timeout.
@@ -1032,6 +1046,12 @@ public class Dispatcher {
   }
 
   /**
+   * The best-effort limit on the number of RPCs per second
+   * the Balancer will send to the NameNode.
+   */
+  final static int BALANCER_NUM_RPC_PER_SEC = 20;
+
+  /**
* Dispatch block moves for each source. The thread selects blocks to move &
* sends request to proxy source to initiate block move. The process is flow
* controlled. Block selection is blocked if there are too many un-confirmed
@@ -1043,15 +1063,32 @@ public class Dispatcher {
 final long bytesLastMoved = getBytesMoved();
 final Future[] futures = new Future[sources.size()];
 
+int concurrentThreads = Math.min(sources.size(),
+((ThreadPoolExecutor)dispatchExecutor).getCorePoolSize());
+assert concurrentThreads > 0 : "Number of concurrent threads is 0.";
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Balancer allowed RPCs per sec = " + BALANCER_NUM_RPC_PER_SEC);
+  LOG.debug("B

hadoop git commit: HDFS-11384. Balancer disperses getBlocks calls to avoid NameNode's rpc queue saturation. Contributed by Konstantin V Shvachko.

2017-04-26 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8b5f2c372 -> 28eb2aabe


HDFS-11384. Balancer disperses getBlocks calls to avoid NameNode's rpc queue 
saturation. Contributed by Konstantin V Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28eb2aab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28eb2aab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28eb2aab

Branch: refs/heads/trunk
Commit: 28eb2aabebd15c15a357d86e23ca407d3c85211c
Parents: 8b5f2c3
Author: Konstantin V Shvachko 
Authored: Wed Apr 26 17:28:49 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Wed Apr 26 17:28:49 2017 -0700

--
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 41 +++-
 .../hdfs/server/balancer/TestBalancer.java  | 98 ++--
 .../server/balancer/TestBalancerRPCDelay.java   | 32 +++
 .../blockmanagement/BlockManagerTestUtil.java   |  5 +
 .../hdfs/server/namenode/NameNodeAdapter.java   | 31 ++-
 5 files changed, 195 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28eb2aab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index dc81901..91dc907 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -901,8 +902,11 @@ public class Dispatcher {
  * namenode for more blocks. It terminates when it has dispatch enough 
block
  * move tasks or it has received enough blocks from the namenode, or the
  * elapsed time of the iteration has exceeded the max time limit.
+ *
+ * @param delay - time to sleep before sending getBlocks. Intended to
+ * disperse Balancer RPCs to NameNode for large clusters. See HDFS-11384.
  */
-private void dispatchBlocks() {
+private void dispatchBlocks(long delay) {
   this.blocksToReceive = 2 * getScheduledSize();
   long previousMoveTimestamp = Time.monotonicNow();
   while (getScheduledSize() > 0 && !isIterationOver()
@@ -927,15 +931,25 @@ public class Dispatcher {
 if (shouldFetchMoreBlocks()) {
   // fetch new blocks
   try {
+if(delay > 0) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Sleeping " + delay + "  msec.");
+  }
+  Thread.sleep(delay);
+}
 final long received = getBlockList();
 if (received == 0) {
   return;
 }
 blocksToReceive -= received;
 continue;
+  } catch (InterruptedException ignored) {
+// nothing to do
   } catch (IOException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
+  } finally {
+delay = 0L;
   }
 } else {
   // jump out of while-loop after the configured timeout.
@@ -1125,6 +1139,12 @@ public class Dispatcher {
   }
 
   /**
+   * The best-effort limit on the number of RPCs per second
+   * the Balancer will send to the NameNode.
+   */
+  final static int BALANCER_NUM_RPC_PER_SEC = 20;
+
+  /**
* Dispatch block moves for each source. The thread selects blocks to move &
* sends request to proxy source to initiate block move. The process is flow
* controlled. Block selection is blocked if there are too many un-confirmed
@@ -1136,15 +1156,32 @@ public class Dispatcher {
 final long bytesLastMoved = getBytesMoved();
 final Future[] futures = new Future[sources.size()];
 
+int concurrentThreads = Math.min(sources.size(),
+((ThreadPoolExecutor)dispatchExecutor).getCorePoolSize());
+assert concurrentThreads > 0 : "Number of concurrent threads is 0.";
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Balancer allowed RPCs per sec = " + BALANCER_NUM_RPC_PER_SEC);
+  LOG.debug("Balancer concurrent threads = " + concurrentThreads);
+  LOG.debug("Disperse Interval sec = " +
+  concurrentThreads / BALANCER_NUM_RPC_PER_SEC);
+}
+long dSec = 0;
 fina

hadoop git commit: HDFS-10455. Logging the username when deny the setOwner operation. Contributed by Rakesh R.

2017-04-26 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 54bbdb4c0 -> ac12063ee


HDFS-10455. Logging the username when deny the setOwner operation. Contributed 
by Rakesh R.

(cherry picked from commit 1c5708f6ded6ba9fc007eed93c1f65ba44451241)
(cherry picked from commit c895f0893621fd6196c42ed46b97cb99cc2e9bee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac12063e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac12063e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac12063e

Branch: refs/heads/branch-2.7
Commit: ac12063eeb2fec1e686ee4f9f992b229dbca8f6f
Parents: 54bbdb4
Author: Brahma Reddy Battula 
Authored: Tue Nov 1 10:49:43 2016 +0530
Committer: Zhe Zhang 
Committed: Wed Apr 26 15:59:21 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java| 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac12063e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 33544e0..07c2666 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -241,6 +241,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond 
precision.
 (Erik Krogen via zhz)
 
+HDFS-10455. Logging the username when deny the setOwner operation.
+(Tianyi Xu and Rakesh R via Brahma Reddy Battula)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac12063e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index e35b1fb..8b4793c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -78,10 +78,12 @@ public class FSDirAttrOp {
   fsd.checkOwner(pc, iip);
   if (!pc.isSuperUser()) {
 if (username != null && !pc.getUser().equals(username)) {
-  throw new AccessControlException("Non-super user cannot change 
owner");
+  throw new AccessControlException("User " + username
+  + " is not a super user (non-super user cannot change owner).");
 }
 if (group != null && !pc.containsGroup(group)) {
-  throw new AccessControlException("User does not belong to " + group);
+  throw new AccessControlException(
+  "User " + username + " does not belong to " + group);
 }
   }
   unprotectedSetOwner(fsd, src, username, group);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[2/2] hadoop git commit: HADOOP-14351. Azure: RemoteWasbAuthorizerImpl and RemoteSASKeyGeneratorImpl should not use Kerberos interactive user cache. Contributed by Santhosh G Nayak

2017-04-26 Thread liuml07
HADOOP-14351. Azure: RemoteWasbAuthorizerImpl and RemoteSASKeyGeneratorImpl 
should not use Kerberos interactive user cache. Contributed by Santhosh G Nayak

(cherry picked from commit 8b5f2c372e70999f3ee0a0bd685a494e06bc3652)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e84588eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e84588eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e84588eb

Branch: refs/heads/branch-2
Commit: e84588eb031feb59095847b90958e630515552de
Parents: 8945216
Author: Mingliang Liu 
Authored: Wed Apr 26 13:46:59 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Apr 26 13:51:58 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  3 ---
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 26 ++--
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 22 +
 .../fs/azure/security/WasbTokenRenewer.java |  6 -
 4 files changed, 14 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e84588eb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index f6ed607..abad169 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2987,9 +2987,6 @@ public class NativeAzureFileSystem extends FileSystem {
 if (connectUgi == null) {
   connectUgi = ugi;
 }
-if (!connectUgi.hasKerberosCredentials()) {
-  connectUgi = UserGroupInformation.getLoginUser();
-}
 connectUgi.checkTGTAndReloginFromKeytab();
 return connectUgi.doAs(new PrivilegedExceptionAction>() {
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e84588eb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index ed4c2b8..11b3b1e 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -96,7 +96,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   private static final String RELATIVE_PATH_QUERY_PARAM_NAME =
   "relative_path";
 
-  private String delegationToken = "";
+  private String delegationToken;
   private String credServiceUrl = "";
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isSecurityEnabled;
@@ -109,14 +109,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   public void initialize(Configuration conf) throws IOException {
 
 LOG.debug("Initializing RemoteSASKeyGeneratorImpl instance");
-try {
-  delegationToken = SecurityUtils.getDelegationTokenFromCredentials();
-} catch (IOException e) {
-  final String msg = "Error in fetching the WASB delegation token";
-  LOG.error(msg, e);
-  throw new IOException(msg, e);
-}
-
+setDelegationToken();
 try {
   credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
 } catch (UnknownHostException e) {
@@ -145,6 +138,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
 try {
   LOG.debug("Generating Container SAS Key for Container {} "
   + "inside Storage Account {} ", container, storageAccount);
+  setDelegationToken();
   URIBuilder uriBuilder = new URIBuilder(credServiceUrl);
   uriBuilder.setPath("/" + CONTAINER_SAS_OP);
   uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME,
@@ -165,10 +159,6 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   } else {
 uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
   }
-
-  if (isSecurityEnabled && !connectUgi.hasKerberosCredentials()) {
-connectUgi = UserGroupInformation.getLoginUser();
-  }
   return getSASKey(uriBuilder.build(), connectUgi);
 } catch (URISyntaxException uriSyntaxEx) {
   throw new SASKeyGenerationException("Encountered URISyntaxException "
@@ -187,6 +177,7 @@ public class RemoteSASKeyGeneratorImpl extends 

[1/2] hadoop git commit: HADOOP-14351. Azure: RemoteWasbAuthorizerImpl and RemoteSASKeyGeneratorImpl should not use Kerberos interactive user cache. Contributed by Santhosh G Nayak

2017-04-26 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 894521673 -> e84588eb0
  refs/heads/trunk 4f3ca0396 -> 8b5f2c372


HADOOP-14351. Azure: RemoteWasbAuthorizerImpl and RemoteSASKeyGeneratorImpl 
should not use Kerberos interactive user cache. Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b5f2c37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b5f2c37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b5f2c37

Branch: refs/heads/trunk
Commit: 8b5f2c372e70999f3ee0a0bd685a494e06bc3652
Parents: 4f3ca03
Author: Mingliang Liu 
Authored: Wed Apr 26 13:46:59 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Apr 26 13:47:18 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  3 ---
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 26 ++--
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 22 +
 .../fs/azure/security/WasbTokenRenewer.java |  6 -
 4 files changed, 14 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b5f2c37/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index e06522b..8f6dd4b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2987,9 +2987,6 @@ public class NativeAzureFileSystem extends FileSystem {
 if (connectUgi == null) {
   connectUgi = ugi;
 }
-if (!connectUgi.hasKerberosCredentials()) {
-  connectUgi = UserGroupInformation.getLoginUser();
-}
 connectUgi.checkTGTAndReloginFromKeytab();
 return connectUgi.doAs(new PrivilegedExceptionAction>() {
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b5f2c37/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index aab62a1..387d911 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -97,7 +97,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   private static final String RELATIVE_PATH_QUERY_PARAM_NAME =
   "relative_path";
 
-  private String delegationToken = "";
+  private String delegationToken;
   private String credServiceUrl = "";
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isSecurityEnabled;
@@ -110,14 +110,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   public void initialize(Configuration conf) throws IOException {
 
 LOG.debug("Initializing RemoteSASKeyGeneratorImpl instance");
-try {
-  delegationToken = SecurityUtils.getDelegationTokenFromCredentials();
-} catch (IOException e) {
-  final String msg = "Error in fetching the WASB delegation token";
-  LOG.error(msg, e);
-  throw new IOException(msg, e);
-}
-
+setDelegationToken();
 try {
   credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
 } catch (UnknownHostException e) {
@@ -146,6 +139,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
 try {
   LOG.debug("Generating Container SAS Key for Container {} "
   + "inside Storage Account {} ", container, storageAccount);
+  setDelegationToken();
   URIBuilder uriBuilder = new URIBuilder(credServiceUrl);
   uriBuilder.setPath("/" + CONTAINER_SAS_OP);
   uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME,
@@ -166,10 +160,6 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   } else {
 uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
   }
-
-  if (isSecurityEnabled && !connectUgi.hasKerberosCredentials()) {
-connectUgi = UserGroupInformation.getLoginUser();
-  }
   return getSASKey(uriBuilder.build(), connectUgi);
 } catch (URISyntaxException uriSyntaxEx) {
   throw new SASKeyGenerationException("Encountered URISyntaxException "
@@ -188,6 +178,

hadoop git commit: HADOOP-14323. ITestS3GuardListConsistency failure w/ Local, authoritative metadata store. Contributed by Aaron Fabbri

2017-04-26 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 1b27f15d1 -> 31e737be0


HADOOP-14323. ITestS3GuardListConsistency failure w/ Local, authoritative 
metadata store. Contributed by Aaron Fabbri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31e737be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31e737be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31e737be

Branch: refs/heads/HADOOP-13345
Commit: 31e737be09bbbec25a9f16456810fde24ad2e0e7
Parents: 1b27f15
Author: Mingliang Liu 
Authored: Wed Apr 26 13:35:21 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Apr 26 13:35:21 2017 -0700

--
 .../fs/s3a/s3guard/DirListingMetadata.java  | 10 +++
 .../fs/s3a/s3guard/LocalMetadataStore.java  |  3 +-
 .../fs/s3a/ITestS3GuardListConsistency.java | 79 ++--
 3 files changed, 69 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31e737be/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
index ba6d1a6..f13b447 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
@@ -81,6 +81,16 @@ public class DirListingMetadata {
   }
 
   /**
+   * Copy constructor.
+   * @param d the existing {@link DirListingMetadata} object.
+   */
+  public DirListingMetadata(DirListingMetadata d) {
+path = d.path;
+isAuthoritative = d.isAuthoritative;
+listMap = new ConcurrentHashMap<>(d.listMap);
+  }
+
+  /**
* @return {@code Path} of the directory that contains this listing.
*/
   public Path getPath() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31e737be/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 165ed5e..52e5b2a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -169,7 +169,8 @@ public class LocalMetadataStore implements MetadataStore {
   LOG.debug("listChildren({}) -> {}", path,
   listing == null ? "null" : listing.prettyPrint());
 }
-return listing;
+// Make a copy so callers can mutate without affecting our state
+return listing == null ? null : new DirListingMetadata(listing);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31e737be/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
index 47d88073..5e83906 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
@@ -27,10 +27,12 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.s3a.S3AContract;
 import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata;
-import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.junit.Assume;
 import org.junit.Test;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -265,6 +267,49 @@ public class ITestS3GuardListConsistency extends 
AbstractS3ATestBase {
 }
   }
 
+  private static S3AFileSystem asS3AFS(FileSystem fs) {
+assertTrue("Not a S3AFileSystem: " + fs, fs instanceof S3AFileSystem);
+return (S3AFileSystem)fs;
+  }
+
+  /** Create a separate S3AFileSystem instance for testing. */
+  private S3AFileSystem createTestFS(URI fsURI, boolean disableS3Guard,
+  boolean authoritativeMeta)
+  throws IOException {
+Configuration conf;
+
+// Create a FileSystem that is S3-backed only
+  

hadoop git commit: YARN-5617. AMs only intended to run one attempt can be run more than once? Contributed by Jason Lowe.

2017-04-26 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 19e94bfd1 -> 73563bc9b


YARN-5617. AMs only intended to run one attempt can be run more than once? 
Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73563bc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73563bc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73563bc9

Branch: refs/heads/branch-2.8
Commit: 73563bc9b66e737aec56bb4f3d5579babda8f737
Parents: 19e94bf
Author: Eric Payne 
Authored: Wed Apr 26 14:33:27 2017 -0500
Committer: Eric Payne 
Committed: Wed Apr 26 14:33:27 2017 -0500

--
 .../server/resourcemanager/rmapp/RMAppImpl.java |  19 +-
 .../applicationsmanager/TestAMRestart.java  | 177 +++
 2 files changed, 152 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73563bc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index a4feb5f..197283f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1357,11 +1357,20 @@ public class RMAppImpl implements RMApp, Recoverable {
 @Override
 public RMAppState transition(RMAppImpl app, RMAppEvent event) {
   int numberOfFailure = app.getNumFailedAppAttempts();
-  LOG.info("The number of failed attempts"
-  + (app.attemptFailuresValidityInterval > 0 ? " in previous "
-  + app.attemptFailuresValidityInterval + " milliseconds " : " ")
-  + "is " + numberOfFailure + ". The max attempts is "
-  + app.maxAppAttempts);
+  if (app.maxAppAttempts == 1) {
+// If the user explicitly set the attempts to 1 then there are likely
+// correctness issues if the AM restarts for any reason.
+LOG.info("Max app attempts is 1 for " + app.applicationId
++ ", preventing further attempts.");
+numberOfFailure = app.maxAppAttempts;
+  } else {
+LOG.info("The number of failed attempts"
++ (app.attemptFailuresValidityInterval > 0 ? " in previous "
++ app.attemptFailuresValidityInterval + " milliseconds " : " ")
++ "is " + numberOfFailure + ". The max attempts is "
++ app.maxAppAttempts);
+  }
+
   if (!app.submissionContext.getUnmanagedAM()
   && numberOfFailure < app.maxAppAttempts) {
 if (initialState.equals(RMAppState.KILLING)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73563bc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index fd478f6..c5c286a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -379,8 +379,7 @@ public class TestAMRestart {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
   ResourceScheduler.class);
-// explicitly set max-am-retry count as 1.
-conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
 conf.set(YarnConfiguration.RM_STORE, Me

hadoop git commit: YARN-6510. Fix procfs stat file warning caused by process names that include parentheses. (Wilfred Spiegelenburg via Haibo Chen)

2017-04-26 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc66153e1 -> 894521673


YARN-6510. Fix procfs stat file warning caused by process names that include 
parentheses. (Wilfred Spiegelenburg via Haibo Chen)

(cherry picked from commit 4f3ca0396a810f54f7fd0489a224c1bb13143aa4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89452167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89452167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89452167

Branch: refs/heads/branch-2
Commit: 894521673bf6a242cc102fe0c5f22290640a1d29
Parents: cc66153
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Haibo Chen 
Committed: Wed Apr 26 11:47:23 2017 -0700

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89452167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index bb2a77f..e581af5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89452167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 841d333..a0a008d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -423,7 +423,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -566,7 +566,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -812,7 +812,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -835,7 +835,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 arg4";
-  cmdLines[2] = "proc3 arg5 arg6";
+

hadoop git commit: YARN-6510. Fix procfs stat file warning caused by process names that include parentheses. (Wilfred Spiegelenburg via Haibo Chen)

2017-04-26 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk edd693833 -> 4f3ca0396


YARN-6510. Fix procfs stat file warning caused by process names that include 
parentheses. (Wilfred Spiegelenburg via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f3ca039
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f3ca039
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f3ca039

Branch: refs/heads/trunk
Commit: 4f3ca0396a810f54f7fd0489a224c1bb13143aa4
Parents: edd6938
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Haibo Chen 
Committed: Wed Apr 26 11:46:55 2017 -0700

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3ca039/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index a08b90e..d54611e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3ca039/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index c5fd40c..aad513a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -419,7 +419,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -555,7 +555,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -748,7 +748,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -771,7 +771,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 arg4";
-  cmdLines[2] = "proc3 arg5 arg6";
+  cmdLines[2] = "proc(3) arg5 arg6";
   cmdLines[3] = "proc4 arg7 arg8";

hadoop git commit: HADOOP-14340. Enable KMS and HttpFS to exclude SSL ciphers. Contributed by John Zhuge.

2017-04-26 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 93fa48fcf -> edd693833


HADOOP-14340. Enable KMS and HttpFS to exclude SSL ciphers. Contributed by John 
Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edd69383
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edd69383
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edd69383

Branch: refs/heads/trunk
Commit: edd693833b468623562c1b1085f79cbafbee9f15
Parents: 93fa48f
Author: John Zhuge 
Authored: Thu Apr 20 21:22:06 2017 -0700
Committer: John Zhuge 
Committed: Wed Apr 26 11:44:59 2017 -0700

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edd69383/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index bd10f93..cbabb33 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -384,6 +384,7 @@ public final class HttpServer2 implements FilterContainer {
   getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
   sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
   SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
+  excludeCiphers(sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST));
 }
 
 public HttpServer2 build() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11627. Block Storage: Cblock cache should register with flusher to upload blocks to containers. Contributed by Mukul Kumar Singh.

2017-04-26 Thread cliang
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 eae8c2a46 -> 50dd3a5cf


HDFS-11627. Block Storage: Cblock cache should register with flusher to upload 
blocks to containers. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50dd3a5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50dd3a5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50dd3a5c

Branch: refs/heads/HDFS-7240
Commit: 50dd3a5cfa7727089dc47b0870fb4dd5df078310
Parents: eae8c2a
Author: Chen Liang 
Authored: Wed Apr 26 10:36:56 2017 -0700
Committer: Chen Liang 
Committed: Wed Apr 26 10:36:56 2017 -0700

--
 .../apache/hadoop/cblock/CBlockConfigKeys.java  |   4 +
 .../cblock/jscsiHelper/BlockWriterTask.java |   8 +-
 .../cblock/jscsiHelper/CBlockTargetMetrics.java |  44 ++-
 .../jscsiHelper/ContainerCacheFlusher.java  |  46 ---
 .../cache/impl/AsyncBlockWriter.java|   6 +-
 .../cache/impl/CBlockLocalCache.java|   6 +-
 .../jscsiHelper/cache/impl/SyncBlockReader.java |   4 +
 .../hadoop/cblock/TestLocalBlockCache.java  | 121 +--
 8 files changed, 202 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50dd3a5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
index b1fba41..74f5dc6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
@@ -156,6 +156,10 @@ public final class CBlockConfigKeys {
   public static final int DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT =
   5;
 
+  // LevelDB cache file uses an off-heap cache in LevelDB of 256 MB.
+  public static final String DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_KEY =
+  "dfs.cblock.cache.leveldb.cache.size.mb";
+  public static final int DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_DEFAULT = 256;
 
   private CBlockConfigKeys() {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50dd3a5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
index 310dcca..6b5416b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
@@ -79,6 +79,7 @@ public class BlockWriterTask implements Runnable {
   incTryCount();
   Pipeline pipeline = flusher.getPipeline(this.dbPath, block.getBlockID());
   client = flusher.getXceiverClientManager().acquireClient(pipeline);
+  containerName = pipeline.getContainerName();
   byte[] keybuf = Longs.toByteArray(block.getBlockID());
   byte[] data;
   long startTime = Time.monotonicNow();
@@ -97,11 +98,16 @@ public class BlockWriterTask implements Runnable {
 
   flusher.incrementRemoteIO();
 
-} catch (IOException ex) {
+} catch (Exception ex) {
   flusher.getLOG().error("Writing of block failed, We have attempted " +
   "to write this block {} times to the container {}.Trace ID:{}",
   this.getTryCount(), containerName, "", ex);
   writeRetryBlock(block);
+  if (ex instanceof IOException) {
+flusher.getTargetMetrics().incNumWriteIOExceptionRetryBlocks();
+  } else {
+flusher.getTargetMetrics().incNumWriteGenericExceptionRetryBlocks();
+  }
 } finally {
   flusher.incFinishCount(fileName);
   if(client != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50dd3a5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java
index 9ba63ee..1174c33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java
+++ 
b/hadoop-hdfs-pro

[1/2] hadoop git commit: YARN-6405. Improve configuring services through REST API. Contributed by Jian He

2017-04-26 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 633eb661f -> d23a97d4e


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d23a97d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 43c7ead..9f7b4a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -20,7 +20,13 @@ package org.apache.slider.server.appmaster.state;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -42,6 +48,7 @@ import 
org.apache.slider.api.proto.Messages.ComponentCountProto;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
 import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.RoleStatistics;
@@ -79,6 +86,7 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.slider.api.ResourceKeys.*;
@@ -99,7 +107,6 @@ public class AppState {
   private final AbstractClusterServices recordFactory;
 
   private final MetricsAndMonitoring metricsAndMonitoring;
-
   /**
* Flag set to indicate the application is live -this only happens
* after the buildInstance operation
@@ -108,9 +115,11 @@ public class AppState {
 
   private Application app;
 
+  // priority_id -> RoleStatus
   private final Map roleStatusMap =
 new ConcurrentSkipListMap<>();
 
+  // component_name -> ProviderRole
   private final Map roles =
 new ConcurrentHashMap<>();
 
@@ -202,6 +211,10 @@ public class AppState {
   private SliderMetrics appMetrics;
 
   private ServiceTimelinePublisher serviceTimelinePublisher;
+
+  // A cache for loading config files from remote such as hdfs
+  public LoadingCache configFileCache = null;
+
   /**
* Create an instance
* @param recordFactory factory for YARN records
@@ -304,8 +317,6 @@ public class AppState {
   public synchronized void buildInstance(AppStateBindingInfo binding)
   throws BadClusterStateException, BadConfigException, IOException {
 binding.validate();
-
-log.debug("Building application state");
 containerReleaseSelector = binding.releaseSelector;
 
 // set the cluster specification (once its dependency the client properties
@@ -313,10 +324,8 @@ public class AppState {
 this.app = binding.application;
 appMetrics = SliderMetrics.register(app.getName(),
 "Metrics for service");
-appMetrics
-.tag("type", "Metrics type [component or service]", "service");
-appMetrics
-.tag("appId", "Application id for service", app.getId());
+appMetrics.tag("type", "Metrics type [component or service]", "service");
+appMetrics.tag("appId", "Application id for service", app.getId());
 
 org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
 startTimeThreshold =
@@ -327,12 +336,7 @@ public class AppState {
 nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
 DEFAULT_NODE_FAILURE_THRESHOLD);
 
-//build the initial role list
-List roleList = new ArrayList<>(binding.roles);
-for (ProviderRole providerRole : roleList) {
-  buildRole(providerRole);
-}
-
+//build the initial component list
 int priority = 1;
 for (Component component : app.getComponents()) {
   priority = getNewPriority(priority);
@@ -340,25 +344,1

[2/2] hadoop git commit: YARN-6405. Improve configuring services through REST API. Contributed by Jian He

2017-04-26 Thread billie
YARN-6405. Improve configuring services through REST API. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d23a97d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d23a97d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d23a97d4

Branch: refs/heads/yarn-native-services
Commit: d23a97d4efc8f2d2e45920cdb2c3ec68733078b8
Parents: 633eb66
Author: Billie Rinaldi 
Authored: Wed Apr 26 08:44:38 2017 -0700
Committer: Billie Rinaldi 
Committed: Wed Apr 26 08:44:38 2017 -0700

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   4 +-
 .../api/impl/TestApplicationApiService.java |  26 +-
 .../apache/slider/api/ServiceApiConstants.java  |  50 +++
 .../apache/slider/api/resource/ConfigFile.java  |  58 +--
 .../slider/api/resource/Configuration.java  |  39 +-
 .../org/apache/slider/client/SliderClient.java  |  16 +-
 .../org/apache/slider/common/SliderKeys.java|   4 +-
 .../slider/core/launch/CommandLineBuilder.java  |  15 -
 .../docstore/ConfigurationResolver.java |  24 --
 .../apache/slider/providers/ProviderRole.java   |  32 +-
 .../slider/providers/ProviderService.java   |   4 +-
 .../apache/slider/providers/ProviderUtils.java  | 279 +-
 .../providers/docker/DockerProviderService.java |  50 +--
 .../server/appmaster/RoleLaunchService.java |  24 +-
 .../server/appmaster/SliderAppMaster.java   |  77 ++--
 .../actions/RegisterComponentInstance.java  |  12 +-
 .../actions/UnregisterComponentInstance.java|  16 +-
 .../server/appmaster/metrics/SliderMetrics.java |  23 ++
 .../appmaster/monkey/ChaosKillContainer.java|   2 +-
 .../server/appmaster/rpc/SliderIPCService.java  |   2 +-
 .../slider/server/appmaster/state/AppState.java | 384 ++-
 .../server/appmaster/state/RoleInstance.java|  38 +-
 .../server/appmaster/state/RoleStatus.java  |   1 +
 .../state/StateAccessForProviders.java  |   1 +
 .../slider/util/RestApiErrorMessages.java   |   2 +-
 .../org/apache/slider/util/ServiceApiUtil.java  |  77 +++-
 .../TestMockAppStateDynamicHistory.java |   8 +-
 .../TestMockAppStateFlexDynamicRoles.java   |   6 +-
 .../appstate/TestMockAppStateUniqueNames.java   |  77 +++-
 .../TestMockContainerResourceAllocations.java   |  11 +
 .../model/mock/BaseMockAppStateTest.java|   9 +-
 .../model/mock/MockProviderService.java |   4 +-
 32 files changed, 850 insertions(+), 525 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d23a97d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index e9239e4..82cc30f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -347,10 +347,10 @@ definitions:
 description: The absolute path that this configuration file should be 
mounted as, in the application container.
   src_file:
 type: string
-description: Required for type template. This provides the source 
location of the template which needs to be mounted as dest_file post property 
substitutions. Typically the src_file would point to a source controlled 
network accessible file maintained by tools like puppet, chef, etc.
+description: This provides the source location of the configuration 
file, the content of which is dumped to dest_file post property substitutions, 
in the format as specified in type. Typically the src_file would point to a 
source controlled network accessible file maintained by tools like puppet, 
chef, or hdfs etc. Currently, only hdfs is supported.
   props:
 type: object
-description: A blob of key value pairs that will be dumped in the 
dest_file in the format as specified in type. If the type is template then the 
attribute src_file is mandatory and the src_file content is dumped to dest_file 
post property substitutions.
+description: A blob of key value pairs that will be dumped in the 
dest_file in the format as specified in type. If src_file is specified, 
src_file content are dumpe

[hadoop] Git Push Summary

2017-04-26 Thread varunsaxena
Repository: hadoop
Updated Tags:  refs/tags/YARN-5355-branch-2-2017-04-25 [created] 3f3e926ba

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: YARN-6253. FlowActivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
YARN-6253. FlowActivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. 
Contributed by Haibo Chen.

(cherry picked from commit 34e7c30293b5a56f9f745769a29c5666bdb85d6c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/177a324d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/177a324d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/177a324d

Branch: refs/heads/YARN-5355-branch-2
Commit: 177a324db7f07685ab593cf8ad33af34e7a47f4e
Parents: 70b788a
Author: Sangjin Lee 
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/177a324d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
 byte[] columnQualifier = getColumnPrefixBytes(qualifier);
 Attribute[] combinedAttributes =
 HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
 combinedAttributes);
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: Addendum for YARN-6064. Support fromId for flowRuns and flow/flowRun apps REST API's

2017-04-26 Thread varunsaxena
Addendum for YARN-6064. Support fromId for flowRuns and flow/flowRun apps REST 
API's

(cherry picked from commit c9246f619104bff44dd453a4ffe70104d37fd781)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5efd6cc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5efd6cc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5efd6cc7

Branch: refs/heads/YARN-5355-branch-2
Commit: 5efd6cc726a21955a36a5507e615666802e24b21
Parents: 5d27b1c
Author: Varun Saxena 
Authored: Thu Jan 19 10:15:28 2017 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../storage/reader/ApplicationEntityReader.java| 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5efd6cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index 8a331c3..4e8286d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -375,9 +375,9 @@ class ApplicationEntityReader extends GenericEntityReader {
   Long flowRunId = context.getFlowRunId();
   if (flowRunId == null) {
 AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(
-context.getClusterId(), getFilters().getFromId());
-FlowContext flowContext =
-lookupFlowContext(appToFlowRowKey, hbaseConf, conn);
+getFilters().getFromId());
+FlowContext flowContext = lookupFlowContext(appToFlowRowKey,
+context.getClusterId(), hbaseConf, conn);
 flowRunId = flowContext.getFlowRunId();
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the 
size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KI

[47/50] [abbrv] hadoop git commit: YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e59486a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 4a9e53e..4d3e769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -76,12 +76,44 @@ final class TimelineReaderWebServicesUtils {
   String isRelatedTo, String infofilters, String conffilters,
   String metricfilters, String eventfilters,
   String fromid) throws TimelineParseException {
-return new TimelineEntityFilters(parseLongStr(limit),
-parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
-parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
-parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
-parseMetricFilters(metricfilters), parseEventFilters(eventfilters),
-parseStr(fromid));
+return createTimelineEntityFilters(
+limit, parseLongStr(createdTimeStart),
+parseLongStr(createdTimeEnd),
+relatesTo, isRelatedTo, infofilters,
+conffilters, metricfilters, eventfilters, fromid);
+  }
+
+  /**
+   * Parse the passed filters represented as strings and convert them into a
+   * {@link TimelineEntityFilters} object.
+   * @param limit Limit to number of entities to return.
+   * @param createdTimeStart Created time start for the entities to return.
+   * @param createdTimeEnd Created time end for the entities to return.
+   * @param relatesTo Entities to return must match relatesTo.
+   * @param isRelatedTo Entities to return must match isRelatedTo.
+   * @param infofilters Entities to return must match these info filters.
+   * @param conffilters Entities to return must match these metric filters.
+   * @param metricfilters Entities to return must match these metric filters.
+   * @param eventfilters Entities to return must match these event filters.
+   * @return a {@link TimelineEntityFilters} object.
+   * @throws TimelineParseException if any problem occurs during parsing.
+   */
+  static TimelineEntityFilters createTimelineEntityFilters(String limit,
+  Long createdTimeStart, Long createdTimeEnd, String relatesTo,
+  String isRelatedTo, String infofilters, String conffilters,
+  String metricfilters, String eventfilters,
+  String fromid) throws TimelineParseException {
+return new TimelineEntityFilters.Builder()
+.entityLimit(parseLongStr(limit))
+.createdTimeBegin(createdTimeStart)
+.createTimeEnd(createdTimeEnd)
+.relatesTo(parseRelationFilters(relatesTo))
+.isRelatedTo(parseRelationFilters(isRelatedTo))
+.infoFilters(parseKVFilters(infofilters, false))
+.configFilters(parseKVFilters(conffilters, true))
+.metricFilters(parseMetricFilters(metricfilters))
+.eventFilters(parseEventFilters(eventfilters))
+.fromId(parseStr(fromid)).build();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e59486a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
index 35af169..1bc66db 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
@@ -398,7 +398,7 @@ public class Te

[38/50] [abbrv] hadoop git commit: YARN-6170. TimelineReaderServer should wait to join with HttpServer2 (Sangjin Lee via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6170. TimelineReaderServer should wait to join with HttpServer2 (Sangjin 
Lee via Varun Saxena)

(cherry picked from commit 649deb72fbb62568b4ea0d67444df6faaaed169d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d573ce44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d573ce44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d573ce44

Branch: refs/heads/YARN-5355-branch-2
Commit: d573ce4489fe2733c37ca45691120bb820522c2f
Parents: e7eed95
Author: Varun Saxena 
Authored: Sat Feb 11 19:21:45 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../timelineservice/reader/TimelineReaderServer.java   | 13 -
 1 file changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d573ce44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 116cc2a..8c5e72d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -110,6 +110,16 @@ public class TimelineReaderServer extends CompositeService 
{
 startTimelineReaderWebApp();
   }
 
+  private void join() {
+// keep the main thread that started the server up until it receives a stop
+// signal
+if (readerWebServer != null) {
+  try {
+readerWebServer.join();
+  } catch (InterruptedException ignore) {}
+}
+  }
+
   @Override
   protected void serviceStop() throws Exception {
 if (readerWebServer != null) {
@@ -185,6 +195,7 @@ public class TimelineReaderServer extends CompositeService {
 Configuration conf = new YarnConfiguration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
 conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-startTimelineReaderServer(args, conf);
+TimelineReaderServer server = startTimelineReaderServer(args, conf);
+server.join();
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-5585. [Atsv2] Reader side changes for entity prefix and support for pagination via additional filters (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-5585. [Atsv2] Reader side changes for entity prefix and support for 
pagination via additional filters (Rohith Sharma K S via Varun Saxena)

(cherry picked from commit 9a2f288a6a9e30376b7fd99bf8824184a34a54c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa5fb6ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa5fb6ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa5fb6ca

Branch: refs/heads/YARN-5355-branch-2
Commit: fa5fb6caf11cf0ffb40d657b9955d2b6e42e19d7
Parents: f155610
Author: Varun Saxena 
Authored: Sat Jan 7 01:38:36 2017 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../records/timelineservice/TimelineEntity.java |  16 +-
 ...stTimelineReaderWebServicesHBaseStorage.java | 102 +++-
 .../reader/TimelineEntityFilters.java   |  53 -
 .../reader/TimelineReaderContext.java   |  20 +-
 .../reader/TimelineReaderManager.java   |   1 +
 .../reader/TimelineReaderWebServices.java   | 230 +++
 .../reader/TimelineReaderWebServicesUtils.java  |  13 +-
 .../reader/TimelineUIDConverter.java|  19 +-
 .../reader/filter/TimelineFilterUtils.java  |  17 ++
 .../timelineservice/storage/TimelineReader.java |  10 +-
 .../storage/entity/EntityRowKey.java|  26 ++-
 .../storage/entity/EntityRowKeyPrefix.java  |  13 +-
 .../storage/reader/ApplicationEntityReader.java |   2 +-
 .../reader/FlowActivityEntityReader.java|   2 +-
 .../storage/reader/FlowRunEntityReader.java |   2 +-
 .../storage/reader/GenericEntityReader.java |  99 ++--
 .../storage/reader/TimelineEntityReader.java|  29 +--
 .../reader/TimelineEntityReaderFactory.java |   2 +-
 .../reader/TestTimelineUIDConverter.java|   8 +-
 .../storage/common/TestRowKeys.java |   8 +-
 20 files changed, 512 insertions(+), 160 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5fb6ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 7a289b9..845e2cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -549,20 +549,10 @@ public class TimelineEntity implements 
Comparable {
   public int compareTo(TimelineEntity other) {
 int comparison = getType().compareTo(other.getType());
 if (comparison == 0) {
-  if (getCreatedTime() == null) {
-if (other.getCreatedTime() == null) {
-  return getId().compareTo(other.getId());
-} else {
-  return 1;
-}
-  }
-  if (other.getCreatedTime() == null) {
-return -1;
-  }
-  if (getCreatedTime() > other.getCreatedTime()) {
-// Order by created time desc
+  if (getIdPrefix() > other.getIdPrefix()) {
+// Descending order by entity id prefix
 return -1;
-  } else if (getCreatedTime() < other.getCreatedTime()) {
+  } else if (getIdPrefix() < other.getIdPrefix()) {
 return 1;
   } else {
 return getId().compareTo(other.getId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5fb6ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index a83d2dc..fa35fc5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-

[16/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for splits where a user is a prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor

[09/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String columnQuali

[11/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index da62fdf..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final KeyConverter appRowKeyConverter =
-  new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!AppId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An ApplicationRowKey object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-return new ApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
-   * strings.
-   * 
-   */
-  final private static class ApplicationRowKeyConverter implements
-  KeyConverter {
-
-private final KeyConverter appIDKeyConverter =
-new AppIdKeyConverter();
-
-/**
- * Intended for use in ApplicationRowKey only.
- */
-private ApplicationRowKeyConverter() {
-}
-
-/**
- * Application row key is of the form
- * clusterId!userName!flowName!flowRunId!appId with each segment separated
- * by !. The sizes below indicate sizes of each one of these segements in
- * sequence. clusterId, userName and flowName are strings. flowrunId is a
- * long hence 8 bytes in size. app id is represented as 12 bytes with
- * cluster timestamp part of appid takes 8 bytes(long) and seq id takes 4
- * bytes(int). Strings are variable in size (i.e. end whenever separator is
- * encountered). This is used while decoding and helps in determining where
- * to split.
- 

[39/50] [abbrv] hadoop git commit: YARN-6159. Documentation changes for TimelineV2Client (Naganarasimha G R via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6159. Documentation changes for TimelineV2Client (Naganarasimha G R via 
Varun Saxena)

(cherry picked from commit 6ba61d20d3f65e40ea8e3a49d5beebe34f04aab4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aad6683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aad6683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aad6683

Branch: refs/heads/YARN-5355-branch-2
Commit: 4aad6683f7b9936089679f7dd16c838a13f8f16c
Parents: d573ce4
Author: Varun Saxena 
Authored: Tue Feb 21 12:25:37 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../src/site/markdown/TimelineServiceV2.md  | 44 
 1 file changed, 18 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aad6683/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 02b0562..7d36a4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -340,56 +340,48 @@ To write MapReduce framework data to Timeline Service 
v.2, enable the following
 
 This section is for YARN application developers that want to integrate with 
Timeline Service v.2.
 
-Developers can continue to use the `TimelineClient` API to publish 
per-framework data to the
-Timeline Service v.2. You only need to instantiate the right type of the 
client to write to v.2.
-On the other hand, the entity/object API for v.2 is different than v.1 as the 
object model is
-significantly changed. The v.2 timeline entity class is
-`org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity` whereas 
the v.1 class is
-`org.apache.hadoop.yarn.api.records.timeline.TimelineEntity`. The methods on 
`TimelineClient`
-suitable for writing to Timeline Service v.2 are clearly delineated, and they 
use the v.2
-types as arguments.
+Developers need to use the `TimelineV2Client` API to publish per-framework 
data to the
+Timeline Service v.2. The entity/object API for v.2 is different than v.1 as
+the object model is significantly changed. The v.2 timeline entity class is
+`org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity`.
 
 Timeline Service v.2 `putEntities` methods come in 2 varieties: `putEntities` 
and
 `putEntitiesAsync`. The former is a blocking operation which must be used for 
writing more
 critical data (e.g. lifecycle events). The latter is a non-blocking operation. 
Note that neither
 has a return value.
 
-Creating a `TimelineClient` for v.2 involves passing in the application id to 
the factory method.
+Creating a `TimelineV2Client` involves passing in the application id to the 
static method
+`TimelineV2Client.createTimelineClient`.
 
 For example:
 
 
 // Create and start the Timeline client v.2
-TimelineClient client = TimelineClient.createTimelineClient(appId);
-client.init(conf);
-client.start();
+TimelineV2Client timelineClient =
+TimelineV2Client.createTimelineClient(appId);
+timelineClient.init(conf);
+timelineClient.start();
 
 try {
   TimelineEntity myEntity = new TimelineEntity();
-  myEntity.setEntityType("MY_APPLICATION");
-  myEntity.setEntityId("MyApp1")
+  myEntity.setType("MY_APPLICATION");
+  myEntity.setId("MyApp1");
   // Compose other entity info
 
   // Blocking write
-  client.putEntities(entity);
+  timelineClient.putEntities(myEntity);
 
   TimelineEntity myEntity2 = new TimelineEntity();
   // Compose other info
 
   // Non-blocking write
-  timelineClient.putEntitiesAsync(entity);
+  timelineClient.putEntitiesAsync(myEntity2);
 
-} catch (IOException e) {
-  // Handle the exception
-} catch (RuntimeException e) {
-  // In Hadoop 2.6, if attempts submit information to the Timeline Server 
fail more than the retry limit,
-  // a RuntimeException will be raised. This may change in future 
releases, being
-  // replaced with a IOException that is (or wraps) that which triggered 
retry failures.
-} catch (YarnException e) {
+} catch (IOException | YarnException e) {
   // Handle the exception
 } finally {
   // Stop the Timeline client
-  client.stop();
+  timelineClient.stop();
 }
 
 As evidenced above, you need to specify the YARN application id to be able to 
write to the Timeline
@@ -397,9 +389,9 @@ Service v.2. Note that currently you need to be on th

[17/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param  refers to the table.
+ */
+public class ColumnHelper {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), attribute.getVa

[43/50] [abbrv] hadoop git commit: YARN-6377. NMTimelinePublisher#serviceStop does not stop timeline clients (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6377. NMTimelinePublisher#serviceStop does not stop timeline clients 
(Haibo Chen via Varun Saxena)

(cherry picked from commit a4b5aa8493e0bd9006f44291d265c28ab86497e1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d9ae7f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d9ae7f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d9ae7f1

Branch: refs/heads/YARN-5355-branch-2
Commit: 6d9ae7f1448bcb1b5dca81cf823235f8ccf714b3
Parents: fd2c8d2
Author: Varun Saxena 
Authored: Sun Apr 2 04:54:12 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../nodemanager/timelineservice/NMTimelinePublisher.java  |  8 
 .../timelineservice/TestNMTimelinePublisher.java  | 10 +-
 2 files changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d9ae7f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 851ba53..ced41c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -102,6 +102,14 @@ public class NMTimelinePublisher extends CompositeService {
 this.nodeId = context.getNodeId();
   }
 
+  @Override
+  protected void serviceStop() throws Exception {
+for(ApplicationId app : appToClientMap.keySet()) {
+  stopTimelineClient(app);
+}
+super.serviceStop();
+  }
+
   @VisibleForTesting
   Map getAppToClientMap() {
 return appToClientMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d9ae7f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
index e116122..0b8eaa9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -53,14 +54,21 @@ public class TestNMTimelinePublisher {
 final DummyTimelineClient timelineClient = new DummyTimelineClient(null);
 when(context.getNodeId()).thenReturn(NodeId.newInstance("localhost", 0));
 when(context.getHttpPort()).thenReturn(0);
+
+Configuration conf = new Configuration();
+conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+
 NMTimelinePublisher publisher = new NMTimelinePublisher(context) {
   public void createTimelineClient(ApplicationId appId) {
 if (!getAppToClientMap().containsKey(appId)) {
+  timelineClient.init(getConfig());
+  timelineClient.start();
   getAppToClientMap().put(appId, timelineClient);
 }
   }
 };
-publisher.init(new 

[46/50] [abbrv] hadoop git commit: YARN-6424. TimelineCollector is not stopped when an app finishes in RM. Contributed by Varun Saxena.

2017-04-26 Thread varunsaxena
YARN-6424. TimelineCollector is not stopped when an app finishes in RM. 
Contributed by Varun Saxena.

(cherry picked from commit 1a9439e299910032ce6a1919dece3107c1c9de5b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4deb89d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4deb89d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4deb89d4

Branch: refs/heads/YARN-5355-branch-2
Commit: 4deb89d4288caf304de80ad97a255c0eb472613f
Parents: 6d9ae7f
Author: Rohith Sharma K S 
Authored: Thu Apr 6 10:15:22 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../metrics/TimelineServiceV2Publisher.java  | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4deb89d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 8b1ad1c..1fc0c38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -180,8 +180,9 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
 getTimelinelineAppMetrics(appMetrics, finishedTime);
 entity.setMetrics(entityMetrics);
 
-getDispatcher().getEventHandler().handle(new TimelineV2PublishEvent(
-SystemMetricsEventType.PUBLISH_ENTITY, entity, 
app.getApplicationId()));
+getDispatcher().getEventHandler().handle(
+new ApplicationFinishPublishEvent(SystemMetricsEventType.
+PUBLISH_APPLICATION_FINISHED_ENTITY, entity, app));
   }
 
   private Set getTimelinelineAppMetrics(
@@ -450,16 +451,16 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
   }
 
   private class ApplicationFinishPublishEvent extends TimelineV2PublishEvent {
-private RMAppImpl app;
+private RMApp app;
 
 public ApplicationFinishPublishEvent(SystemMetricsEventType type,
-TimelineEntity entity, RMAppImpl app) {
+TimelineEntity entity, RMApp app) {
   super(type, entity, app.getApplicationId());
   this.app = app;
 }
 
 public RMAppImpl getRMAppImpl() {
-  return app;
+  return (RMAppImpl) app;
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: YARN-6318. timeline service schema creator fails if executed from a remote machine (Sangjin Lee via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6318. timeline service schema creator fails if executed from a remote 
machine (Sangjin Lee via Varun Saxena)

(cherry picked from commit 68ec0d9b471356d3adef15d7826f57ee50f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e6fdeb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e6fdeb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e6fdeb3

Branch: refs/heads/YARN-5355-branch-2
Commit: 7e6fdeb36e45355fccc0acfb41fd9b4628a4cb3d
Parents: 4846b0c
Author: Varun Saxena 
Authored: Tue Mar 14 02:05:01 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../storage/TimelineSchemaCreator.java  |  5 ++-
 .../common/HBaseTimelineStorageUtils.java   | 29 ++---
 .../common/TestHBaseTimelineStorageUtils.java   | 33 
 3 files changed, 54 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e6fdeb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index dd87169..a4c1bbb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
@@ -67,8 +68,10 @@ public final class TimelineSchemaCreator {
 
   public static void main(String[] args) throws Exception {
 
+LOG.info("Starting the schema creation");
 Configuration hbaseConf =
-HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+new YarnConfiguration());
 // Grab input args and allow for -Dxyz style arguments
 String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
 .getRemainingArgs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e6fdeb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index afe4d6a..865a70d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,6 +17,15 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -30,17 +39,12 @@ import 
org.a

[07/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws IOE

[15/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = 
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[06/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new StringKeyConverter();
-
-  public GenericEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetri

[34/50] [abbrv] hadoop git commit: YARN-6256. Add FROM_ID info key for timeline entities in reader response (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a391f54f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 1a518d0..4a9e53e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -74,14 +74,14 @@ final class TimelineReaderWebServicesUtils {
   static TimelineEntityFilters createTimelineEntityFilters(String limit,
   String createdTimeStart, String createdTimeEnd, String relatesTo,
   String isRelatedTo, String infofilters, String conffilters,
-  String metricfilters, String eventfilters, String fromidprefix,
+  String metricfilters, String eventfilters,
   String fromid) throws TimelineParseException {
 return new TimelineEntityFilters(parseLongStr(limit),
 parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
 parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
 parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
 parseMetricFilters(metricfilters), parseEventFilters(eventfilters),
-parseLongStr(fromidprefix), parseStr(fromid));
+parseStr(fromid));
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)

(cherry picked from commit ab192fd58358faff6880f3e0e867d7bdd4bb043a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70b788a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70b788a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70b788a3

Branch: refs/heads/YARN-5355-branch-2
Commit: 70b788a3190ccf4dd5626b9699ddba52473ef33c
Parents: 4aad668
Author: Varun Saxena 
Authored: Thu Feb 23 11:15:51 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../src/main/resources/yarn-default.xml  | 11 +++
 .../timelineservice/reader/TimelineReaderServer.java |  9 +
 .../src/site/markdown/TimelineServiceV2.md   |  9 +
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70b788a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 77f7914..4fd10ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3099,5 +3099,16 @@
 yarn.app.attempt.diagnostics.limit.kc
 64
   
+  
+  
+
+  Flag to enable cross-origin (CORS) support for timeline service v1.x or
+  Timeline Reader in timeline service v2. For timeline service v2, also add
+  org.apache.hadoop.security.HttpCrossOriginFilterInitializer to the
+  configuration hadoop.http.filter.initializers in core-site.xml.
+
+yarn.timeline-service.http-cross-origin.enabled
+false
+  
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70b788a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 8c5e72d..b45fd36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -134,6 +135,14 @@ public class TimelineReaderServer extends CompositeService 
{
 YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
 WebAppUtils.getTimelineReaderWebAppURL(conf));
 LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
+boolean enableCorsFilter = conf.getBoolean(
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
+// setup CORS
+if (enableCorsFilter) {
+  conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
+  + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
+}
 try {
   HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("timeline")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70b788a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 7d36a4a..8b18474 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn

[23/50] [abbrv] hadoop git commit: YARN-6026. A couple of spelling errors in the docs. Contributed by Grant Sohn.

2017-04-26 Thread varunsaxena
YARN-6026. A couple of spelling errors in the docs. Contributed by Grant Sohn.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f155610f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f155610f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f155610f

Branch: refs/heads/YARN-5355-branch-2
Commit: f155610f6ffb32e5bbc6b189ec29759d9579ec12
Parents: e9f1682
Author: Naganarasimha 
Authored: Tue Dec 27 05:52:41 2016 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f155610f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 3f12a59..182a5fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -352,7 +352,7 @@ After creating the timeline client, user also needs to set 
the timeline collecto
 
 amRMClient.registerTimelineClient(timelineClient);
 
-Else address needs to be retreived from the AM allocate response and need to 
be set in timeline client explicitly.
+Else address needs to be retrieved from the AM allocate response and need to 
be set in timeline client explicitly.
 
 timelineClient.setTimelineServiceAddress(response.getCollectorAddr());
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. Contributed by Varun Saxena.

2017-04-26 Thread varunsaxena
YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. 
Contributed by Varun Saxena.

(cherry picked from commit c06114d6a360dddeb66c2dd9ad4fa5dae0cfbfb1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/688b34f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/688b34f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/688b34f8

Branch: refs/heads/YARN-5355-branch-2
Commit: 688b34f88e9ea8f3ab0e351ce64103c8f888e4af
Parents: da882ad
Author: Naganarasimha 
Authored: Sun Nov 27 23:35:53 2016 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:13:07 2017 +0530

--
 .../TestTimelineReaderWebServicesHBaseStorage.java|  2 +-
 .../timelineservice/storage/DataGeneratorForTest.java |  4 ++--
 .../storage/TestHBaseTimelineStorageApps.java |  8 
 .../storage/TestHBaseTimelineStorageEntities.java |  6 +++---
 .../storage/flow/TestHBaseStorageFlowActivity.java|  6 +++---
 .../storage/flow/TestHBaseStorageFlowRun.java | 14 +++---
 .../flow/TestHBaseStorageFlowRunCompaction.java   |  2 +-
 .../storage/HBaseTimelineWriterImpl.java  |  5 -
 8 files changed, 21 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/688b34f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index e97ea5b..6bbafe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -334,7 +334,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 HBaseTimelineWriterImpl hbi = null;
 Configuration c1 = util.getConfiguration();
 try {
-  hbi = new HBaseTimelineWriterImpl(c1);
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(c1);
   hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
   hbi.write(cluster, user, flow, flowVersion, runid, entity1.getId(), te1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/688b34f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index b56a752..cafacab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -183,7 +183,7 @@ final class DataGeneratorForTest {
 te2.addEntity(entity2);
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";
@@ -401,7 +401,7 @@ final class DataGeneratorForTest {
 
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/688b34f8/hadoop-yar

[02/50] [abbrv] hadoop git commit: YARN-5925. Extract hbase-backend-exclusive utility methods from TimelineStorageUtil. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
YARN-5925. Extract hbase-backend-exclusive utility methods from 
TimelineStorageUtil. Contributed by Haibo Chen.

(cherry picked from commit 8288030cb4aa3b5a9425cc0a3f6df03a3eae1dfb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8c9f368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8c9f368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8c9f368

Branch: refs/heads/YARN-5355-branch-2
Commit: d8c9f3685bcbbd5b8a051861a81505a8723426e4
Parents: 148803e
Author: Sangjin Lee 
Authored: Fri Dec 9 16:30:49 2016 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:13:07 2017 +0530

--
 ...stTimelineReaderWebServicesHBaseStorage.java |   6 +-
 .../flow/TestHBaseStorageFlowActivity.java  |  12 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  44 +--
 .../storage/HBaseTimelineReaderImpl.java|   4 +-
 .../storage/HBaseTimelineWriterImpl.java|   4 +-
 .../storage/TimelineSchemaCreator.java  |   4 +-
 .../storage/common/AppIdKeyConverter.java   |   5 +-
 .../common/HBaseTimelineStorageUtils.java   | 306 +++
 .../storage/common/LongConverter.java   |   2 +-
 .../storage/common/TimelineStorageUtils.java| 265 
 .../storage/flow/FlowActivityColumnPrefix.java  |  10 +-
 .../storage/flow/FlowActivityRowKey.java|   4 +-
 .../storage/flow/FlowRunColumn.java |   6 +-
 .../storage/flow/FlowRunColumnPrefix.java   |   6 +-
 .../storage/flow/FlowRunCoprocessor.java|   4 +-
 .../storage/flow/FlowScanner.java   |  13 +-
 .../storage/reader/EntityTypeReader.java|   6 +-
 .../storage/common/TestRowKeys.java |   2 +-
 18 files changed, 374 insertions(+), 329 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8c9f368/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 6bbafe3..a83d2dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -78,7 +78,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   private static HBaseTestingUtility util;
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -962,7 +962,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   assertEquals(1, entities.size());
 
   long firstFlowActivity =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
   DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
   uri = URI.create("http://localhost:"; + serverPort + "/ws/v2/" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8c9f368/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
--

[37/50] [abbrv] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-04-26 Thread varunsaxena
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

(cherry picked from commit 73235ab30361b41293846189f3c5fef321ae7cac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7eed958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7eed958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7eed958

Branch: refs/heads/YARN-5355-branch-2
Commit: e7eed958f7d88cdcb5614d6f0d9f6d1cf1d4a43f
Parents: 673ab90
Author: Sangjin Lee 
Authored: Thu Feb 16 18:43:31 2017 -0800
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java | 113 +--
 .../hadoop/yarn/client/api/AMRMClient.java  |  38 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  19 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  15 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 825 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 442 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1278 insertions(+), 992 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7eed958/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 1cf7182..5edd189 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -91,8 +90,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -134,9 +131,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -269,12 +267,17 @@ public class JobHistoryEventHandler extends 
AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled")

[35/50] [abbrv] hadoop git commit: YARN-6256. Add FROM_ID info key for timeline entities in reader response (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6256. Add FROM_ID info key for timeline entities in reader response 
(Rohith Sharma K S via Varun Saxena)

(cherry picked from commit 5d9ad152a1082a7c9b8edaf57a88ae471a537599)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a391f54f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a391f54f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a391f54f

Branch: refs/heads/YARN-5355-branch-2
Commit: a391f54f7729c92d06d6c37370035d4442ce59c7
Parents: 6c993a3
Author: Varun Saxena 
Authored: Tue Mar 7 23:54:38 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 ...stTimelineReaderWebServicesHBaseStorage.java |  33 +--
 .../storage/TestHBaseTimelineStorageApps.java   |  19 +-
 .../TestHBaseTimelineStorageEntities.java   |  21 +-
 .../storage/application/ApplicationRowKey.java  |  49 +++-
 .../storage/entity/EntityRowKey.java|  56 -
 .../storage/flow/FlowRunRowKey.java |  47 +++-
 .../storage/reader/ApplicationEntityReader.java |  28 ++-
 .../storage/reader/FlowRunEntityReader.java |  32 +--
 .../storage/reader/GenericEntityReader.java |  25 +-
 .../storage/common/TestRowKeys.java |  21 --
 .../storage/common/TestRowKeysAsString.java | 115 ++
 .../reader/TimelineEntityFilters.java   |  29 +--
 .../reader/TimelineReaderWebServices.java   | 227 +++
 .../reader/TimelineReaderWebServicesUtils.java  |   4 +-
 14 files changed, 445 insertions(+), 261 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a391f54f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 6386183..6e416c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -816,7 +816,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
   assertEquals(2, entities1.size());
   for (TimelineEntity entity : entities1) {
 assertNotNull(entity.getInfo());
-assertEquals(1, entity.getInfo().size());
+assertEquals(2, entity.getInfo().size());
 String uid =
 (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
 assertNotNull(uid);
@@ -844,7 +844,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
   assertEquals(2, entities2.size());
   for (TimelineEntity entity : entities2) {
 assertNotNull(entity.getInfo());
-assertEquals(1, entity.getInfo().size());
+assertEquals(2, entity.getInfo().size());
 String uid =
 (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
 assertNotNull(uid);
@@ -1408,8 +1408,9 @@ public class TestTimelineReaderWebServicesHBaseStorage
 infoCnt += entity.getInfo().size();
 assertTrue(entity.getId().equals("entity2"));
   }
-  // Includes UID in info field even if fields not specified as INFO.
-  assertEquals(1, infoCnt);
+  // Includes UID and FROM_ID in info field even if fields not specified as
+  // INFO.
+  assertEquals(2, infoCnt);
 
   // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
   // (info1 eq cluster2 AND info2 eq 2.0)
@@ -1427,8 +1428,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
 infoCnt += entity.getInfo().size();
 assertTrue(entity.getId().equals("entity2"));
   }
-  // Includes UID in info field.
-  assertEquals(4, infoCnt);
+  // Includes UID and FROM_ID in info field.
+  assertEquals(5, infoCnt);
 
   // Test for behavior when compare op is ne(not equals) vs ene
   // (exists and not equals). info3 does not exist for entity2. For ne,
@@ -2159,8 +2160,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
   // verify for entity-10 to entity-7 in

[12/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 000..7d37206
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+  ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   *
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+for (Separator separator : Separator.values()) {
+  testEncodeDecode(separator, "");
+  testEncodeDecode(separator, " ");
+  testEncodeDecode(separator, "!");
+  testEncodeDecode(separator, "?");
+  testEncodeDecode(separator, "&");
+  testEncodeDecode(separator, "+");
+  testEncodeDecode(separator, "\t");
+  testEncodeDecode(separator, "Dr.");
+  testEncodeDecode(separator, "Heinz");
+  testEncodeDecode(separator, "Doofenshmirtz");
+  testEncodeDecode(separator, villain);
+  testEncodeDecode(separator, special);
+
+  assertNull(separator.encode(null));
+
+}
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+String encoded = separator.encode(token);
+String decoded = separator.decode(encoded);
+String msg = "token:" + token + " separator:" + separator + ".";
+assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+testEncodeDecode("Dr.", Separator.QUALIFIERS);
+testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+Separator.QUALIFIERS);
+testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
+testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+testEncodeDecode("Platypus...", (Separator) null);
+testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+"= no problem!",
+Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, 
Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+for (Separator separator : Separator.values()) {
+  String str1 = "cl" + separator.getValue() + "us";
+  String str2 = separator.getValue() + "rst";
+  byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+  byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+  sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+  byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+  sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+  byte[] arr = separator.join(
+  Bytes.toBytes(s

[33/50] [abbrv] hadoop git commit: YARN-6027 addendum. Fixed the broken build for YARN-5355-branch-2. Contributed by Sangjin Lee.

2017-04-26 Thread varunsaxena
YARN-6027 addendum. Fixed the broken build for YARN-5355-branch-2. Contributed 
by Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c993a39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c993a39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c993a39

Branch: refs/heads/YARN-5355-branch-2
Commit: 6c993a39cbcb1775e47028c9ba0d1291a18c5766
Parents: 1862a03
Author: Sangjin Lee 
Authored: Wed Mar 1 15:29:17 2017 -0800
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../AbstractTimelineReaderHBaseTestBase.java| 22 +---
 1 file changed, 5 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c993a39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
index 7853c94..ccfdfd0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -86,19 +86,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
   "org.apache.hadoop.yarn.server.timelineservice.storage."
   + "HBaseTimelineReaderImpl");
   config.setInt("hfile.format.version", 3);
-  server = new TimelineReaderServer() {
-@Override
-protected void setupOptions(Configuration conf) {
-  // The parent code tries to use HttpServer2 from this version of
-  // Hadoop, but the tests are loading in HttpServer2 from
-  // ${hbase-compatible-hadoop.version}. This version uses Jetty 9
-  // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
-  // are many differences, including classnames and packages.
-  // We do nothing here, so that we don't cause a NoSuchMethodError.
-  // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
-  // we should be able to remove this @Override.
-}
-  };
+  server = new TimelineReaderServer();
   server.init(config);
   server.start();
   serverPort = server.getWebServerPort();
@@ -119,11 +107,11 @@ public abstract class AbstractTimelineReaderHBaseTestBase 
{
 ClientResponse resp =
 client.resource(uri).accept(MediaType.APPLICATION_JSON)
 .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-if (resp == null || resp.getStatusInfo()
-.getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {
+if (resp == null ||
+resp.getClientResponseStatus() != ClientResponse.Status.OK) {
   String msg = "";
   if (resp != null) {
-msg = String.valueOf(resp.getStatusInfo().getStatusCode());
+msg = String.valueOf(resp.getClientResponseStatus());
   }
   throw new IOException(
   "Incorrect response from timeline reader. " + "Status=" + msg);
@@ -137,7 +125,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
 .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
 assertNotNull(resp);
 assertTrue("Response from server should have been " + status,
-resp.getStatusInfo().getStatusCode() == status.getStatusCode());
+resp.getClientResponseStatus() == status);
 System.out.println("Response is: " + resp.getEntity(String.class));
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-6342. Make TimelineV2Client's drain timeout after stop configurable (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6342. Make TimelineV2Client's drain timeout after stop configurable (Haibo 
Chen via Varun Saxena)

(cherry picked from commit 7c2bc444b3d6750aafeed9b530c8e5b1bf95c1f4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/739bf97a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/739bf97a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/739bf97a

Branch: refs/heads/YARN-5355-branch-2
Commit: 739bf97ac3e1c7bdfd8363dc1ca9579962d42c92
Parents: 53c89d9
Author: Varun Saxena 
Authored: Fri Mar 31 02:02:57 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java| 10 ++
 .../hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java |  9 ++---
 .../src/main/resources/yarn-default.xml   |  9 +
 3 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/739bf97a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ab5bbb2..347c8fd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2110,6 +2110,16 @@ public class YarnConfiguration extends Configuration {
 
   public static final int DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE = 10;
 
+
+  /**
+   * The time period for which timeline v2 client will wait for draining
+   * leftover entities after stop.
+   */
+  public static final String TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS =
+  TIMELINE_SERVICE_CLIENT_PREFIX + "drain-entities.timeout.ms";
+  public static final long DEFAULT_TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS
+  = 2000L;
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/739bf97a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index 848e238..e0e4f00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -289,7 +289,7 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
  * Time period for which the timelineclient will wait for draining after
  * stop.
  */
-private static final long DRAIN_TIME_PERIOD = 2000L;
+private final long drainTimeoutPeriod;
 
 private int numberOfAsyncsToMerge;
 private final BlockingQueue timelineEntityQueue;
@@ -300,6 +300,9 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
   numberOfAsyncsToMerge =
   conf.getInt(YarnConfiguration.NUMBER_OF_ASYNC_ENTITIES_TO_MERGE,
   YarnConfiguration.DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE);
+  drainTimeoutPeriod = conf.getLong(
+  YarnConfiguration.TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS,
+  YarnConfiguration.DEFAULT_TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS);
 }
 
 Runnable createRunnable() {
@@ -330,7 +333,7 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
 // Try to drain the remaining entities to be published @ the max 
for
 // 2 seconds
 long timeTillweDrain =
-System.currentTimeMillis() + DRAIN_TIME_PERIOD;
+System.currentTimeMillis() + drainTimeoutPeriod;
 while (!timelineEntityQueue.isEmpty()) {
   publishWithoutBlockingOnQueue(timelineEntityQueue.poll());
   if (System.currentTimeMillis() > timeTillweDrain) {
@@ -449,7 +452,7 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
   LOG.info("Stopping TimelineClient.");
   executor.shutdownNow();
   try {
-executor.awaitTermination(DRAIN_TIME_PERIOD, Time

[49/50] [abbrv] hadoop git commit: Addendum for YARN-4218. Metric for resource*time that was preempted.

2017-04-26 Thread varunsaxena
Addendum for YARN-4218. Metric for resource*time that was preempted.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39b383aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39b383aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39b383aa

Branch: refs/heads/YARN-5355-branch-2
Commit: 39b383aaffb4509fe51300b6339dc6d9a8a25d3c
Parents: 4deb89d
Author: Varun Saxena 
Authored: Wed Apr 26 11:29:12 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 11:29:12 2017 +0530

--
 .../resourcemanager/metrics/TimelineServiceV1Publisher.java| 4 
 .../resourcemanager/metrics/TimelineServiceV2Publisher.java| 6 ++
 .../metrics/TestSystemMetricsPublisherForV2.java   | 2 +-
 3 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39b383aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index cbf6a73..89a8616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
@@ -144,6 +144,10 @@ public class TimelineServiceV1Publisher extends 
AbstractSystemMetricsPublisher {
 appMetrics.getVcoreSeconds());
 entity.addOtherInfo(ApplicationMetricsConstants.APP_MEM_METRICS,
 appMetrics.getMemorySeconds());
+entity.addOtherInfo(ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS,
+appMetrics.getPreemptedMemorySeconds());
+entity.addOtherInfo(ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS,
+appMetrics.getPreemptedVcoreSeconds());
 tEvent.setEventInfo(eventInfo);
 
 entity.addEvent(tEvent);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39b383aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 1fc0c38..495c36c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -196,6 +196,12 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
 ApplicationMetricsConstants.APP_MEM_METRICS, timestamp,
 appMetrics.getMemorySeconds()));
 entityMetrics.add(getTimelineMetric(
+ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS, timestamp,
+appMetrics.getPreemptedMemorySeconds()));
+entityMetrics.add(getTimelineMetric(
+ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS, timestamp,
+appMetrics.getPreemptedVcoreSeconds()));
+entityMetrics.add(getTimelineMetric(
 ApplicationMetricsConstants.APP_RESOURCE_PREEMPTED_CPU, timestamp,
 appMetrics.getResourcePreempted().getVirtualCores()));
 entityMetrics.add(getTimelineMetric(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39b383aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/

[40/50] [abbrv] hadoop git commit: YARN-6357. Implement putEntitiesAsync API in TimelineCollector (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6357. Implement putEntitiesAsync API in TimelineCollector (Haibo Chen via 
Varun Saxena)

(cherry picked from commit 063b513b1c10987461caab3d26c8543c6e657bf7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c89d97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c89d97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c89d97

Branch: refs/heads/YARN-5355-branch-2
Commit: 53c89d9760627e7a607c16407f06bf1e2e924c45
Parents: e59486a
Author: Varun Saxena 
Authored: Wed Mar 29 03:48:03 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../collector/TimelineCollector.java| 31 --
 .../collector/TimelineCollectorWebService.java  | 12 ++--
 .../collector/TestTimelineCollector.java| 63 
 3 files changed, 96 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c89d97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 2fc3033..353066b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -133,19 +133,35 @@ public abstract class TimelineCollector extends 
CompositeService {
   public TimelineWriteResponse putEntities(TimelineEntities entities,
   UserGroupInformation callerUgi) throws IOException {
 if (LOG.isDebugEnabled()) {
-  LOG.debug("SUCCESS - TIMELINE V2 PROTOTYPE");
   LOG.debug("putEntities(entities=" + entities + ", callerUgi="
   + callerUgi + ")");
 }
-TimelineCollectorContext context = getTimelineEntityContext();
 
+TimelineWriteResponse response = writeTimelineEntities(entities);
+flushBufferedTimelineEntities();
+
+return response;
+  }
+
+  private TimelineWriteResponse writeTimelineEntities(
+  TimelineEntities entities) throws IOException {
 // Update application metrics for aggregation
 updateAggregateStatus(entities, aggregationGroups,
 getEntityTypesSkipAggregation());
 
+final TimelineCollectorContext context = getTimelineEntityContext();
 return writer.write(context.getClusterId(), context.getUserId(),
-context.getFlowName(), context.getFlowVersion(), 
context.getFlowRunId(),
-context.getAppId(), entities);
+context.getFlowName(), context.getFlowVersion(),
+context.getFlowRunId(), context.getAppId(), entities);
+  }
+
+  /**
+   * Flush buffered timeline entities, if any.
+   * @throws IOException if there is any exception encountered while
+   *  flushing buffered entities.
+   */
+  private void flushBufferedTimelineEntities() throws IOException {
+writer.flush();
   }
 
   /**
@@ -158,14 +174,17 @@ public abstract class TimelineCollector extends 
CompositeService {
*
* @param entities entities to post
* @param callerUgi the caller UGI
+   * @throws IOException if there is any exception encountered while putting
+   * entities.
*/
   public void putEntitiesAsync(TimelineEntities entities,
-  UserGroupInformation callerUgi) {
-// TODO implement
+  UserGroupInformation callerUgi) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("putEntitiesAsync(entities=" + entities + ", callerUgi=" +
   callerUgi + ")");
 }
+
+writeTimelineEntities(entities);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c89d97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apac

[48/50] [abbrv] hadoop git commit: YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun 
Saxena)

(cherry picked from commit 7f09cd1d5ee70e0bdf24cdecd78cd3bc258a40f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e59486a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e59486a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e59486a3

Branch: refs/heads/YARN-5355-branch-2
Commit: e59486a376e41757350b9d387026a537fb01b52c
Parents: 7e6fdeb
Author: Varun Saxena 
Authored: Thu Mar 23 14:35:37 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../storage/TestHBaseTimelineStorageApps.java   | 190 -
 .../TestHBaseTimelineStorageEntities.java   | 202 +--
 .../flow/TestHBaseStorageFlowActivity.java  |   9 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  48 +++--
 .../storage/reader/TimelineEntityReader.java|   2 +-
 .../reader/TimelineEntityFilters.java   | 160 ---
 .../reader/TimelineReaderWebServices.java   |   6 +-
 .../reader/TimelineReaderWebServicesUtils.java  |  44 +++-
 .../TestFileSystemTimelineReaderImpl.java   | 100 +
 9 files changed, 381 insertions(+), 380 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e59486a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 6bb8de1..321b72e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -658,7 +658,7 @@ public class TestHBaseTimelineStorageApps {
 new TimelineReaderContext("cluster1", "user1", "some_flow_name",
 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
 null),
-new TimelineEntityFilters(),
+new TimelineEntityFilters.Builder().build(),
 new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
 assertEquals(3, entities.size());
 int cfgCnt = 0;
@@ -695,8 +695,8 @@ public class TestHBaseTimelineStorageApps {
 new TimelineReaderContext("cluster1", "user1", "some_flow_name",
 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
 null),
-new TimelineEntityFilters(null, 1425016502000L, 1425016502040L, null,
-null, null, null, null, null),
+new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L)
+.createTimeEnd(1425016502040L).build(),
 new TimelineDataToRetrieve());
 assertEquals(3, entities.size());
 for (TimelineEntity entity : entities) {
@@ -712,8 +712,8 @@ public class TestHBaseTimelineStorageApps {
 new TimelineReaderContext("cluster1", "user1", "some_flow_name",
 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
 null),
-new TimelineEntityFilters(null, 1425016502015L, null, null, null, null,
-null, null, null),
+new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L)
+.build(),
 new TimelineDataToRetrieve());
 assertEquals(2, entities.size());
 for (TimelineEntity entity : entities) {
@@ -727,8 +727,8 @@ public class TestHBaseTimelineStorageApps {
 new TimelineReaderContext("cluster1", "user1", "some_flow_name",
 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
 null),
-new TimelineEntityFilters(null, null, 1425016502015L, null, null, null,
-null, null, null),
+new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L)
+.build(),
 new TimelineDataToRetrieve());
 assertEquals(1, entities.size());
 for (TimelineEntity entity : entities) {
@@ -755,7 +755,7 @@ public class TestHBaseTimelineStorageApps {
 new TimelineReaderContext("cluster1", "user1", "some_flow_name",
 1002345678919L, nul

[42/50] [abbrv] hadoop git commit: YARN-6237. Move UID constant to TimelineReaderUtils (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6237. Move UID constant to TimelineReaderUtils (Rohith Sharma K S via 
Varun Saxena)

(cherry picked from commit ec32c4265bbf436ae61b6efc71ca33b16b70a23d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4846b0cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4846b0cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4846b0cc

Branch: refs/heads/YARN-5355-branch-2
Commit: 4846b0cc579de797f98b23e0dfe353b93f6bbd17
Parents: a391f54
Author: Varun Saxena 
Authored: Thu Mar 9 01:06:54 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../TestTimelineReaderWebServicesHBaseStorage.java  | 12 ++--
 .../timelineservice/reader/TimelineReaderManager.java   | 12 
 .../timelineservice/reader/TimelineReaderUtils.java |  3 +++
 .../reader/TestTimelineReaderWebServices.java   |  4 ++--
 4 files changed, 15 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4846b0cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 6e416c8..6836cc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -648,7 +648,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
   List listFlowUIDs = new ArrayList();
   for (FlowActivityEntity entity : flowEntities) {
 String flowUID =
-(String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+(String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
 listFlowUIDs.add(flowUID);
 assertEquals(TimelineUIDConverter.FLOW_UID.encodeUID(
 new TimelineReaderContext(entity.getCluster(), entity.getUser(),
@@ -672,7 +672,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
 assertNotNull(frEntities);
 for (FlowRunEntity entity : frEntities) {
   String flowRunUID =
-  (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+  (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
   listFlowRunUIDs.add(flowRunUID);
   assertEquals(TimelineUIDConverter.FLOWRUN_UID.encodeUID(
   new TimelineReaderContext("cluster1", entity.getUser(),
@@ -704,7 +704,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
 assertNotNull(appEntities);
 for (TimelineEntity entity : appEntities) {
   String appUID =
-  (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+  (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
   listAppUIDs.add(appUID);
   assertEquals(TimelineUIDConverter.APPLICATION_UID.encodeUID(
   new TimelineReaderContext(context.getClusterId(),
@@ -737,7 +737,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
 assertNotNull(entities);
 for (TimelineEntity entity : entities) {
   String entityUID =
-  (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+  (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
   listEntityUIDs.add(entityUID);
   assertEquals(TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(
   new TimelineReaderContext(context.getClusterId(),
@@ -818,7 +818,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
 assertNotNull(entity.getInfo());
 assertEquals(2, entity.getInfo().size());
 String uid =
-(String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
+(String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
 assertNotNull(uid);
 assertTrue(uid.equals(appUIDWithFlowInfo + "!type1!0!entity1")
 || uid.equals(appUIDWithFlowInfo + "!type1!0!entity2"));
@@ -846,7

[50/50] [abbrv] hadoop git commit: Addendum for YARN-5865. Retrospect updateApplicationPriority api to handle state store exception in align with YARN-5611

2017-04-26 Thread varunsaxena
Addendum for YARN-5865. Retrospect updateApplicationPriority api to handle 
state store exception in align with YARN-5611


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f3e926b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f3e926b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f3e926b

Branch: refs/heads/YARN-5355-branch-2
Commit: 3f3e926ba97532d78778b4abf61654a6e4fd48f1
Parents: 39b383a
Author: Varun Saxena 
Authored: Wed Apr 26 15:43:32 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 15:43:32 2017 +0530

--
 .../server/resourcemanager/metrics/TimelineServiceV1Publisher.java | 2 +-
 .../server/resourcemanager/metrics/TimelineServiceV2Publisher.java | 2 +-
 .../resourcemanager/metrics/TestSystemMetricsPublisherForV2.java   | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3e926b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 89a8616..4c371a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
@@ -88,7 +88,7 @@ public class TimelineServiceV1Publisher extends AbstractSystemMetricsPublisher {
 ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO,
 app.getApplicationSubmissionContext().getUnmanagedAM());
 entityInfo.put(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO,
-app.getApplicationSubmissionContext().getPriority().getPriority());
+app.getApplicationPriority().getPriority());
 entityInfo.put(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION,
 app.getAmNodeLabelExpression());
 entityInfo.put(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3e926b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 495c36c..f3b495b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -118,7 +118,7 @@ public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
 ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO,
 app.getApplicationSubmissionContext().getUnmanagedAM());
 entityInfo.put(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO,
-app.getApplicationSubmissionContext().getPriority().getPriority());
+app.getApplicationPriority().getPriority());
 entity.getConfigs().put(
 ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION,
 app.getAmNodeLabelExpression());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3e926b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestS

[31/50] [abbrv] hadoop git commit: YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1862a030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index b45fd36..cf0fa50 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -175,7 +175,7 @@ public class TimelineReaderServer extends CompositeService {
   }
 
   @VisibleForTesting
-  int getWebServerPort() {
+  public int getWebServerPort() {
 return readerWebServer.getConnectorAddress(0).getPort();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1862a030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
index c93c631..8f92433 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
@@ -24,14 +24,30 @@ import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Set of utility methods to be used across timeline reader.
  */
-final class TimelineReaderUtils {
+public final class TimelineReaderUtils {
   private TimelineReaderUtils() {
   }
 
   /**
+   * Default delimiter for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_DELIMITER_CHAR = '!';
+
+  /**
+   * Default escape character used for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_ESCAPE_CHAR = '*';
+
+  public static final String FROMID_KEY = "FROM_ID";
+
+  /**
* Split the passed string along the passed delimiter character while looking
* for escape char to interpret the splitted parts correctly. For delimiter or
* escape character to be interpreted as part of the string, they have to be
@@ -168,4 +184,14 @@ final class TimelineReaderUtils {
 // Join the strings after they have been escaped.
 return StringUtils.join(strs, delimiterChar);
   }
+
+  public static List split(final String str)
+  throws IllegalArgumentException {
+return split(str, DEFAULT_DELIMITER_CHAR, DEFAULT_ESCAPE_CHAR);
+  }
+
+  public static String joinAndEscapeStrings(final String[] strs) {
+return joinAndEscapeStrings(strs, DEFAULT_DELIMITER_CHAR,
+DEFAULT_ESCAPE_CHAR);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1862a030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index df3ccab..1f82d91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -1333,6 +1333,10 @@ public 

[19/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/673ab905
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/673ab905
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/673ab905

Branch: refs/heads/YARN-5355-branch-2
Commit: 673ab905cf6c5a326449f8d5a0565667e4018398
Parents: ac13f7c
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 542 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOp

[10/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cas

[04/50] [abbrv] hadoop git commit: YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

2017-04-26 Thread varunsaxena
YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

(cherry picked from commit f511cc89b66997e496f630bdd299d3068d43fd31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da882ad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da882ad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da882ad7

Branch: refs/heads/YARN-5355-branch-2
Commit: da882ad797002fd30ba3b9c92462d9000546cb22
Parents: f673251
Author: Sangjin Lee 
Authored: Wed Oct 26 11:31:00 2016 -0700
Committer: Varun Saxena 
Committed: Tue Apr 25 23:13:07 2017 +0530

--
 LICENSE.txt  | 8 
 .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml   | 8 
 .../hadoop-yarn-server-timelineservice/pom.xml   | 4 
 3 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da882ad7/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 2cad31e..1e506aa 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -519,6 +519,8 @@ Mockito 1.8.5
 JUL to SLF4J bridge 1.7.25
 SLF4J API Module 1.7.25
 SLF4J LOG4J-12 Binding 1.7.25
+JCodings 1.0.8
+Joni 2.1.2
 

 
 The MIT License (MIT)
@@ -1574,6 +1576,12 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
+StringTemplate 4 4.0.7
+ANTLR 3 Tool 3.5
+ANTLR 3 Runtime 3.5
+ANTLR StringTemplate 3.2.1
+ASM All 5.0.2
+sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da882ad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index a016985..c627112 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -325,6 +325,10 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
+
+  net.sourceforge.findbugs
+  annotations
+
   
 
 
@@ -350,6 +354,10 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
+
+  net.sourceforge.findbugs
+  annotations
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da882ad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index 9466d22..1eca09f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -184,6 +184,10 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
+
+  net.sourceforge.findbugs
+  annotations
+
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: YARN-6376. Exceptions caused by synchronous putEntities requests can be swallowed (Haibo Chen via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6376. Exceptions caused by synchronous putEntities requests can be 
swallowed (Haibo Chen via Varun Saxena)

(cherry picked from commit b58777a9c9a5b6f2e4bcfd2b3bede33f25f80dec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd2c8d2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd2c8d2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd2c8d2c

Branch: refs/heads/YARN-5355-branch-2
Commit: fd2c8d2c6f53b335ce5e2ee5b160165119779a52
Parents: 739bf97
Author: Varun Saxena 
Authored: Fri Mar 31 02:17:20 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:45 2017 +0530

--
 .../timelineservice/collector/TimelineCollector.java  | 10 --
 .../collector/TimelineCollectorManager.java   |  7 ++-
 2 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd2c8d2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 353066b..4c9e9f8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -137,8 +137,14 @@ public abstract class TimelineCollector extends CompositeService {
   + callerUgi + ")");
 }
 
-TimelineWriteResponse response = writeTimelineEntities(entities);
-flushBufferedTimelineEntities();
+TimelineWriteResponse response;
+// synchronize on the writer object so that no other threads can
+// flush the writer buffer concurrently and swallow any exception
+// caused by the timeline enitites that are being put here.
+synchronized (writer) {
+  response = writeTimelineEntities(entities);
+  flushBufferedTimelineEntities();
+}
 
 return response;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd2c8d2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 19896e8..8ef9b43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -259,7 +259,12 @@ public class TimelineCollectorManager extends AbstractService {
 
 public void run() {
   try {
-writer.flush();
+// synchronize on the writer object to avoid flushing timeline
+// entities placed on the buffer by synchronous putEntities
+// requests.
+synchronized (writer) {
+  writer.flush();
+}
   } catch (Throwable th) {
 // we need to handle all exceptions or subsequent execution may be
 // suppressed


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via 
Varun Saxena)

(cherry picked from commit 63c06ec44e633567c378e28898e319143593ff30)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1862a030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1862a030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1862a030

Branch: refs/heads/YARN-5355-branch-2
Commit: 1862a03030a26876f701cd842fce17f272fbb8fb
Parents: 177a324
Author: Varun Saxena 
Authored: Thu Mar 2 01:49:34 2017 +0530
Committer: Varun Saxena 
Committed: Wed Apr 26 00:48:44 2017 +0530

--
 .../AbstractTimelineReaderHBaseTestBase.java| 176 
 ...stTimelineReaderWebServicesHBaseStorage.java | 411 ---
 .../storage/common/KeyConverterToString.java|  38 ++
 .../storage/flow/FlowActivityRowKey.java|  59 ++-
 .../reader/FlowActivityEntityReader.java|  28 +-
 .../storage/common/TestRowKeys.java |  21 +
 .../reader/TimelineReaderServer.java|   2 +-
 .../reader/TimelineReaderUtils.java |  28 +-
 .../reader/TimelineReaderWebServices.java   |  18 +-
 .../reader/TimelineUIDConverter.java|  30 +-
 10 files changed, 549 insertions(+), 262 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1862a030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
new file mode 100644
index 000..7853c94
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.util.List;
+
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.Assert;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.GenericType;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Test Base for TimelineReaderServer HBase tests.
+ */
+public abstract class AbstractTimelineReaderHBaseTest

[24/50] [abbrv] hadoop git commit: YARN-6074. FlowRunEntity does not deserialize long values correctly (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6074. FlowRunEntity does not deserialize long values correctly (Rohith 
Sharma K S via Varun Saxena)

(cherry picked from commit db490eccced3c42ac27253ca6cbaf10a77e0e116)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cec92566
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cec92566
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cec92566

Branch: refs/heads/YARN-5355-branch-2
Commit: cec92566d0419148f49268966e51d55b68cc28b7
Parents: fa5fb6c
Author: Varun Saxena 
Authored: Mon Jan 9 18:54:22 2017 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../hadoop/yarn/api/records/timelineservice/FlowRunEntity.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec92566/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/FlowRunEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/FlowRunEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/FlowRunEntity.java
index 410a1bf..5c666cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/FlowRunEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/FlowRunEntity.java
@@ -100,7 +100,7 @@ public class FlowRunEntity extends 
HierarchicalTimelineEntity {
 
   public long getRunId() {
 Object runId = getInfo().get(FLOW_RUN_ID_INFO_KEY);
-return runId == null ? 0L : (Long) runId;
+return runId == null ? 0L : ((Number) runId).longValue();
   }
 
   public void setRunId(long runId) {
@@ -117,7 +117,7 @@ public class FlowRunEntity extends 
HierarchicalTimelineEntity {
 
   public long getMaxEndTime() {
 Object time = getInfo().get(FLOW_RUN_END_TIME);
-return time == null ? 0L : (Long)time;
+return time == null ? 0L : ((Number) time).longValue();
   }
 
   public void setMaxEndTime(long endTime) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-04-26 Thread varunsaxena
YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.

(cherry picked from commit 0327a79d79a4d56d9c7cb6889886afd2272b07d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac13f7ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac13f7ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac13f7ce

Branch: refs/heads/YARN-5355-branch-2
Commit: ac13f7ce3883f303226061235bc874e0bf03b915
Parents: 5efd6cc
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac13f7ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8fcd719..ab5bbb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2070,6 +2070,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac13f7ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 904d8e5..77f7914 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2294,6 +2294,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac13f7ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tim

[14/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation action

[21/50] [abbrv] hadoop git commit: YARN-5585. [Atsv2] Reader side changes for entity prefix and support for pagination via additional filters (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5fb6ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
index e90338e..16fffa4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
@@ -83,7 +83,7 @@ public final class TimelineEntityReaderFactory {
   return new FlowRunEntityReader(context, filters, dataToRetrieve);
 } else {
   // assume we're dealing with a generic entity read
-  return new GenericEntityReader(context, filters, dataToRetrieve, false);
+  return new GenericEntityReader(context, filters, dataToRetrieve);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5fb6ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
index d5e791b..11dc913 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
@@ -53,19 +53,19 @@ public class TestTimelineUIDConverter {
 assertEquals(context, TimelineUIDConverter.APPLICATION_UID.decodeUID(uid));
 
 context = new TimelineReaderContext("yarn_cluster", "root", "hive_join",
-1234L, "application_11_", "YARN_CONTAINER",
+1234L, "application_11_", "YARN_CONTAINER", 12345L,
 "container_11__01_01");
 uid = TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context);
 
assertEquals("yarn_cluster!root!hive_join!1234!application_11_!"
-+ "YARN_CONTAINER!container_11__01_01", uid);
++ "YARN_CONTAINER!12345!container_11__01_01", uid);
 assertEquals(
 context, TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uid));
 context = new TimelineReaderContext("yarn_cluster", null, null, null,
-"application_11_", "YARN_CONTAINER",
+"application_11_", "YARN_CONTAINER", 54321L,
 "container_11__01_01");
 uid = TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context);
 assertEquals("yarn_cluster!application_11_!YARN_CONTAINER!" +
-"container_11__01_01", uid);
+"54321!container_11__01_01", uid);
 assertEquals(
 context, TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uid));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa5fb6ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
index 6c6d1b3..7560f33 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ 
b/hadoop-yarn-project/hadoop-

[20/50] [abbrv] hadoop git commit: YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.

2017-04-26 Thread varunsaxena
YARN-5980. Update documentation for single node hbase deploy. Contributed by 
Vrushali C.

(cherry picked from commit e1bdba77888723b435a235a96c8659029afd25d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d883f4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d883f4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d883f4e

Branch: refs/heads/YARN-5355-branch-2
Commit: 7d883f4eee95f3a127da3149bd2ea088de6c63a9
Parents: cec9256
Author: Sangjin Lee 
Authored: Fri Jan 13 09:12:48 2017 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../src/site/markdown/TimelineServiceV2.md  | 63 +---
 1 file changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d883f4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 182a5fe..7a0c477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -165,18 +165,64 @@ New configuration parameters that are introduced with v.2 
are marked bold.
 ### Enabling Timeline Service v.2
 
  Preparing Apache HBase cluster for storage
+There are a few steps to be done for preparing the storage for Timeline 
Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+#  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. 
The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. 
The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
+Timeline Service.
+
+HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
+mode that is suitable for your setup.
+(http://hbase.apache.org/book.html#standalone_dist)
+
+# Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy 
mode.
+
+This is a useful variation on the standalone HBase setup and has all HBase 
daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an 
HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across 
node
+comings and goings. To configure this standalone variant, edit your 
`hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
+
+```
+
+  
+hbase.rootdir
+hdfs://namenode.example.org:8020/hbase
+  
+  
+hbase.cluster.distributed
+false
+  
+
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following 
steps.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform 
the following steps.
+#  Step 2) Enable the coprocessor
 
-First, add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
+Step 2.1) Add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
 is needed for the coprocessor as well as the schema creator. For example,
 
 cp hadoop-yarn-server-timelineservice-3.0.0-alpha1-SNAPSHOT.jar 
/usr/hbase/lib/
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add 
the following entry in
+Step 2.2) Enable the coprocessor that handles the aggregation. To enable it, 
add the following entry in
 region servers' `hbase-site.xml` file (generally located in the `conf` 
directory) as follows:
 
 ```
@@ -186,10 +232,11 @@ region servers' `hbase-site.xml` file (generally located 
in the `conf` directory
 
 ```
 
-Restart the region servers and the master to pick up the timeline service jar 
as well as the config
-change. In this version, the coprocessor is loaded 

[08/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application 
end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.s

[26/50] [abbrv] hadoop git commit: YARN-6064. Support fromId for flowRuns and flow/flowRun apps REST API's (Rohith Sharma K S via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-6064. Support fromId for flowRuns and flow/flowRun apps REST API's (Rohith 
Sharma K S via Varun Saxena)

(cherry picked from commit 4b1ba4ea314147f8a06cc4f446c1d9336de89fc1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d27b1ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d27b1ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d27b1ca

Branch: refs/heads/YARN-5355-branch-2
Commit: 5d27b1ca24d32724dd75c19a1654fbe9c7d45ccc
Parents: 44216a4
Author: Varun Saxena 
Authored: Wed Jan 18 10:30:15 2017 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 ...stTimelineReaderWebServicesHBaseStorage.java | 256 +++
 .../reader/TimelineReaderWebServices.java   |  72 --
 .../storage/reader/ApplicationEntityReader.java |  43 +++-
 .../storage/reader/FlowRunEntityReader.java |  29 ++-
 4 files changed, 327 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d27b1ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index fa35fc5..485c191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -40,6 +40,8 @@ import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -53,6 +55,7 @@ import 
org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -352,6 +355,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   flowVersion2, runid2, entity3.getId(), te3);
   hbi.write(cluster, user, flow, flowVersion, runid,
   "application_11_", userEntities);
+  writeApplicationEntities(hbi);
   hbi.flush();
 } finally {
   if (hbi != null) {
@@ -360,6 +364,35 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 }
   }
 
+  static void writeApplicationEntities(HBaseTimelineWriterImpl hbi)
+  throws IOException {
+long currentTimeMillis = System.currentTimeMillis();
+int count = 1;
+for (long i = 1; i <= 3; i++) {
+  for (int j = 1; j <= 5; j++) {
+TimelineEntities te = new TimelineEntities();
+ApplicationId appId =
+BuilderUtils.newApplicationId(currentTimeMillis, count++);
+ApplicationEntity appEntity = new ApplicationEntity();
+appEntity.setId(appId.toString());
+appEntity.setCreatedTime(currentTimeMillis);
+
+TimelineEvent created = new TimelineEvent();
+created.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+created.setTimestamp(currentTimeMillis);
+appEntity.addEvent(created);
+TimelineEvent finished = new TimelineEvent();
+finished.setId(ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
+finished.setTimestamp(currentTimeMillis + i * j);
+
+appEntity.addEvent(finished);
+te.addEntity(appEntity);
+hbi.write("cluster1", "user1", "flow1", "CF7022C10

[01/50] [abbrv] hadoop git commit: MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by Li Lu. [Forced Update!]

2017-04-26 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 c2fc688d7 -> 3f3e926ba (forced update)


MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by 
Li Lu.

(cherry picked from commit 6217b87f4a056cf704cef2e073b386b7803415de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/148803ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/148803ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/148803ea

Branch: refs/heads/YARN-5355-branch-2
Commit: 148803eac461caa029b16dce502a1e4d8119952a
Parents: 07b2e75
Author: Sangjin Lee 
Authored: Thu Dec 8 18:14:09 2016 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:13:07 2017 +0530

--
 .../org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java | 3 +--
 .../java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java| 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/148803ea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@ class JobHistoryFileReplayMapperV1 extends
 
   public void map(IntWritable key, IntWritable val, Context context) throws 
IOException {
 // collect the apps it needs to process
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
 JobHistoryFileReplayHelper helper = new 
JobHistoryFileReplayHelper(context);
 int replayMode = helper.getReplayMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/148803ea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
* Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@ class SimpleEntityWriterV1
 
   public void map(IntWritable key, IntWritable val, Context context)
   throws IOException {
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 Configuration conf = context.getConfiguration();
 
 final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

2017-04-26 Thread varunsaxena
YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

(cherry picked from commit f945008d1cf5730bdebeae501ed0e42477ad219e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9f16828
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9f16828
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9f16828

Branch: refs/heads/YARN-5355-branch-2
Commit: e9f1682852a2bba47a382561debe74ff4858c824
Parents: d8c9f36
Author: Sangjin Lee 
Authored: Wed Dec 21 09:53:07 2016 -0800
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:27 2017 +0530

--
 LICENSE.txt |   9 +-
 hadoop-project/pom.xml  |  26 +-
 .../pom.xml | 142 +---
 ...TestPhoenixOfflineAggregationWriterImpl.java | 161 -
 .../hadoop-yarn-server-timelineservice/pom.xml  |  26 +-
 .../PhoenixOfflineAggregationWriterImpl.java| 358 ---
 .../storage/TimelineSchemaCreator.java  |  22 --
 7 files changed, 20 insertions(+), 724 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f16828/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 1e506aa..344de2a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1576,12 +1576,6 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1810,6 +1804,9 @@ be bound by any additional provisions that may appear in 
any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
 
+The binary distribution of this product bundles these dependencies under the
+following license:
+jamon-runtime 2.4.1
 

 
 For: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f16828/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 45bc253..152a703 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -50,8 +50,7 @@
 
 0.8.2.1
 
-    <hbase.version>1.1.3</hbase.version>
-    <phoenix.version>4.7.0-HBase-1.1</phoenix.version>
+    <hbase.version>1.2.4</hbase.version>
 2.5.1
 
 ${project.version}
@@ -1113,29 +1112,6 @@
 tests
  
   
-org.apache.phoenix
-phoenix-core
-${phoenix.version}
-
-  
-  
-jline
-jline
-  
- 
-joda-time
-joda-time
-  
-
-  
-  
-org.apache.phoenix
-phoenix-core
-test-jar
-${phoenix.version}
-test
-  
-  
 org.apache.hbase
 hbase-it
 ${hbase.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f16828/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index c627112..ed014de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -56,10 +56,6 @@
   org.apache.hadoop
   hadoop-common
 
-
-  org.apache.phoenix
-  phoenix-core
-
   
 
 
@@ -76,6 +72,8 @@
   
 
 
+
 
   org.apache.hadoop
   hadoop-auth
@@ -111,20 +109,6 @@
   
 
 
-
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-  test
-  
-
-  org.apache.hadoop
-  hadoop-common
-
-  
-
-
 
   org.apache.hadoop
   hadoop-yarn-server-applicationhistoryservice
@@ -145,14 +129,14 @@
 
 
   com.sun.jersey
-  jersey-core
+  jersey-client
   test
 
 
 
-  com.sun.jersey
-  jersey-client
-  test
+  javax.ws.rs
+  jsr311-api
+  1.1.1
 
 
 
@@ -225,23 +209,6 @@
 
 
   org.apache.hbase
-  hbase-common
-  tests
-  test
-   

[36/50] [abbrv] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7eed958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..bb29d6c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,442 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {
+super("TimelineConnector");
+this.requi

[25/50] [abbrv] hadoop git commit: YARN-5378. Accommodate app-id->cluster mapping (Sangjin Lee via Varun Saxena)

2017-04-26 Thread varunsaxena
YARN-5378. Accommodate app-id->cluster mapping (Sangjin Lee via Varun Saxena)

(cherry picked from commit 6baea680ba6e5df6f254ced086d6defa64fb99f0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44216a46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44216a46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44216a46

Branch: refs/heads/YARN-5355-branch-2
Commit: 44216a4660fbdef7818e5de07d238510db5dec22
Parents: 7d883f4
Author: Varun Saxena 
Authored: Tue Jan 17 20:05:47 2017 +0530
Committer: Varun Saxena 
Committed: Tue Apr 25 23:14:28 2017 +0530

--
 .../storage/HBaseTimelineWriterImpl.java|  20 +-
 .../storage/apptoflow/AppToFlowColumn.java  | 148 -
 .../apptoflow/AppToFlowColumnPrefix.java| 206 +++
 .../storage/apptoflow/AppToFlowRowKey.java  | 101 +
 .../storage/apptoflow/AppToFlowTable.java   |  21 +-
 .../storage/common/ColumnHelper.java|   5 +-
 .../reader/AbstractTimelineStorageReader.java   |  39 ++--
 .../storage/common/TestRowKeys.java |   4 +-
 8 files changed, 271 insertions(+), 273 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44216a46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index c1c2a5e..dfd63bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -40,7 +40,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
@@ -172,9 +172,7 @@ public class HBaseTimelineWriterImpl extends 
AbstractService implements
 FlowRunRowKey flowRunRowKey =
 new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
 if (event != null) {
-  AppToFlowRowKey appToFlowRowKey =
-  new AppToFlowRowKey(clusterId, appId);
-  onApplicationCreated(flowRunRowKey, appToFlowRowKey, appId, userId,
+  onApplicationCreated(flowRunRowKey, clusterId, appId, userId,
   flowVersion, te, event.getTimestamp());
 }
 // if it's an application entity, store metrics
@@ -193,18 +191,22 @@ public class HBaseTimelineWriterImpl extends 
AbstractService implements
   }
 
   private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
-  AppToFlowRowKey appToFlowRowKey, String appId, String userId,
-  String flowVersion, TimelineEntity te, long appCreatedTimeStamp)
+  String clusterId, String appId, String userId, String flowVersion,
+  TimelineEntity te, long appCreatedTimeStamp)
   throws IOException {
 
 String flowName = flowRunRowKey.getFlowName();
 Long flowRunId = flowRunRowKey.getFlowRunId();
 
 // store in App to flow table
+AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(appId);
 byte[] rowKey = appToFlowRowKey.getRowKey();
-AppToFlowColumn.FLOW_ID.store(rowKey, appToFlowTable, null, flowName);
-AppToFlowColumn.FLOW_RUN_ID.store(rowKey, appToFlowTable, null, flowRunId);
-AppToFlowColumn.USER_ID.store(rowKey, appToFlowTable, null, userId);
+AppToFlowColumnPrefix.FLOW_NAME.store(rowKey, appToFlowTable, clusterId,
+null, flowName);
+AppToFlowColumnPrefix.FLOW_RUN_ID.sto

[13/50] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-04-26 Thread varunsaxena
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673ab905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 

[hadoop] Git Push Summary

2017-04-26 Thread varunsaxena
Repository: hadoop
Updated Tags:  refs/tags/YARN-5355-2017-04-25 [created] 193746245

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2017-04-26 Thread varunsaxena
Repository: hadoop
Updated Tags:  refs/tags/YARN-5355-2017-05-25 [deleted] 193746245

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11417. Add datanode admin command to get the storage info. Contributed by Surendra Singh Lilhore.

2017-04-26 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1e60cfed7 -> cc66153e1


HDFS-11417. Add datanode admin command to get the storage info. Contributed by 
Surendra Singh Lilhore.

(cherry picked from commit 93fa48fcf243dc759db1736af145633da760f937)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc66153e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc66153e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc66153e

Branch: refs/heads/branch-2
Commit: cc66153e121e71f5f87166be3c3e1331517bc594
Parents: 1e60cfe
Author: Akira Ajisaka 
Authored: Wed Apr 26 17:43:29 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Apr 26 18:08:12 2017 +0900

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |   5 +
 .../hdfs/protocol/DatanodeVolumeInfo.java   | 122 +++
 .../ClientDatanodeProtocolTranslatorPB.java |  28 +
 .../src/main/proto/ClientDatanodeProtocol.proto |   9 ++
 .../src/main/proto/hdfs.proto   |  13 ++
 ...tDatanodeProtocolServerSideTranslatorPB.java |  29 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  27 
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  30 -
 .../src/site/markdown/HDFSCommands.md   |   2 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |  20 +++
 10 files changed, 283 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc66153e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index a60c17d..0202335 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -182,4 +182,9 @@ public interface ClientDatanodeProtocol {
* @return balancer bandwidth
*/
   long getBalancerBandwidth() throws IOException;
+
+  /**
+   * Get volume report of datanode.
+   */
+  List<DatanodeVolumeInfo> getVolumeReport() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc66153e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
new file mode 100644
index 000..40e0918
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Locally available datanode volume information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeVolumeInfo {
+  private long usedSpace;
+  private long freeSpace;
+  private long reservedSpace;
+  private long reservedSpaceForRep

hadoop git commit: HDFS-11417. Add datanode admin command to get the storage info. Contributed by Surendra Singh Lilhore.

2017-04-26 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2f73396b5 -> 93fa48fcf


HDFS-11417. Add datanode admin command to get the storage info. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93fa48fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93fa48fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93fa48fc

Branch: refs/heads/trunk
Commit: 93fa48fcf243dc759db1736af145633da760f937
Parents: 2f73396
Author: Akira Ajisaka 
Authored: Wed Apr 26 17:43:29 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Apr 26 17:43:29 2017 +0900

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |   5 +
 .../hdfs/protocol/DatanodeVolumeInfo.java   | 122 +++
 .../ClientDatanodeProtocolTranslatorPB.java |  29 +
 .../src/main/proto/ClientDatanodeProtocol.proto |   9 ++
 .../src/main/proto/hdfs.proto   |  13 ++
 ...tDatanodeProtocolServerSideTranslatorPB.java |  28 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  27 
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  30 -
 .../src/site/markdown/HDFSCommands.md   |   2 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |  20 +++
 10 files changed, 284 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93fa48fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 10041f5..e2be9c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -166,6 +166,11 @@ public interface ClientDatanodeProtocol {
   long getBalancerBandwidth() throws IOException;
 
   /**
+   * Get volume report of datanode.
+   */
+  List<DatanodeVolumeInfo> getVolumeReport() throws IOException;
+
+  /**
* Submit a disk balancer plan for execution.
*/
   void submitDiskBalancerPlan(String planID, long planVersion, String planFile,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93fa48fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
new file mode 100644
index 000..40e0918
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Locally available datanode volume information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeVolumeInfo {
+  private long usedSpace;
+  private long freeSpace;
+  private long reservedSpace;
+  private long reservedSpaceForReplicas;
+  private long numBlocks;
+  private StorageType storageType;
+  private String path;
+
+  public DatanodeVolumeInfo(String path, long usedSpace, long freeSpace,
+  long reservedSpace, long reservedSpaceForReplicas, long numBlocks,
+  StorageType type) {
+this.usedSpace = usedSpace;
+this.freeSpace = freeSpace;
+this.reservedSpace = reservedSpace;
+

hadoop git commit: HDFS-6708. StorageType should be encoded in the block token. Contributed by Ewan Higgs

2017-04-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8a99eba96 -> 2f73396b5


HDFS-6708. StorageType should be encoded in the block token. Contributed by 
Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f73396b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f73396b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f73396b

Branch: refs/heads/trunk
Commit: 2f73396b5901fd5fe29f6cd76fc1b3134b854b37
Parents: 8a99eba
Author: Chris Douglas 
Authored: Tue Apr 25 23:57:00 2017 -0700
Committer: Chris Douglas 
Committed: Tue Apr 25 23:57:00 2017 -0700

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   3 +
 .../token/block/BlockTokenIdentifier.java   |  40 +++-
 .../src/main/proto/hdfs.proto   |   1 +
 .../block/BlockPoolTokenSecretManager.java  |  30 +--
 .../token/block/BlockTokenSecretManager.java|  65 ++-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   3 +-
 .../hadoop/hdfs/server/balancer/KeyManager.java |   8 +-
 .../server/blockmanagement/BlockManager.java|   6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  21 ++-
 .../hdfs/server/datanode/DataXceiver.java   |  57 --
 .../erasurecode/StripedBlockReader.java |   4 +-
 .../erasurecode/StripedBlockWriter.java |   3 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  70 +++
 .../security/token/block/TestBlockToken.java| 182 +++
 14 files changed, 397 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f73396b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index e703a94..dd55203 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -635,6 +635,9 @@ public class PBHelperClient {
    for (AccessModeProto aMode :
        blockTokenSecret.getAccessModes()) {
      builder.addModes(convert(aMode));
 }
+for (StorageType storageType : blockTokenSecret.getStorageTypes()) {
+  builder.addStorageTypes(convertStorageType(storageType));
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f73396b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index 28e7acc..228a7b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -22,10 +22,13 @@ import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Optional;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -49,21 +52,24 @@ public class BlockTokenIdentifier extends TokenIdentifier {
   private String blockPoolId;
   private long blockId;
   private final EnumSet<AccessMode> modes;
+  private StorageType[] storageTypes;
   private boolean useProto;
 
   private byte [] cache;
 
   public BlockTokenIdentifier() {
-this(null, null, 0, EnumSet.noneOf(AccessMode.class), false);
+this(null, null, 0, EnumSet.noneOf(AccessMode.class), null, false);
   }
 
   public BlockTokenIdentifier(String userId, String bpid, long blockId,
-      EnumSet<AccessMode> modes, boolean useProto) {
+      EnumSet<AccessMode> modes, StorageType[] storageTypes, boolean useProto) {
 this.cache = null;
 this.userId = userId;
 this.blockPoolId = bpid;
 this.blockId = blockId;
 this.modes = modes == null ? EnumSet.noneOf(AccessMode.class) : modes;
+this.storageTypes =