hadoop git commit: Revert "HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail to tell the DFSClient about it because of a network error (cmccabe)" (jenkins didn't run yet)

2015-03-13 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5aa892ed4 -> 32741cf3d


Revert "HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot 
and fail to tell the DFSClient about it because of a network error (cmccabe)" 
(jenkins didn't run yet)

This reverts commit 5aa892ed486d42ae6b94c4866b92cd2b382ea640.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32741cf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32741cf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32741cf3

Branch: refs/heads/trunk
Commit: 32741cf3d25d85a92e3deb11c302cc2a718d71dd
Parents: 5aa892e
Author: Colin Patrick Mccabe 
Authored: Fri Mar 13 18:40:20 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Fri Mar 13 18:40:20 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 23 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  2 -
 .../datatransfer/DataTransferProtocol.java  |  5 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  2 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  4 +-
 .../hdfs/server/datanode/DataXceiver.java   | 95 
 .../server/datanode/ShortCircuitRegistry.java   | 13 +--
 .../src/main/proto/datatransfer.proto   | 11 ---
 .../shortcircuit/TestShortCircuitCache.java | 63 -
 10 files changed, 43 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32741cf3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ff00b0c..c3f9367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1177,9 +1177,6 @@ Release 2.7.0 - UNRELEASED
   HDFS-7722. DataNode#checkDiskError should also remove Storage when error
   is found. (Lei Xu via Colin P. McCabe)
 
-  HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
-  fail to tell the DFSClient about it because of a network error (cmccabe)
-
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32741cf3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..ba48c79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
-
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -71,12 +69,6 @@ import com.google.common.base.Preconditions;
 public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
 
-  public static class FailureInjector {
-public void injectRequestFileDescriptorsFailure() throws IOException {
-  // do nothing
-}
-  }
-
   @VisibleForTesting
   static ShortCircuitReplicaCreator
   createShortCircuitReplicaInfoCallback = null;
@@ -84,11 +76,6 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   private final DFSClient.Conf conf;
 
   /**
-   * Injects failures into specific operations during unit tests.
-   */
-  private final FailureInjector failureInjector;
-
-  /**
* The file name, for logging and debugging purposes.
*/
   private String fileName;
@@ -182,7 +169,6 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DFSClient.Conf conf) {
 this.conf = conf;
-this.failureInjector = conf.brfFailureInjector;
 this.remainingCacheTries = conf.nCachedConnRetry;
   }
 
@@ -532,12 +518,11 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 final DataOutputStream out =
 new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
 SlotId slotId = slot == null ? null : slot.getSlotId();
-new Sender(out).requestShortCircuitFds(block, token, slotId, 1, true);
+new Sender(out).requestShortCircuitFds(block, token, slotId, 1);
 DataInputStream in = new DataInputStream(peer.getInputStream());
 Block

hadoop git commit: Revert "HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail to tell the DFSClient about it because of a network error (cmccabe)" (Jenkins didn't run yet)

2015-03-13 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7550052b8 -> f93a2dd94


Revert "HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot 
and fail to tell the DFSClient about it because of a network error (cmccabe)" 
(Jenkins didn't run yet)

This reverts commit 7550052b85bc9b73eb94cedc708f682681679b45.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f93a2dd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f93a2dd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f93a2dd9

Branch: refs/heads/branch-2
Commit: f93a2dd94b0c770aaff35bec4f45c7a76b6c2629
Parents: 7550052
Author: Colin Patrick Mccabe 
Authored: Fri Mar 13 18:39:43 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Fri Mar 13 18:39:43 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 23 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  2 -
 .../datatransfer/DataTransferProtocol.java  |  5 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  2 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  4 +-
 .../hdfs/server/datanode/DataXceiver.java   | 95 
 .../server/datanode/ShortCircuitRegistry.java   | 13 +--
 .../src/main/proto/datatransfer.proto   | 11 ---
 .../shortcircuit/TestShortCircuitCache.java | 63 -
 10 files changed, 43 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f93a2dd9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb40232..40b538c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -874,9 +874,6 @@ Release 2.7.0 - UNRELEASED
   HDFS-7722. DataNode#checkDiskError should also remove Storage when error
   is found. (Lei Xu via Colin P. McCabe)
 
-  HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
-  fail to tell the DFSClient about it because of a network error (cmccabe)
-
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f93a2dd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..ba48c79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
-
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -71,12 +69,6 @@ import com.google.common.base.Preconditions;
 public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
 
-  public static class FailureInjector {
-public void injectRequestFileDescriptorsFailure() throws IOException {
-  // do nothing
-}
-  }
-
   @VisibleForTesting
   static ShortCircuitReplicaCreator
   createShortCircuitReplicaInfoCallback = null;
@@ -84,11 +76,6 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   private final DFSClient.Conf conf;
 
   /**
-   * Injects failures into specific operations during unit tests.
-   */
-  private final FailureInjector failureInjector;
-
-  /**
* The file name, for logging and debugging purposes.
*/
   private String fileName;
@@ -182,7 +169,6 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DFSClient.Conf conf) {
 this.conf = conf;
-this.failureInjector = conf.brfFailureInjector;
 this.remainingCacheTries = conf.nCachedConnRetry;
   }
 
@@ -532,12 +518,11 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 final DataOutputStream out =
 new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
 SlotId slotId = slot == null ? null : slot.getSlotId();
-new Sender(out).requestShortCircuitFds(block, token, slotId, 1, true);
+new Sender(out).requestShortCircuitFds(block, token, slotId, 1);
 DataInputStream in = new DataInputStream(peer.getInputStream());
 B

hadoop git commit: HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail to tell the DFSClient about it because of a network error (cmccabe)

2015-03-13 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8afbfed5d -> 7550052b8


HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail 
to tell the DFSClient about it because of a network error (cmccabe)

(cherry picked from commit 5aa892ed486d42ae6b94c4866b92cd2b382ea640)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7550052b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7550052b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7550052b

Branch: refs/heads/branch-2
Commit: 7550052b85bc9b73eb94cedc708f682681679b45
Parents: 8afbfed
Author: Colin Patrick Mccabe 
Authored: Fri Mar 13 18:29:49 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Fri Mar 13 18:30:36 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 23 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  2 +
 .../datatransfer/DataTransferProtocol.java  |  5 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  2 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  4 +-
 .../hdfs/server/datanode/DataXceiver.java   | 95 
 .../server/datanode/ShortCircuitRegistry.java   | 13 ++-
 .../src/main/proto/datatransfer.proto   | 11 +++
 .../shortcircuit/TestShortCircuitCache.java | 63 +
 10 files changed, 178 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7550052b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 40b538c..cb40232 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -874,6 +874,9 @@ Release 2.7.0 - UNRELEASED
   HDFS-7722. DataNode#checkDiskError should also remove Storage when error
   is found. (Lei Xu via Colin P. McCabe)
 
+  HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
+  fail to tell the DFSClient about it because of a network error (cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7550052b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index ba48c79..1e915b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
+
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -69,6 +71,12 @@ import com.google.common.base.Preconditions;
 public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
 
+  public static class FailureInjector {
+public void injectRequestFileDescriptorsFailure() throws IOException {
+  // do nothing
+}
+  }
+
   @VisibleForTesting
   static ShortCircuitReplicaCreator
   createShortCircuitReplicaInfoCallback = null;
@@ -76,6 +84,11 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   private final DFSClient.Conf conf;
 
   /**
+   * Injects failures into specific operations during unit tests.
+   */
+  private final FailureInjector failureInjector;
+
+  /**
* The file name, for logging and debugging purposes.
*/
   private String fileName;
@@ -169,6 +182,7 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DFSClient.Conf conf) {
 this.conf = conf;
+this.failureInjector = conf.brfFailureInjector;
 this.remainingCacheTries = conf.nCachedConnRetry;
   }
 
@@ -518,11 +532,12 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 final DataOutputStream out =
 new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
 SlotId slotId = slot == null ? null : slot.getSlotId();
-new Sender(out).requestShortCircuitFds(block, token, slotId, 1);
+new Sender(out).requestShortCircuitFds(block, token, slotId, 1, true);
 DataInputStream in = new DataInputStream(peer.getInputStream());
 BlockOpResponseProto resp = B

hadoop git commit: HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail to tell the DFSClient about it because of a network error (cmccabe)

2015-03-13 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6fdef76cc -> 5aa892ed4


HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and fail 
to tell the DFSClient about it because of a network error (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5aa892ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5aa892ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5aa892ed

Branch: refs/heads/trunk
Commit: 5aa892ed486d42ae6b94c4866b92cd2b382ea640
Parents: 6fdef76
Author: Colin Patrick Mccabe 
Authored: Fri Mar 13 18:29:49 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Fri Mar 13 18:29:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 23 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  2 +
 .../datatransfer/DataTransferProtocol.java  |  5 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  2 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  4 +-
 .../hdfs/server/datanode/DataXceiver.java   | 95 
 .../server/datanode/ShortCircuitRegistry.java   | 13 ++-
 .../src/main/proto/datatransfer.proto   | 11 +++
 .../shortcircuit/TestShortCircuitCache.java | 63 +
 10 files changed, 178 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aa892ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c3f9367..ff00b0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1177,6 +1177,9 @@ Release 2.7.0 - UNRELEASED
   HDFS-7722. DataNode#checkDiskError should also remove Storage when error
   is found. (Lei Xu via Colin P. McCabe)
 
+  HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
+  fail to tell the DFSClient about it because of a network error (cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aa892ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index ba48c79..1e915b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
+
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -69,6 +71,12 @@ import com.google.common.base.Preconditions;
 public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
 
+  public static class FailureInjector {
+public void injectRequestFileDescriptorsFailure() throws IOException {
+  // do nothing
+}
+  }
+
   @VisibleForTesting
   static ShortCircuitReplicaCreator
   createShortCircuitReplicaInfoCallback = null;
@@ -76,6 +84,11 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   private final DFSClient.Conf conf;
 
   /**
+   * Injects failures into specific operations during unit tests.
+   */
+  private final FailureInjector failureInjector;
+
+  /**
* The file name, for logging and debugging purposes.
*/
   private String fileName;
@@ -169,6 +182,7 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DFSClient.Conf conf) {
 this.conf = conf;
+this.failureInjector = conf.brfFailureInjector;
 this.remainingCacheTries = conf.nCachedConnRetry;
   }
 
@@ -518,11 +532,12 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 final DataOutputStream out =
 new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
 SlotId slotId = slot == null ? null : slot.getSlotId();
-new Sender(out).requestShortCircuitFds(block, token, slotId, 1);
+new Sender(out).requestShortCircuitFds(block, token, slotId, 1, true);
 DataInputStream in = new DataInputStream(peer.getInputStream());
 BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
 PBHelper.vintPrefixed(in));
 D
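
[Editorial note] The substance of HDFS-7915, visible in the Sender change above
(requestShortCircuitFds(block, token, slotId, 1, true)), is a receipt-verification
handshake: the client declares it can acknowledge delivery of the shared-memory
slot, and the DataNode releases the slot if the acknowledgement never arrives over
the wire. The sketch below illustrates the idea only; SlotReceiptSketch, its method
names, and the single-byte wire format are invented stand-ins, not the actual HDFS
classes or DataTransferProtocol messages.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class SlotReceiptSketch {
  /** DataNode side: keep the shm slot only if the client confirms receipt. */
  static void serveFds(DataOutputStream out, DataInputStream in,
      Runnable releaseSlot) throws IOException {
    out.writeByte(1);      // stand-in for the response status plus the fds
    out.flush();
    try {
      in.readByte();       // wait for the client's one-byte receipt
    } catch (IOException e) {
      releaseSlot.run();   // network error: free the slot instead of leaking it
      throw e;
    }
  }

  /** DFSClient side: acknowledge once the file descriptors have arrived. */
  static void confirmReceipt(DataOutputStream out) throws IOException {
    out.writeByte(0);      // one-byte receipt back to the DataNode
    out.flush();
  }
}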

hadoop git commit: YARN-2854. Updated the documentation of the timeline service and the generic history service. Contributed by Naganarasimha G R.

2015-03-13 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/trunk dfd320170 -> 6fdef76cc


YARN-2854. Updated the documentation of the timeline service and the generic 
history service. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fdef76c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fdef76c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fdef76c

Branch: refs/heads/trunk
Commit: 6fdef76cc3e818856ddcc4d385c2899a8e6ba916
Parents: dfd3201
Author: Zhijie Shen 
Authored: Fri Mar 13 13:58:42 2015 -0700
Committer: Zhijie Shen 
Committed: Fri Mar 13 14:00:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../src/site/markdown/TimelineServer.md | 318 ++-
 .../resources/images/timeline_structure.jpg | Bin 0 -> 23070 bytes
 3 files changed, 165 insertions(+), 156 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fdef76c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 94f992d..77f8819 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -387,6 +387,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3187. Documentation of Capacity Scheduler Queue mapping based on user
 or group. (Gururaj Shetty via jianhe)
 
+YARN-2854. Updated the documentation of the timeline service and the 
generic
+history service. (Naganarasimha G R via zjshen)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fdef76c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 4889936..31fe4ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -16,144 +16,122 @@ YARN Timeline Server
 
 
 * [Overview](#Overview)
-* [Current Status](#Current_Status)
-* [Basic Configuration](#Basic_Configuration)
-* [Advanced Configuration](#Advanced_Configuration)
-* [Generic-data related Configuration](#Generic-data_related_Configuration)
-* [Per-framework-date related 
Configuration](#Per-framework-date_related_Configuration)
-* [Running Timeline server](#Running_Timeline_server)
-* [Accessing generic-data via 
command-line](#Accessing_generic-data_via_command-line)
-* [Publishing of per-framework data by 
applications](#Publishing_of_per-framework_data_by_applications)
+* [Introduction](#Introduction)
+* [Current Status](#Current_Status)
+* [Timeline Structure](#Timeline_Structure)
+* [Deployment](#Deployment)
+* [Configurations](#Configurations)
+* [Running Timeline server](#Running_Timeline_server)
+* [Accessing generic-data via 
command-line](#Accessing_generic-data_via_command-line)
+* [Publishing of application specific 
data](#Publishing_of_application_specific_data)
 
 Overview
-
+-
 
-Storage and retrieval of applications' current as well as historic information 
in a generic fashion is solved in YARN through the Timeline Server (previously 
also called Generic Application History Server). This serves two 
responsibilities:
+### Introduction  
 
-* Generic information about completed applications
-
-Generic information includes application level data like queue-name, user 
information etc in the ApplicationSubmissionContext, list of 
application-attempts that ran for an application, information about each 
application-attempt, list of containers run under each application-attempt, and 
information about each container. Generic data is stored by ResourceManager to 
a history-store (default implementation on a file-system) and used by the 
web-UI to display information about completed applications.
+ Storage and retrieval of application's current as well as historic 
information in a generic fashion is solved in YARN through the Timeline Server. 
This serves two responsibilities:
 
-* Per-framework information of running and completed applications
-
-Per-framework information is completely specific to an application or 
framework. For example, Hadoop MapReduce framework can include pieces of 
information like number of map tasks, reduce tasks, counters etc. Application 
developers can publish the specific information to the Timeline server via 
Timel
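
[Editorial note] The paragraph above (truncated in this message) describes
application developers publishing per-framework data through the timeline client
API. As a rough sketch of what that looks like from an application's side; the
entity type, id, and filter values below are invented for illustration:

import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelinePublishSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);

    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(conf);
    client.start();
    try {
      TimelineEntity entity = new TimelineEntity();
      entity.setEntityType("MY_FRAMEWORK_JOB");  // hypothetical entity type
      entity.setEntityId("job_0001");            // hypothetical entity id
      entity.setStartTime(System.currentTimeMillis());
      entity.addPrimaryFilter("user", "alice");  // example queryable filter
      client.putEntities(entity);                // ship it to the Timeline Server
    } finally {
      client.stop();
    }
  }
}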

hadoop git commit: HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. Contributed by Allen Wittenauer.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 f6bdcd938 -> 41530b417


HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. 
Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41530b41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41530b41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41530b41

Branch: refs/heads/branch-2.7
Commit: 41530b4173df07a861b66dfda1a88cf411a0046a
Parents: f6bdcd9
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 13:45:13 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 13:45:47 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41530b41/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c08fc74..09b5e63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -429,6 +429,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7435. PB encoding of block reports is very inefficient.
 (Daryn Sharp via kihwal)
 
+HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt.
+(Allen Wittenauer via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
@@ -10124,8 +10127,6 @@ Release 0.22.0 - 2011-11-29
 HDFS-2514. Link resolution bug for intermediate symlinks with
 relative targets. (eli)
 
-Release 0.21.1 - Unreleased
-
 HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
 HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 



hadoop git commit: HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. Contributed by Allen Wittenauer.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b199081f5 -> 8afbfed5d


HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. 
Contributed by Allen Wittenauer.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8afbfed5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8afbfed5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8afbfed5

Branch: refs/heads/branch-2
Commit: 8afbfed5d3214d3bf39baaa30dbf54033fdf7f86
Parents: b199081
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 13:45:13 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 13:45:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8afbfed5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e3b3af7..40b538c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -441,6 +441,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7435. PB encoding of block reports is very inefficient.
 (Daryn Sharp via kihwal)
 
+HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt.
+(Allen Wittenauer via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
@@ -10139,8 +10142,6 @@ Release 0.22.0 - 2011-11-29
 HDFS-2514. Link resolution bug for intermediate symlinks with
 relative targets. (eli)
 
-Release 0.21.1 - Unreleased
-
 HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
 HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 



hadoop git commit: HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. Contributed by Allen Wittenauer.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6acb7f211 -> dfd320170


HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt. 
Contributed by Allen Wittenauer.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfd32017
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfd32017
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfd32017

Branch: refs/heads/trunk
Commit: dfd32017001e6902829671dc8cc68afbca61e940
Parents: 6acb7f2
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 13:32:45 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 13:32:45 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfd32017/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a149f18..c3f9367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -746,6 +746,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7435. PB encoding of block reports is very inefficient.
 (Daryn Sharp via kihwal)
 
+HDFS-2605. Remove redundant "Release 0.21.1" section from CHANGES.txt.
+(Allen Wittenauer via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
@@ -10299,8 +10302,6 @@ Release 0.22.0 - 2011-11-29
 
 HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
 
-Release 0.21.1 - Unreleased
-
 HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
 HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 



hadoop git commit: HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 116a7f1a1 -> f6bdcd938


HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed 
by Plamen Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6bdcd93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6bdcd93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6bdcd93

Branch: refs/heads/branch-2.7
Commit: f6bdcd938edb32ed41cad64ca54b3d80589257f5
Parents: 116a7f1
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 12:39:01 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 12:45:47 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/snapshot/FileDiffList.java  | 19 +++--
 .../hdfs/server/namenode/TestFileTruncate.java  | 30 
 3 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6bdcd93/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 31f597f..c08fc74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
 idempotent (Tsz Wo Nicholas Sze via brandonli)
 
+HDFS-7903. Cannot recover block after truncate and delete snapshot.
+(Plamen Jeliazkov via shv)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6bdcd93/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 0c94554..5c9e121 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -125,9 +128,19 @@ public class FileDiffList extends
 continue;
   break;
 }
-// Collect the remaining blocks of the file
-while(i < removedBlocks.length) {
-  collectedBlocks.addDeleteBlock(removedBlocks[i++]);
+// Check if last block is part of truncate recovery
+BlockInfoContiguous lastBlock = file.getLastBlock();
+Block dontRemoveBlock = null;
+if(lastBlock != null && lastBlock.getBlockUCState().equals(
+HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
+  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  .getTruncateBlock();
+}
+// Collect the remaining blocks of the file, ignoring truncate block
+for(;i < removedBlocks.length; i++) {
+  if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
+collectedBlocks.addDeleteBlock(removedBlocks[i]);
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6bdcd93/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 260d8bb..3b6e107 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1
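
[Editorial note] In plain terms, the FileDiffList hunk above changes the final
collection loop: previously every remaining removed block was queued for deletion;
now the block still held by an in-progress truncate recovery is skipped so the
recovery can complete. A simplified stand-alone rendering of that guard, using
bare long block ids rather than the real BlockInfo types:

import java.util.ArrayList;
import java.util.List;

class TruncateGuardSketch {
  /** Collect blocks to delete, sparing the one under truncate recovery. */
  static List<Long> blocksToDelete(long[] removedBlocks, Long dontRemoveBlock) {
    List<Long> collected = new ArrayList<>();
    for (long b : removedBlocks) {
      // Before HDFS-7903 every block here was deleted; the fix leaves the
      // truncate-recovery block (if any) alone.
      if (dontRemoveBlock == null || b != dontRemoveBlock) {
        collected.add(b);
      }
    }
    return collected;
  }
}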

hadoop git commit: HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 464271a5e -> b199081f5


HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed 
by Plamen Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b199081f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b199081f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b199081f

Branch: refs/heads/branch-2
Commit: b199081f54b35d2f129723894f95709d23899c86
Parents: 464271a
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 12:39:01 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 12:40:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/snapshot/FileDiffList.java  | 19 +++--
 .../hdfs/server/namenode/TestFileTruncate.java  | 30 
 3 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b199081f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 99a278f..e3b3af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -845,6 +845,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
 idempotent (Tsz Wo Nicholas Sze via brandonli)
 
+HDFS-7903. Cannot recover block after truncate and delete snapshot.
+(Plamen Jeliazkov via shv)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b199081f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 0c94554..5c9e121 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -125,9 +128,19 @@ public class FileDiffList extends
 continue;
   break;
 }
-// Collect the remaining blocks of the file
-while(i < removedBlocks.length) {
-  collectedBlocks.addDeleteBlock(removedBlocks[i++]);
+// Check if last block is part of truncate recovery
+BlockInfoContiguous lastBlock = file.getLastBlock();
+Block dontRemoveBlock = null;
+if(lastBlock != null && lastBlock.getBlockUCState().equals(
+HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
+  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  .getTruncateBlock();
+}
+// Collect the remaining blocks of the file, ignoring truncate block
+for(;i < removedBlocks.length; i++) {
+  if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
+collectedBlocks.addDeleteBlock(removedBlocks[i]);
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b199081f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 260d8bb..3b6e107 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -178,6

hadoop git commit: HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.

2015-03-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk d324164a5 -> 6acb7f211


HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed 
by Plamen Jeliazkov.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6acb7f21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6acb7f21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6acb7f21

Branch: refs/heads/trunk
Commit: 6acb7f2110897264241df44d564db2f85260348f
Parents: d324164
Author: Konstantin V Shvachko 
Authored: Fri Mar 13 12:39:01 2015 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Mar 13 13:12:51 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/snapshot/FileDiffList.java  | 19 +++--
 .../hdfs/server/namenode/TestFileTruncate.java  | 30 
 3 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac7e096..a149f18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1148,6 +1148,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
 idempotent (Tsz Wo Nicholas Sze via brandonli)
 
+HDFS-7903. Cannot recover block after truncate and delete snapshot.
+(Plamen Jeliazkov via shv)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 0c94554..5c9e121 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -125,9 +128,19 @@ public class FileDiffList extends
 continue;
   break;
 }
-// Collect the remaining blocks of the file
-while(i < removedBlocks.length) {
-  collectedBlocks.addDeleteBlock(removedBlocks[i++]);
+// Check if last block is part of truncate recovery
+BlockInfoContiguous lastBlock = file.getLastBlock();
+Block dontRemoveBlock = null;
+if(lastBlock != null && lastBlock.getBlockUCState().equals(
+HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
+  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  .getTruncateBlock();
+}
+// Collect the remaining blocks of the file, ignoring truncate block
+for(;i < removedBlocks.length; i++) {
+  if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
+collectedBlocks.addDeleteBlock(removedBlocks[i]);
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 260d8bb..3b6e107 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -178,6 +178

hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp. (cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cdeb1079e -> 116a7f1a1


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.
(cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
(cherry picked from commit 464271a5ede6d05bc7a68ce3f86f84dc72ec1edd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/116a7f1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/116a7f1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/116a7f1a

Branch: refs/heads/branch-2.7
Commit: 116a7f1a16771458adf702bf15d7a4706805839c
Parents: cdeb107
Author: Kihwal Lee 
Authored: Fri Mar 13 14:42:02 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:42:02 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  22 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 812 insertions(+), 355 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7f1a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6a8e098..31f597f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -426,6 +426,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7f1a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for accessing list

hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp. (cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 af80a98ac -> 464271a5e


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.
(cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/464271a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/464271a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/464271a5

Branch: refs/heads/branch-2
Commit: 464271a5ede6d05bc7a68ce3f86f84dc72ec1edd
Parents: af80a98
Author: Kihwal Lee 
Authored: Fri Mar 13 14:36:34 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:36:34 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  22 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 812 insertions(+), 355 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/464271a5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d10dd29..99a278f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -438,6 +438,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/464271a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for accessing list of blocks that
- * has been implemented as long[].
- * This class is use

hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp.

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk f446669af -> d324164a5


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d324164a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d324164a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d324164a

Branch: refs/heads/trunk
Commit: d324164a51a43d72c02567248bd9f0f12b244a40
Parents: f446669
Author: Kihwal Lee 
Authored: Fri Mar 13 14:13:55 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:23:37 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  20 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 811 insertions(+), 354 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d324164a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 909182b..ac7e096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -743,6 +743,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d324164a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for accessing list of blocks that
- * has been implemented as long[].
- * This class is useful for block report. Rather than send block reports
- * as a Block[] we can send it as a long[].
- *
- * The structure of the array is as follows:
- * 0: the length of the finalized replica list;
- * 1: the length of the under-construction replica list;
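
What the new encoding buys is visible in the imports above: the old representation spends a fixed 8 bytes for every blockId, block length, and generation stamp in a long[], while protobuf's CodedOutputStream/CodedInputStream write varints that shrink with the magnitude of the value. A minimal sketch of that idea, assuming one (blockId, length, genStamp) triple per replica; the class name and exact layout here are illustrative, not the encoding the patch itself uses:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;

// Illustrative varint encoder: each replica costs as many bytes as its
// values actually need, instead of a fixed 3 x 8 bytes in a long[].
public class VarintBlockReportSketch {
  public static byte[] encode(long[][] replicas) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    CodedOutputStream cos = CodedOutputStream.newInstance(baos);
    cos.writeInt32NoTag(replicas.length);   // replica count
    for (long[] r : replicas) {
      cos.writeSInt64NoTag(r[0]);           // blockId (zigzag: ids are arbitrary longs)
      cos.writeUInt64NoTag(r[1]);           // block length
      cos.writeUInt64NoTag(r[2]);           // generation stamp
    }
    cos.flush();
    return baos.toByteArray();
  }

  public static long[][] decode(byte[] buf) throws IOException {
    CodedInputStream cis = CodedInputStream.newInstance(buf);
    long[][] replicas = new long[cis.readInt32()][3];
    for (long[] r : replicas) {
      r[0] = cis.readSInt64();
      r[1] = cis.readUInt64();
      r[2] = cis.readUInt64();
    }
    return replicas;
  }
}

Most block lengths and generation stamps are small relative to the full long range, so each value usually fits in a few varint bytes rather than 8, which is where a large block report shrinks.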

hadoop git commit: HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not idempotent. Contributed by Tsz Wo Nicholas Sze

2015-03-13 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e4d8dddb4 -> cdeb1079e


HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
idempotent. Contributed by Tsz Wo Nicholas Sze

(cherry picked from commit f446669afb5c3d31a00c65449f27088b39e11ae3)
(cherry picked from commit af80a98ace50934284dde417efc802bb094c8b4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdeb1079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdeb1079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdeb1079

Branch: refs/heads/branch-2.7
Commit: cdeb1079ea93bba893ec6c79404551a009237874
Parents: e4d8ddd
Author: Brandon Li 
Authored: Fri Mar 13 10:42:22 2015 -0700
Committer: Brandon Li 
Committed: Fri Mar 13 10:47:25 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../BlockInfoContiguousUnderConstruction.java|  1 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java| 15 +++
 .../hdfs/server/namenode/TestFileTruncate.java   |  2 ++
 4 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdeb1079/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4867fef..6a8e098 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -827,6 +827,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6833.  DirectoryScanner should not register a deleting block with
 memory of DataNode.  (Shinichi Yamashita via szetszwo)
 
+HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
+idempotent (Tsz Wo Nicholas Sze via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdeb1079/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 91b76cc..ae809a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -383,6 +383,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
 
   private void appendUCParts(StringBuilder sb) {
     sb.append("{UCState=").append(blockUCState)
+      .append(", truncateBlock=" + truncateBlock)
       .append(", primaryNodeIndex=").append(primaryNodeIndex)
       .append(", replicas=[");
     if (replicas != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdeb1079/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 81d2b88..d490885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2021,6 +2021,21 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throw new UnsupportedOperationException(
           "Cannot truncate lazy persist file " + src);
     }
+
+    // Check if the file is already being truncated with the same length
+    final BlockInfoContiguous last = file.getLastBlock();
+    if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
+      final Block truncateBlock
+          = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();
+      if (truncateBlock != null) {
+        final long truncateLength = file.computeFileSize(false, false)
+            + truncateBlock.getNumBytes();
+        if (newLength == truncateLength) {
+          return false;
+        }
+      }
+    }
+
     // Opening an existing file for truncate. May need lease recovery.
     recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE,
         iip, src, clientName, clientMachine, false);
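
For context on the fix above: ClientProtocol.truncate(src, newLength, clientName) returns a boolean, true when the file could be truncated in place and false when block recovery has to run first. Idempotence matters because an RPC retry can arrive after the NameNode already started that recovery; with the added check, the retried call recognizes the in-flight truncate to the same length and simply answers false again instead of failing in lease recovery. A caller-side sketch of the requirement, assuming a bare three-attempt loop (the real DFSClient goes through its retry-policy machinery; the wrapper below is illustrative only):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

// Illustrative retry wrapper: a timeout may fire after the NameNode has
// already acted, so the retried truncate must be treated as the same
// operation rather than as a conflicting second one.
public class TruncateRetrySketch {
  static boolean truncateWithRetry(ClientProtocol namenode, String src,
      long newLength, String clientName) throws IOException {
    IOException last = null;
    for (int attempt = 0; attempt < 3; attempt++) {
      try {
        // With the fix, a retry that lands while recovery for the same
        // newLength is in flight returns false ("recovery pending").
        return namenode.truncate(src, newLength, clientName);
      } catch (IOException e) {
        last = e;  // e.g. a network timeout; safe to retry an idempotent op
      }
    }
    throw last;
  }
}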

hadoop git commit: HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not idempotent. Contributed by Tsz Wo Nicholas Sze

2015-03-13 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 44aedad5d -> af80a98ac


HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
idempotent. Contributed by Tsz Wo Nicholas Sze

(cherry picked from commit f446669afb5c3d31a00c65449f27088b39e11ae3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af80a98a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af80a98a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af80a98a

Branch: refs/heads/branch-2
Commit: af80a98ace50934284dde417efc802bb094c8b4e
Parents: 44aedad
Author: Brandon Li 
Authored: Fri Mar 13 10:42:22 2015 -0700
Committer: Brandon Li 
Committed: Fri Mar 13 10:43:46 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../BlockInfoContiguousUnderConstruction.java|  1 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java| 15 +++
 .../hdfs/server/namenode/TestFileTruncate.java   |  2 ++
 4 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af80a98a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 346050c..d10dd29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -839,6 +839,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6833.  DirectoryScanner should not register a deleting block with
 memory of DataNode.  (Shinichi Yamashita via szetszwo)
 
+HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
+idempotent (Tsz Wo Nicholas Sze via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af80a98a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 91b76cc..ae809a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -383,6 +383,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
 
   private void appendUCParts(StringBuilder sb) {
     sb.append("{UCState=").append(blockUCState)
+      .append(", truncateBlock=" + truncateBlock)
       .append(", primaryNodeIndex=").append(primaryNodeIndex)
       .append(", replicas=[");
     if (replicas != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af80a98a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 81d2b88..d490885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2021,6 +2021,21 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throw new UnsupportedOperationException(
           "Cannot truncate lazy persist file " + src);
     }
+
+    // Check if the file is already being truncated with the same length
+    final BlockInfoContiguous last = file.getLastBlock();
+    if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
+      final Block truncateBlock
+          = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();
+      if (truncateBlock != null) {
+        final long truncateLength = file.computeFileSize(false, false)
+            + truncateBlock.getNumBytes();
+        if (newLength == truncateLength) {
+          return false;
+        }
+      }
+    }
+
     // Opening an existing file for truncate. May need lease recovery.
     recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE,
         iip, src, clientName, clientMachine, false);

http://git-wip-us.apache.org/

hadoop git commit: HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not idempotent. Contributed by Tsz Wo Nicholas Sze

2015-03-13 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8180e676a -> f446669af


HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
idempotent. Contributed by Tsz Wo Nicholas Sze


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f446669a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f446669a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f446669a

Branch: refs/heads/trunk
Commit: f446669afb5c3d31a00c65449f27088b39e11ae3
Parents: 8180e67
Author: Brandon Li 
Authored: Fri Mar 13 10:42:22 2015 -0700
Committer: Brandon Li 
Committed: Fri Mar 13 10:42:22 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../BlockInfoContiguousUnderConstruction.java|  1 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java| 15 +++
 .../hdfs/server/namenode/TestFileTruncate.java   |  2 ++
 4 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f446669a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 153453c..909182b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1142,6 +1142,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6833.  DirectoryScanner should not register a deleting block with
 memory of DataNode.  (Shinichi Yamashita via szetszwo)
 
+HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not 
+idempotent (Tsz Wo Nicholas Sze via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f446669a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 91b76cc..ae809a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -383,6 +383,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
 
   private void appendUCParts(StringBuilder sb) {
     sb.append("{UCState=").append(blockUCState)
+      .append(", truncateBlock=" + truncateBlock)
       .append(", primaryNodeIndex=").append(primaryNodeIndex)
       .append(", replicas=[");
     if (replicas != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f446669a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 77b4a27..b384ce6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1966,6 +1966,21 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throw new UnsupportedOperationException(
           "Cannot truncate lazy persist file " + src);
     }
+
+    // Check if the file is already being truncated with the same length
+    final BlockInfoContiguous last = file.getLastBlock();
+    if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
+      final Block truncateBlock
+          = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();
+      if (truncateBlock != null) {
+        final long truncateLength = file.computeFileSize(false, false)
+            + truncateBlock.getNumBytes();
+        if (newLength == truncateLength) {
+          return false;
+        }
+      }
+    }
+
     // Opening an existing file for truncate. May need lease recovery.
     recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE,
         iip, src, clientName, clientMachine, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f446669a/hadoop-hdfs-project/hadoop-hdfs/src/test/ja

hadoop git commit: YARN-3267. Timelineserver applies the ACL rules after applying the limit on the number of records (Chang Li via jeagles)

2015-03-13 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 0cd9eb998 -> e4d8dddb4


YARN-3267. Timelineserver applies the ACL rules after applying the limit on the 
number of records (Chang Li via jeagles)

(cherry picked from commit 8180e676abb2bb500a48b3a0c0809d2a807ab235)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4d8dddb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4d8dddb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4d8dddb

Branch: refs/heads/branch-2.7
Commit: e4d8dddb49eb3d5649cc917fc6fc5dce9c669fa0
Parents: 0cd9eb9
Author: Jonathan Eagles 
Authored: Fri Mar 13 12:04:30 2015 -0500
Committer: Jonathan Eagles 
Committed: Fri Mar 13 12:12:40 2015 -0500

--
 .../jobhistory/TestJobHistoryEventHandler.java  | 14 +++---
 .../mapred/TestMRTimelineEventHandling.java | 12 ++---
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../distributedshell/TestDistributedShell.java  |  4 +-
 .../server/timeline/LeveldbTimelineStore.java   | 18 +--
 .../server/timeline/MemoryTimelineStore.java| 12 -
 .../server/timeline/TimelineDataManager.java| 50 +++-
 .../yarn/server/timeline/TimelineReader.java|  3 +-
 .../timeline/TestLeveldbTimelineStore.java  | 16 +++
 .../timeline/TestTimelineDataManager.java   | 26 +-
 .../server/timeline/TimelineStoreTestUtils.java | 33 +
 11 files changed, 126 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d8dddb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index de35d84..43e3dbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -464,7 +464,7 @@ public class TestJobHistoryEventHandler {
   t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000),
   currentTime - 10));
   TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
-  null, null, null, null, null, null);
+  null, null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   TimelineEntity tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -480,7 +480,7 @@ public class TestJobHistoryEventHandler {
   new HashMap(), "default"),
   currentTime + 10));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -498,7 +498,7 @@ public class TestJobHistoryEventHandler {
   new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
   currentTime - 20));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -520,7 +520,7 @@ public class TestJobHistoryEventHandler {
       new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
           0, new Counters(), new Counters(), new Counters()), currentTime));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -546,7 +546,7 @@ public class TestJobHistoryEventHandler {
       new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
           0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
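
The extra trailing argument added to every getEntities(...) call above is the ACL check that is now threaded down into the store, so entities are filtered while they are scanned. The ordering is the whole bug: if the store takes the first "limit" records and applies ACLs afterwards, a caller can receive fewer than "limit" entities even though more records it is allowed to read exist further down the scan. A sketch of the two orderings, assuming a generic list-backed store and a readability predicate (both stand-ins, not the actual timeline store types):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class AclLimitOrderingSketch {
  // Buggy ordering: limit first, filter second. If the first "limit"
  // records are unreadable, readable ones later in the scan are lost.
  static <T> List<T> limitThenFilter(List<T> all, int limit, Predicate<T> canRead) {
    List<T> out = new ArrayList<>();
    for (T t : all.subList(0, Math.min(limit, all.size()))) {
      if (canRead.test(t)) {
        out.add(t);
      }
    }
    return out;
  }

  // Fixed ordering: filter during the scan and stop only after "limit"
  // readable records have been collected.
  static <T> List<T> filterThenLimit(List<T> all, int limit, Predicate<T> canRead) {
    List<T> out = new ArrayList<>();
    for (T t : all) {
      if (canRead.test(t)) {
        out.add(t);
        if (out.size() == limit) {
          break;
        }
      }
    }
    return out;
  }
}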

hadoop git commit: YARN-3267. Timelineserver applies the ACL rules after applying the limit on the number of records (Chang Li via jeagles)

2015-03-13 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 146abadb9 -> 44aedad5d


YARN-3267. Timelineserver applies the ACL rules after applying the limit on the 
number of records (Chang Li via jeagles)

(cherry picked from commit 8180e676abb2bb500a48b3a0c0809d2a807ab235)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44aedad5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44aedad5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44aedad5

Branch: refs/heads/branch-2
Commit: 44aedad5ddc8069a6dba3eaf66ed54d612b21208
Parents: 146abad
Author: Jonathan Eagles 
Authored: Fri Mar 13 12:04:30 2015 -0500
Committer: Jonathan Eagles 
Committed: Fri Mar 13 12:05:21 2015 -0500

--
 .../jobhistory/TestJobHistoryEventHandler.java  | 14 +++---
 .../mapred/TestMRTimelineEventHandling.java | 12 ++---
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../distributedshell/TestDistributedShell.java  |  4 +-
 .../server/timeline/LeveldbTimelineStore.java   | 18 +--
 .../server/timeline/MemoryTimelineStore.java| 12 -
 .../server/timeline/TimelineDataManager.java| 50 +++-
 .../yarn/server/timeline/TimelineReader.java|  3 +-
 .../timeline/TestLeveldbTimelineStore.java  | 16 +++
 .../timeline/TestTimelineDataManager.java   | 26 +-
 .../server/timeline/TimelineStoreTestUtils.java | 33 +
 11 files changed, 126 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44aedad5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index de35d84..43e3dbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -464,7 +464,7 @@ public class TestJobHistoryEventHandler {
   t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000),
   currentTime - 10));
   TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
-  null, null, null, null, null, null);
+  null, null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   TimelineEntity tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -480,7 +480,7 @@ public class TestJobHistoryEventHandler {
   new HashMap(), "default"),
   currentTime + 10));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -498,7 +498,7 @@ public class TestJobHistoryEventHandler {
   new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
   currentTime - 20));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -520,7 +520,7 @@ public class TestJobHistoryEventHandler {
       new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
           0, new Counters(), new Counters(), new Counters()), currentTime));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -546,7 +546,7 @@ public class TestJobHistoryEventHandler {
       new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
           0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));

hadoop git commit: YARN-3267. Timelineserver applies the ACL rules after applying the limit on the number of records (Chang Li via jeagles)

2015-03-13 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/trunk 387f271c8 -> 8180e676a


YARN-3267. Timelineserver applies the ACL rules after applying the limit on the 
number of records (Chang Li via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8180e676
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8180e676
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8180e676

Branch: refs/heads/trunk
Commit: 8180e676abb2bb500a48b3a0c0809d2a807ab235
Parents: 387f271
Author: Jonathan Eagles 
Authored: Fri Mar 13 12:04:30 2015 -0500
Committer: Jonathan Eagles 
Committed: Fri Mar 13 12:04:30 2015 -0500

--
 .../jobhistory/TestJobHistoryEventHandler.java  | 14 +++---
 .../mapred/TestMRTimelineEventHandling.java | 12 ++---
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../distributedshell/TestDistributedShell.java  |  4 +-
 .../server/timeline/LeveldbTimelineStore.java   | 18 +--
 .../server/timeline/MemoryTimelineStore.java| 12 -
 .../server/timeline/TimelineDataManager.java| 50 +++-
 .../yarn/server/timeline/TimelineReader.java|  3 +-
 .../timeline/TestLeveldbTimelineStore.java  | 16 +++
 .../timeline/TestTimelineDataManager.java   | 26 +-
 .../server/timeline/TimelineStoreTestUtils.java | 33 +
 11 files changed, 126 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8180e676/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index de35d84..43e3dbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -464,7 +464,7 @@ public class TestJobHistoryEventHandler {
   t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000),
   currentTime - 10));
   TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
-  null, null, null, null, null, null);
+  null, null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   TimelineEntity tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -480,7 +480,7 @@ public class TestJobHistoryEventHandler {
   new HashMap(), "default"),
   currentTime + 10));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -498,7 +498,7 @@ public class TestJobHistoryEventHandler {
   new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
   currentTime - 20));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -520,7 +520,7 @@ public class TestJobHistoryEventHandler {
       new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
           0, new Counters(), new Counters(), new Counters()), currentTime));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -546,7 +546,7 @@ public class TestJobHistoryEventHandler {
 new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
 0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
   entities = ts.getEntities("MAPREDUCE_JOB", null, nu