[1/3] hadoop git commit: HADOOP-12851. S3AFileSystem Uptake of ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry McCay.

2016-02-29 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c1855a33e -> e402371b6
  refs/heads/branch-2.8 475a277e6 -> f1236c5d7
  refs/heads/trunk 307ec80ac -> d251e5541


HADOOP-12851. S3AFileSystem Uptake of 
ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry 
McCay.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d251e554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d251e554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d251e554

Branch: refs/heads/trunk
Commit: d251e55415f1fab085159b9eb3b43214d100b6a8
Parents: 307ec80
Author: Chris Nauroth 
Authored: Mon Feb 29 20:03:42 2016 -0800
Committer: Chris Nauroth 
Committed: Mon Feb 29 21:59:52 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 47 +++-
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 34 ++
 3 files changed, 63 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d251e554/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b84131b..65767f6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1772,6 +1772,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12813. Migrate TestRPC and related codes to rebase on
 ProtobufRpcEngine. (Kai Zheng via wheat9)
 
+HADOOP-12851. S3AFileSystem Uptake of
+ProviderUtils.excludeIncompatibleCredentialProviders.
+(Larry McCay via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d251e554/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index b9590ea..7ab6c79 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
@@ -118,16 +119,16 @@ public class S3AFileSystem extends FileSystem {
 bucket = name.getHost();
 
 ClientConfiguration awsConf = new ClientConfiguration();
-awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, 
+awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS,
   DEFAULT_MAXIMUM_CONNECTIONS));
 boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
 DEFAULT_SECURE_CONNECTIONS);
 awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
-awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
+awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES,
   DEFAULT_MAX_ERROR_RETRIES));
 awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
 DEFAULT_ESTABLISH_TIMEOUT));
-awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
+awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT,
   DEFAULT_SOCKET_TIMEOUT));
 String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
 if(!signerOverride.isEmpty()) {
@@ -263,9 +264,9 @@ public class S3AFileSystem extends FileSystem {
   }
 
   private void initMultipartUploads(Configuration conf) {
-boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, 
+boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
   DEFAULT_PURGE_EXISTING_MULTIPART);
-long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
+long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
   DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
 
 if (purgeExistingMultipart) {
@@ -297,9 +298,11 @@ public class S3AFileSystem extends FileSystem {
 accessKey = userInfo;
   }
 }
+Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, S3AFileSystem.class);
 if (accessKey == null) {
   try {
-final char[] key = conf.getPassword(ACCESS_KEY);
+final char[] key = c.getPassword(ACCESS_KEY);
 if (key != null) {
   accessKey = (new String(key)).trim();
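
For context, the new call is what prevents a bootstrap cycle: if
hadoop.security.credential.provider.path names a keystore that itself lives
on s3a:// (for example jceks://s3a@bucket/creds.jceks), resolving the
password while S3AFileSystem is still initializing would require
initializing S3A again. A minimal sketch of the intended usage pattern (not
the committed code; the S3ACredentialLookup class is invented for
illustration):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.security.ProviderUtils;

public class S3ACredentialLookup {
  static String lookupAccessKey(Configuration conf, String key)
      throws IOException {
    // Copy of conf whose credential provider path omits any provider
    // rooted on the file system being initialized; providers on other
    // schemes (jceks://file/..., etc.) are still consulted.
    Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
        conf, S3AFileSystem.class);
    char[] secret = c.getPassword(key);
    return secret == null ? null : new String(secret).trim();
  }
}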
   

[3/3] hadoop git commit: HADOOP-12851. S3AFileSystem Uptake of ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry McCay.

2016-02-29 Thread cnauroth
HADOOP-12851. S3AFileSystem Uptake of 
ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry 
McCay.

(cherry picked from commit d251e55415f1fab085159b9eb3b43214d100b6a8)
(cherry picked from commit e402371b6a0854a89e7bd573b04e43fed5e3e9cf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1236c5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1236c5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1236c5d

Branch: refs/heads/branch-2.8
Commit: f1236c5d7cdf20b0ba062e65ebb100064e689eab
Parents: 475a277
Author: Chris Nauroth 
Authored: Mon Feb 29 20:03:42 2016 -0800
Committer: Chris Nauroth 
Committed: Mon Feb 29 22:00:41 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 47 +++-
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 34 ++
 3 files changed, 63 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1236c5d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 601a9ce..c38b9a3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1061,6 +1061,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12813. Migrate TestRPC and related codes to rebase on
 ProtobufRpcEngine. (Kai Zheng via wheat9)
 
+HADOOP-12851. S3AFileSystem Uptake of
+ProviderUtils.excludeIncompatibleCredentialProviders.
+(Larry McCay via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1236c5d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 295afae..4cda7cd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
@@ -170,16 +171,16 @@ public class S3AFileSystem extends FileSystem {
 bucket = name.getHost();
 
 ClientConfiguration awsConf = new ClientConfiguration();
-awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, 
+awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS,
   DEFAULT_MAXIMUM_CONNECTIONS));
 boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
 DEFAULT_SECURE_CONNECTIONS);
 awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
-awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
+awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES,
   DEFAULT_MAX_ERROR_RETRIES));
 awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
 DEFAULT_ESTABLISH_TIMEOUT));
-awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
+awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT,
   DEFAULT_SOCKET_TIMEOUT));
 String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
 if(!signerOverride.isEmpty()) {
@@ -321,9 +322,9 @@ public class S3AFileSystem extends FileSystem {
   }
 
   private void initMultipartUploads(Configuration conf) {
-boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, 
+boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
   DEFAULT_PURGE_EXISTING_MULTIPART);
-long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
+long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
   DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
 
 if (purgeExistingMultipart) {
@@ -355,9 +356,11 @@ public class S3AFileSystem extends FileSystem {
 accessKey = userInfo;
   }
 }
+Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, S3AFileSystem.class);
 if (accessKey == null) {
   try {
-final char[] key = conf.getPassword(ACCESS_KEY);
+final char[] key = c.getPassword(ACCESS_KEY);
 if (key != null) {
   accessKey = (new String(key)).trim();
 }
@@ -367,7 +370,7 @@ public class S3AFileSystem extends FileSystem {

[2/3] hadoop git commit: HADOOP-12851. S3AFileSystem Uptake of ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry McCay.

2016-02-29 Thread cnauroth
HADOOP-12851. S3AFileSystem Uptake of 
ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry 
McCay.

(cherry picked from commit d251e55415f1fab085159b9eb3b43214d100b6a8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e402371b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e402371b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e402371b

Branch: refs/heads/branch-2
Commit: e402371b6a0854a89e7bd573b04e43fed5e3e9cf
Parents: c1855a3
Author: Chris Nauroth 
Authored: Mon Feb 29 20:03:42 2016 -0800
Committer: Chris Nauroth 
Committed: Mon Feb 29 22:00:27 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 47 +++-
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 34 ++
 3 files changed, 63 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e402371b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7c597f3..346a910 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1136,6 +1136,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12813. Migrate TestRPC and related codes to rebase on
 ProtobufRpcEngine. (Kai Zheng via wheat9)
 
+HADOOP-12851. S3AFileSystem Uptake of
+ProviderUtils.excludeIncompatibleCredentialProviders.
+(Larry McCay via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e402371b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 295afae..4cda7cd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
@@ -170,16 +171,16 @@ public class S3AFileSystem extends FileSystem {
 bucket = name.getHost();
 
 ClientConfiguration awsConf = new ClientConfiguration();
-awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, 
+awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS,
   DEFAULT_MAXIMUM_CONNECTIONS));
 boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
 DEFAULT_SECURE_CONNECTIONS);
 awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
-awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
+awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES,
   DEFAULT_MAX_ERROR_RETRIES));
 awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
 DEFAULT_ESTABLISH_TIMEOUT));
-awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
+awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT,
   DEFAULT_SOCKET_TIMEOUT));
 String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
 if(!signerOverride.isEmpty()) {
@@ -321,9 +322,9 @@ public class S3AFileSystem extends FileSystem {
   }
 
   private void initMultipartUploads(Configuration conf) {
-boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, 
+boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
   DEFAULT_PURGE_EXISTING_MULTIPART);
-long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
+long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
   DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
 
 if (purgeExistingMultipart) {
@@ -355,9 +356,11 @@ public class S3AFileSystem extends FileSystem {
 accessKey = userInfo;
   }
 }
+Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, S3AFileSystem.class);
 if (accessKey == null) {
   try {
-final char[] key = conf.getPassword(ACCESS_KEY);
+final char[] key = c.getPassword(ACCESS_KEY);
 if (key != null) {
   accessKey = (new String(key)).trim();
 }
@@ -367,7 +370,7 @@ public class S3AFileSystem extends FileSystem {
 }
 if (secretKe

hadoop git commit: HDFS-9733. Refactor DFSClient#getFileChecksum and DataXceiver#blockChecksum. Contributed by Kai Zheng

2016-02-29 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 680f3fc02 -> 307ec80ac


HDFS-9733. Refactor DFSClient#getFileChecksum and DataXceiver#blockChecksum. 
Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/307ec80a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/307ec80a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/307ec80a

Branch: refs/heads/trunk
Commit: 307ec80acae3b4a41d21b2d4b3a55032e55fcdc6
Parents: 680f3fc
Author: Uma Maheswara Rao G 
Authored: Mon Feb 29 21:52:20 2016 -0800
Committer: Uma Maheswara Rao G 
Committed: Mon Feb 29 21:52:20 2016 -0800

--
 .../main/java/org/apache/hadoop/io/IOUtils.java |   4 +-
 .../main/java/org/apache/hadoop/io/MD5Hash.java |  11 +
 .../org/apache/hadoop/util/DataChecksum.java|  12 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 237 ++-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  49 +++
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 416 +++
 .../protocol/datatransfer/IOStreamPair.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/datanode/BlockChecksumHelper.java| 254 +++
 .../hdfs/server/datanode/BlockSender.java   |   1 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  10 +
 .../hdfs/server/datanode/DataXceiver.java   | 162 +++-
 12 files changed, 846 insertions(+), 324 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/307ec80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 451163c..2588bf1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -261,7 +261,9 @@ public class IOUtils {
* @param stream the Stream to close
*/
   public static void closeStream(java.io.Closeable stream) {
-cleanup(null, stream);
+if (stream != null) {
+  cleanup(null, stream);
+}
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/307ec80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
index 822e089..aaf3ea1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
@@ -128,6 +128,17 @@ public class MD5Hash implements WritableComparable {
 return new MD5Hash(digest);
   }
 
+  /** Construct a hash value for an array of byte array. */
+  public static MD5Hash digest(byte[][] dataArr, int start, int len) {
+byte[] digest;
+MessageDigest digester = getDigester();
+for (byte[] data : dataArr) {
+  digester.update(data, start, len);
+}
+digest = digester.digest();
+return new MD5Hash(digest);
+  }
+
   /** Construct a hash value for a String. */
   public static MD5Hash digest(String string) {
 return digest(UTF8.getBytes(string));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/307ec80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
index faac587..e44b64d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
@@ -45,7 +45,7 @@ public class DataChecksum implements Checksum {
   public static final int CHECKSUM_MIXED   = 4;
  
   /** The checksum types */
-  public static enum Type {
+  public enum Type {
 NULL  (CHECKSUM_NULL, 0),
 CRC32 (CHECKSUM_CRC32, 4),
 CRC32C(CHECKSUM_CRC32C, 4),
@@ -55,7 +55,7 @@ public class DataChecksum implements Checksum {
 public final int id;
 public final int size;
 
-private Type(int id, int size) {
+Type(int id, int size) {
   this.id = id;
   this.size = size;
 }
@@ -230,17 +230,21 @@ public class DataChecksum implements Checksum {
   public Type getChecksu
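
The most notable new public API above is the MD5Hash.digest(byte[][], int,
int) overload, which lets the checksum helpers fold several equally-sized
per-block checksum buffers into one composite MD5. A hedged usage sketch
(the CombinedChecksumDemo class name is invented and the byte values are
arbitrary):

import org.apache.hadoop.io.MD5Hash;

public class CombinedChecksumDemo {
  public static void main(String[] args) {
    // Three per-block checksums of equal length.
    byte[][] blockChecksums = {
        {0x01, 0x02, 0x03, 0x04},
        {0x05, 0x06, 0x07, 0x08},
        {0x09, 0x0a, 0x0b, 0x0c},
    };
    // The same (start, len) window is applied to every array; here all
    // four bytes of each buffer feed a single MD5 digester.
    MD5Hash combined = MD5Hash.digest(blockChecksums, 0, 4);
    System.out.println("MD5 of checksums: " + combined);
  }
}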

hadoop git commit: HADOOP-10321. TestCompositeService should cover all enumerations of adding a service to a parent service. (Ray Chiang via kasha)

2016-02-29 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 589b53763 -> c1855a33e


HADOOP-10321. TestCompositeService should cover all enumerations of adding a 
service to a parent service. (Ray Chiang via kasha)

(cherry picked from commit 680f3fc02d0037d13c84a8b5c1a7e8729c0bcc94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1855a33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1855a33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1855a33

Branch: refs/heads/branch-2
Commit: c1855a33ebdf9a9ba7630f025c33fcbbd7904f0c
Parents: 589b537
Author: Karthik Kambatla 
Authored: Mon Feb 29 18:59:59 2016 -0800
Committer: Karthik Kambatla 
Committed: Mon Feb 29 19:00:22 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/service/TestCompositeService.java| 480 +--
 2 files changed, 445 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1855a33/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index eae216b..7c597f3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -45,6 +45,9 @@ Release 2.9.0 - UNRELEASED
 
 HADOOP-12841. Update s3-related properties in core-default.xml. (Wei-Chiu 
Chuang via lei)
 
+HADOOP-10321. TestCompositeService should cover all enumerations of 
+adding a service to a parent service. (Ray Chiang via kasha)
+
   BUG FIXES
 
 HADOOP-12605. Fix intermittent failure of TestIPC.testIpcWithReaderQueuing

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1855a33/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
index f2ede7d..9493740 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
@@ -332,40 +332,6 @@ public class TestCompositeService {
  1, testService.getServices().size());
   }
 
-  @Test(timeout = 1000)
-  public void testAddInitedSiblingInInit() throws Throwable {
-CompositeService parent = new CompositeService("parent");
-BreakableService sibling = new BreakableService();
-sibling.init(new Configuration());
-parent.addService(new AddSiblingService(parent,
-sibling,
-STATE.INITED));
-parent.init(new Configuration());
-parent.start();
-parent.stop();
-assertEquals("Incorrect number of services",
- 2, parent.getServices().size());
-  }
-
-  @Test(timeout = 1000)
-  public void testAddUninitedSiblingInInit() throws Throwable {
-CompositeService parent = new CompositeService("parent");
-BreakableService sibling = new BreakableService();
-parent.addService(new AddSiblingService(parent,
-sibling,
-STATE.INITED));
-parent.init(new Configuration());
-try {
-  parent.start();
-  fail("Expected an exception, got " + parent);
-} catch (ServiceStateException e) {
-  //expected
-}
-parent.stop();
-assertEquals("Incorrect number of services",
- 2, parent.getServices().size());
-  }
-
   @Test
   public void testRemoveService() {
 CompositeService testService = new CompositeService("TestService") {
@@ -393,6 +359,118 @@ public class TestCompositeService {
 2, testService.getServices().size());
   }
 
+  //
+  // Tests for adding child service to parent
+  //
+
+  @Test(timeout = 1000)
+  public void testAddUninitedChildBeforeInit() throws Throwable {
+CompositeService parent = new CompositeService("parent");
+BreakableService child = new BreakableService();
+AddSiblingService.addChildToService(parent, child);
+parent.init(new Configuration());
+assertInState(STATE.INITED, child);
+parent.start();
+assertInState(STATE.STARTED, child);
+parent.stop();
+assertInState(STATE.STOPPED, child);
+  }
+
+  @Test(timeout = 1000)
+  public void testAddUninitedChildInInit() throws Throwable {
+CompositeService parent = new CompositeService("parent");
+BreakableService child = n

hadoop git commit: HADOOP-10321. TestCompositeService should cover all enumerations of adding a service to a parent service. (Ray Chiang via kasha)

2016-02-29 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk d93c22ec2 -> 680f3fc02


HADOOP-10321. TestCompositeService should cover all enumerations of adding a 
service to a parent service. (Ray Chiang via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/680f3fc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/680f3fc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/680f3fc0

Branch: refs/heads/trunk
Commit: 680f3fc02d0037d13c84a8b5c1a7e8729c0bcc94
Parents: d93c22e
Author: Karthik Kambatla 
Authored: Mon Feb 29 18:59:59 2016 -0800
Committer: Karthik Kambatla 
Committed: Mon Feb 29 18:59:59 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/service/TestCompositeService.java| 480 +--
 2 files changed, 445 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/680f3fc0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8655d24..b84131b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.9.0 - UNRELEASED
 
 HADOOP-12841. Update s3-related properties in core-default.xml. (Wei-Chiu 
Chuang via lei)
 
+HADOOP-10321. TestCompositeService should cover all enumerations of 
+adding a service to a parent service. (Ray Chiang via kasha)
+
   BUG FIXES
 
 HADOOP-12605. Fix intermittent failure of TestIPC.testIpcWithReaderQueuing

http://git-wip-us.apache.org/repos/asf/hadoop/blob/680f3fc0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
index f2ede7d..9493740 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
@@ -332,40 +332,6 @@ public class TestCompositeService {
  1, testService.getServices().size());
   }
 
-  @Test(timeout = 1000)
-  public void testAddInitedSiblingInInit() throws Throwable {
-CompositeService parent = new CompositeService("parent");
-BreakableService sibling = new BreakableService();
-sibling.init(new Configuration());
-parent.addService(new AddSiblingService(parent,
-sibling,
-STATE.INITED));
-parent.init(new Configuration());
-parent.start();
-parent.stop();
-assertEquals("Incorrect number of services",
- 2, parent.getServices().size());
-  }
-
-  @Test(timeout = 1000)
-  public void testAddUninitedSiblingInInit() throws Throwable {
-CompositeService parent = new CompositeService("parent");
-BreakableService sibling = new BreakableService();
-parent.addService(new AddSiblingService(parent,
-sibling,
-STATE.INITED));
-parent.init(new Configuration());
-try {
-  parent.start();
-  fail("Expected an exception, got " + parent);
-} catch (ServiceStateException e) {
-  //expected
-}
-parent.stop();
-assertEquals("Incorrect number of services",
- 2, parent.getServices().size());
-  }
-
   @Test
   public void testRemoveService() {
 CompositeService testService = new CompositeService("TestService") {
@@ -393,6 +359,118 @@ public class TestCompositeService {
 2, testService.getServices().size());
   }
 
+  //
+  // Tests for adding child service to parent
+  //
+
+  @Test(timeout = 1000)
+  public void testAddUninitedChildBeforeInit() throws Throwable {
+CompositeService parent = new CompositeService("parent");
+BreakableService child = new BreakableService();
+AddSiblingService.addChildToService(parent, child);
+parent.init(new Configuration());
+assertInState(STATE.INITED, child);
+parent.start();
+assertInState(STATE.STARTED, child);
+parent.stop();
+assertInState(STATE.STOPPED, child);
+  }
+
+  @Test(timeout = 1000)
+  public void testAddUninitedChildInInit() throws Throwable {
+CompositeService parent = new CompositeService("parent");
+BreakableService child = new BreakableService();
+parent.init(new Configuration());
+AddSibl
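
The behavior the enlarged test matrix pins down is the parent-to-child
lifecycle propagation of CompositeService. A minimal standalone sketch
(LifecycleDemo and Child are invented names; assumes hadoop-common's
org.apache.hadoop.service API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;

public class LifecycleDemo {
  // Trivial no-op child service.
  static class Child extends AbstractService {
    Child() { super("child"); }
  }

  public static void main(String[] args) {
    // addService() is protected, so register the child from a subclass.
    CompositeService parent = new CompositeService("parent") {
      { addService(new Child()); }
    };
    parent.init(new Configuration()); // child moves to INITED
    parent.start();                   // child moves to STARTED
    System.out.println(parent.getServices().get(0).getServiceState());
    parent.stop();                    // child moves to STOPPED
  }
}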

hadoop git commit: YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow exceptions on generateApplicationReport. Contributed by Li Lu

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 07623aa94 -> 475a277e6


YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow 
exceptions on generateApplicationReport. Contributed by Li Lu

(cherry picked from commit d93c22ec274b1a0f29609217039b80732886fed7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/475a277e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/475a277e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/475a277e

Branch: refs/heads/branch-2.8
Commit: 475a277e6025d913b73a0a48b0476a02d998fadb
Parents: 07623aa
Author: Jian He 
Authored: Mon Feb 29 18:19:09 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 18:19:49 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../ApplicationHistoryManagerOnTimelineStore.java   | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/475a277e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bc4fb4c..00b2040 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1228,6 +1228,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4709. NMWebServices produces incorrect JSON for containers.
 (Varun Saxena via vvasudev)
 
+YARN-4748. ApplicationHistoryManagerOnTimelineStore should not
+swallow exceptions on generateApplicationReport. (Li Lu via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/475a277e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 6e6b9fc..fd11aea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -625,6 +625,15 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
   }
 } catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
   // AuthorizationException is thrown because the user doesn't have access
+  if (e instanceof AuthorizationException) {
+LOG.warn("Failed to authorize when generating application report for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  } else { // Attempt not found
+LOG.info("No application attempt found for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  }
  // It's possible that the app is finished before the first attempt is created.
   app.appReport.setDiagnostics(null);
   app.appReport.setCurrentApplicationAttemptId(null);



hadoop git commit: YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow exceptions on generateApplicationReport. Contributed by Li Lu

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3e842de1d -> 589b53763


YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow 
exceptions on generateApplicationReport. Contributed by Li Lu

(cherry picked from commit d93c22ec274b1a0f29609217039b80732886fed7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/589b5376
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/589b5376
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/589b5376

Branch: refs/heads/branch-2
Commit: 589b537631cc1a6b5e7f921ac170efe78a5ae60c
Parents: 3e842de
Author: Jian He 
Authored: Mon Feb 29 18:19:09 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 18:19:34 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../ApplicationHistoryManagerOnTimelineStore.java   | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/589b5376/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1710e5e..aee91ab 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1421,6 +1421,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4709. NMWebServices produces incorrect JSON for containers.
 (Varun Saxena via vvasudev)
 
+YARN-4748. ApplicationHistoryManagerOnTimelineStore should not
+swallow exceptions on generateApplicationReport. (Li Lu via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/589b5376/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 6e6b9fc..fd11aea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -625,6 +625,15 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
   }
 } catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
   // AuthorizationException is thrown because the user doesn't have access
+  if (e instanceof AuthorizationException) {
+LOG.warn("Failed to authorize when generating application report for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  } else { // Attempt not found
+LOG.info("No application attempt found for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  }
  // It's possible that the app is finished before the first attempt is created.
   app.appReport.setDiagnostics(null);
   app.appReport.setCurrentApplicationAttemptId(null);



hadoop git commit: YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow exceptions on generateApplicationReport. Contributed by Li Lu

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk d8f390d01 -> d93c22ec2


YARN-4748. ApplicationHistoryManagerOnTimelineStore should not swallow 
exceptions on generateApplicationReport. Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93c22ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93c22ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93c22ec

Branch: refs/heads/trunk
Commit: d93c22ec274b1a0f29609217039b80732886fed7
Parents: d8f390d
Author: Jian He 
Authored: Mon Feb 29 18:19:09 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 18:19:09 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../ApplicationHistoryManagerOnTimelineStore.java   | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93c22ec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 88396af..e6e7af3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1477,6 +1477,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4709. NMWebServices produces incorrect JSON for containers.
 (Varun Saxena via vvasudev)
 
+YARN-4748. ApplicationHistoryManagerOnTimelineStore should not
+swallow exceptions on generateApplicationReport. (Li Lu via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93c22ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 6e6b9fc..fd11aea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -625,6 +625,15 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
   }
 } catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
   // AuthorizationException is thrown because the user doesn't have access
+  if (e instanceof AuthorizationException) {
+LOG.warn("Failed to authorize when generating application report for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  } else { // Attempt not found
+LOG.info("No application attempt found for "
++ app.appReport.getApplicationId()
++ ". Use a placeholder for its latest attempt id. ", e);
+  }
  // It's possible that the app is finished before the first attempt is created.
   app.appReport.setDiagnostics(null);
   app.appReport.setCurrentApplicationAttemptId(null);
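
Stripped of the YARN specifics, the fix is the multi-catch pattern of
catching related exceptions together but logging them at different levels
before continuing with placeholder values. A generic sketch with stand-in
exception types and class names (YARN's actual types are
AuthorizationException and ApplicationAttemptNotFoundException):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ReportLoggingDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(ReportLoggingDemo.class);

  static void generateReport(String appId) {
    try {
      fetchLatestAttempt(appId);
    } catch (SecurityException | IllegalStateException e) {
      if (e instanceof SecurityException) {
        // Access problem: loud enough for an operator to act on.
        LOG.warn("Failed to authorize report for " + appId
            + ". Using a placeholder attempt id.", e);
      } else {
        // Expected when the app finished before any attempt was recorded.
        LOG.info("No attempt found for " + appId
            + ". Using a placeholder attempt id.", e);
      }
      // Continue with placeholder values instead of failing silently.
    }
  }

  private static void fetchLatestAttempt(String appId) {
    // Stub standing in for the timeline-store lookup.
  }
}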



hadoop git commit: HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. Contributed by Junping Du

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 69b195d61 -> 07623aa94


HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. 
Contributed by Junping Du

(cherry picked from commit d8f390d015510950ccf78174af8891cd613d4438)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07623aa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07623aa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07623aa9

Branch: refs/heads/branch-2.8
Commit: 07623aa941ad48d18d373f5fa629fa22ad8fcb25
Parents: 69b195d
Author: Jian He 
Authored: Mon Feb 29 16:24:05 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 16:25:00 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/io/retry/RetryInvocationHandler.java |  22 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |  60 ---
 .../apache/hadoop/io/retry/TestRetryProxy.java  | 101 +--
 4 files changed, 153 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07623aa9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8ef0723..601a9ce 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -440,6 +440,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12825. Log slow name resolutions.
 (Sidharta Seethana via stevel)
 
+HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler.
+(Junping Du via jianhe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07623aa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 5d94c3b..d57dc84 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -121,6 +121,7 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 invocationFailoverCount, isIdempotentOrAtMostOnce);
 RetryAction failAction = getFailAction(actions);
 if (failAction != null) {
+  // fail.
   if (failAction.reason != null) {
 LOG.warn("Exception while invoking " + 
currentProxy.proxy.getClass()
 + "." + method.getName() + " over " + currentProxy.proxyInfo
@@ -136,7 +137,8 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
   worthLogging |= LOG.isDebugEnabled();
   RetryAction failOverAction = getFailOverAction(actions);
   long delay = getDelayMillis(actions);
-  if (failOverAction != null && worthLogging) {
+
+  if (worthLogging) {
 String msg = "Exception while invoking " + method.getName()
 + " of class " + currentProxy.proxy.getClass().getSimpleName()
 + " over " + currentProxy.proxyInfo;
@@ -144,21 +146,21 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 if (invocationFailoverCount > 0) {
   msg += " after " + invocationFailoverCount + " fail over 
attempts"; 
 }
-msg += ". Trying to fail over " + formatSleepMessage(delay);
-LOG.info(msg, ex);
-  } else {
-if(LOG.isDebugEnabled()) {
-  LOG.debug("Exception while invoking " + method.getName()
-  + " of class " + 
currentProxy.proxy.getClass().getSimpleName()
-  + " over " + currentProxy.proxyInfo + ". Retrying "
-  + formatSleepMessage(delay), ex);
+
+if (failOverAction != null) {
+  // failover
+  msg += ". Trying to fail over " + formatSleepMessage(delay);
+} else {
+  // retry
+  msg += ". Retrying " + formatSleepMessage(delay);
 }
+LOG.info(msg, ex);
   }
 
   if (delay > 0) {
 Thread.sleep(delay);
   }
-  
+
   if (failOverAction != null) {
 // Make sure that concurrent failed method invocations only cause a
 // single actual fail over.

http://git-wip-us.ap
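
To see where the consolidated logging fires, here is a hedged sketch of
driving RetryInvocationHandler through the public retry API (the Echo
interface and RetryLoggingDemo class are invented; the RetryProxy and
RetryPolicies calls are the real hadoop-common API):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryLoggingDemo {
  interface Echo {
    String echo(String s);
  }

  public static void main(String[] args) {
    // A deliberately flaky implementation.
    Echo flaky = s -> {
      if (Math.random() < 0.5) {
        throw new RuntimeException("transient failure");
      }
      return s;
    };
    // Retry up to 3 times, sleeping 100 ms between attempts.
    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        3, 100, TimeUnit.MILLISECONDS);
    Echo reliable = (Echo) RetryProxy.create(Echo.class, flaky, policy);
    // After this patch, each failed attempt produces one consolidated INFO
    // line ("... Retrying ..." or "... Trying to fail over ...").
    System.out.println(reliable.echo("hello"));
  }
}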

hadoop git commit: HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. Contributed by Junping Du

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 84172b047 -> 3e842de1d


HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. 
Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e842de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e842de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e842de1

Branch: refs/heads/branch-2
Commit: 3e842de1d520f9d00c608e2e8d39808238bd157e
Parents: 84172b0
Author: Jian He 
Authored: Mon Feb 29 16:24:05 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 16:24:30 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/io/retry/RetryInvocationHandler.java |  22 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |  60 ---
 .../apache/hadoop/io/retry/TestRetryProxy.java  | 101 +--
 4 files changed, 153 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e842de1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ac31a0c..eae216b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -514,6 +514,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12825. Log slow name resolutions.
 (Sidharta Seethana via stevel)
 
+HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler.
+(Junping Du via jianhe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e842de1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 5d94c3b..d57dc84 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -121,6 +121,7 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 invocationFailoverCount, isIdempotentOrAtMostOnce);
 RetryAction failAction = getFailAction(actions);
 if (failAction != null) {
+  // fail.
   if (failAction.reason != null) {
 LOG.warn("Exception while invoking " + 
currentProxy.proxy.getClass()
 + "." + method.getName() + " over " + currentProxy.proxyInfo
@@ -136,7 +137,8 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
   worthLogging |= LOG.isDebugEnabled();
   RetryAction failOverAction = getFailOverAction(actions);
   long delay = getDelayMillis(actions);
-  if (failOverAction != null && worthLogging) {
+
+  if (worthLogging) {
 String msg = "Exception while invoking " + method.getName()
 + " of class " + currentProxy.proxy.getClass().getSimpleName()
 + " over " + currentProxy.proxyInfo;
@@ -144,21 +146,21 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 if (invocationFailoverCount > 0) {
   msg += " after " + invocationFailoverCount + " fail over 
attempts"; 
 }
-msg += ". Trying to fail over " + formatSleepMessage(delay);
-LOG.info(msg, ex);
-  } else {
-if(LOG.isDebugEnabled()) {
-  LOG.debug("Exception while invoking " + method.getName()
-  + " of class " + 
currentProxy.proxy.getClass().getSimpleName()
-  + " over " + currentProxy.proxyInfo + ". Retrying "
-  + formatSleepMessage(delay), ex);
+
+if (failOverAction != null) {
+  // failover
+  msg += ". Trying to fail over " + formatSleepMessage(delay);
+} else {
+  // retry
+  msg += ". Retrying " + formatSleepMessage(delay);
 }
+LOG.info(msg, ex);
   }
 
   if (delay > 0) {
 Thread.sleep(delay);
   }
-  
+
   if (failOverAction != null) {
 // Make sure that concurrent failed method invocations only cause a
 // single actual fail over.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e842de1/hadoop-common-project/hadoop-commo

hadoop git commit: HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. Contributed by Junping Du

2016-02-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9dafaaaf0 -> d8f390d01


HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler. 
Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8f390d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8f390d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8f390d0

Branch: refs/heads/trunk
Commit: d8f390d015510950ccf78174af8891cd613d4438
Parents: 9dafaaa
Author: Jian He 
Authored: Mon Feb 29 16:24:05 2016 -0800
Committer: Jian He 
Committed: Mon Feb 29 16:24:05 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/io/retry/RetryInvocationHandler.java |  22 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |  60 ---
 .../apache/hadoop/io/retry/TestRetryProxy.java  | 101 +--
 4 files changed, 153 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f390d0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f444b71..8655d24 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1165,6 +1165,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12825. Log slow name resolutions.
 (Sidharta Seethana via stevel)
 
+HADOOP-12622. Improve the loggings in RetryPolicies and RetryInvocationHandler.
+(Junping Du via jianhe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f390d0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 6864d5d..a67c84f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -120,6 +120,7 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 invocationFailoverCount, isIdempotentOrAtMostOnce);
 RetryAction failAction = getFailAction(actions);
 if (failAction != null) {
+  // fail.
   if (failAction.reason != null) {
 LOG.warn("Exception while invoking " + 
currentProxy.proxy.getClass()
 + "." + method.getName() + " over " + currentProxy.proxyInfo
@@ -135,7 +136,8 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
   worthLogging |= LOG.isDebugEnabled();
   RetryAction failOverAction = getFailOverAction(actions);
   long delay = getDelayMillis(actions);
-  if (failOverAction != null && worthLogging) {
+
+  if (worthLogging) {
 String msg = "Exception while invoking " + method.getName()
 + " of class " + currentProxy.proxy.getClass().getSimpleName()
 + " over " + currentProxy.proxyInfo;
@@ -143,21 +145,21 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 if (invocationFailoverCount > 0) {
   msg += " after " + invocationFailoverCount + " fail over 
attempts"; 
 }
-msg += ". Trying to fail over " + formatSleepMessage(delay);
-LOG.info(msg, ex);
-  } else {
-if(LOG.isDebugEnabled()) {
-  LOG.debug("Exception while invoking " + method.getName()
-  + " of class " + 
currentProxy.proxy.getClass().getSimpleName()
-  + " over " + currentProxy.proxyInfo + ". Retrying "
-  + formatSleepMessage(delay), ex);
+
+if (failOverAction != null) {
+  // failover
+  msg += ". Trying to fail over " + formatSleepMessage(delay);
+} else {
+  // retry
+  msg += ". Retrying " + formatSleepMessage(delay);
 }
+LOG.info(msg, ex);
   }
 
   if (delay > 0) {
 Thread.sleep(delay);
   }
-  
+
   if (failOverAction != null) {
 // Make sure that concurrent failed method invocations only cause a
 // single actual fail over.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f390d0/hadoop-common-project/hadoop-common/sr

hadoop git commit: YARN-4704. TestResourceManager#testResourceAllocation() fails when using FairScheduler. (Yufei Gu via kasha)

2016-02-29 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3cb7ae11a -> 84172b047


YARN-4704. TestResourceManager#testResourceAllocation() fails when using 
FairScheduler. (Yufei Gu via kasha)

(cherry picked from commit 9dafaaaf0de68ce7f5e495ea4b8e0ce036dc35a2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84172b04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84172b04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84172b04

Branch: refs/heads/branch-2
Commit: 84172b047bd91ee6acd94b45a877f3ed51b186ce
Parents: 3cb7ae1
Author: Karthik Kambatla 
Authored: Mon Feb 29 16:09:53 2016 -0800
Committer: Karthik Kambatla 
Committed: Mon Feb 29 16:10:26 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/TestResourceManager.java  | 7 +++
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84172b04/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fe2fd5e..1710e5e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -181,6 +181,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4731. container-executor should not follow symlinks in
 recursive_unlink_children (Colin Patrick McCabe via jlowe)
 
+YARN-4704. TestResourceManager#testResourceAllocation() fails when using 
+FairScheduler. (Yufei Gu via kasha)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84172b04/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 9ceeffb..3b59417 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -57,6 +58,8 @@ public class TestResourceManager {
   @Before
   public void setUp() throws Exception {
 Configuration conf = new YarnConfiguration();
+conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
 UserGroupInformation.setConfiguration(conf);
 resourceManager = new ResourceManager();
 resourceManager.init(conf);
@@ -261,6 +264,8 @@ public class TestResourceManager {
 }
   };
   Configuration conf = new YarnConfiguration();
+  conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   conf.set("hadoop.security.authentication", "kerberos");
   conf.set("hadoop.http.authentication.type", "kerberos");
@@ -295,6 +300,8 @@ public class TestResourceManager {
 for (String filterInitializer : simpleFilterInitializers) {
   resourceManager = new ResourceManager();
   Configuration conf = new YarnConfiguration();
+  conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   try {
 UserGroupInformation.setConfiguration(conf);
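
The essence of the fix, distilled (PinnedSchedulerSetup is an invented
helper name): tests that assert CapacityScheduler semantics must pin the
scheduler explicitly rather than inherit the default scheduler from the
build's yarn-default.xml, which can be FairScheduler in some distributions.

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;

public class PinnedSchedulerSetup {
  // Build an RM that always uses CapacityScheduler, regardless of the
  // cluster's configured default scheduler.
  static ResourceManager newCapacitySchedulerRM() {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_SCHEDULER,
        CapacityScheduler.class.getCanonicalName());
    ResourceManager rm = new ResourceManager();
    rm.init(conf);
    return rm;
  }
}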



hadoop git commit: YARN-4704. TestResourceManager#testResourceAllocation() fails when using FairScheduler. (Yufei Gu via kasha)

2016-02-29 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 215171683 -> 9dafaaaf0


YARN-4704. TestResourceManager#testResourceAllocation() fails when using 
FairScheduler. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dafaaaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dafaaaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dafaaaf

Branch: refs/heads/trunk
Commit: 9dafaaaf0de68ce7f5e495ea4b8e0ce036dc35a2
Parents: 2151716
Author: Karthik Kambatla 
Authored: Mon Feb 29 16:09:53 2016 -0800
Committer: Karthik Kambatla 
Committed: Mon Feb 29 16:10:12 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/TestResourceManager.java  | 7 +++
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dafaaaf/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 27eff2d..88396af 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -242,6 +242,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4731. container-executor should not follow symlinks in
 recursive_unlink_children (Colin Patrick McCabe via jlowe)
 
+YARN-4704. TestResourceManager#testResourceAllocation() fails when using 
+FairScheduler. (Yufei Gu via kasha)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dafaaaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 9ceeffb..3b59417 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -57,6 +58,8 @@ public class TestResourceManager {
   @Before
   public void setUp() throws Exception {
 Configuration conf = new YarnConfiguration();
+conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
 UserGroupInformation.setConfiguration(conf);
 resourceManager = new ResourceManager();
 resourceManager.init(conf);
@@ -261,6 +264,8 @@ public class TestResourceManager {
 }
   };
   Configuration conf = new YarnConfiguration();
+  conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   conf.set("hadoop.security.authentication", "kerberos");
   conf.set("hadoop.http.authentication.type", "kerberos");
@@ -295,6 +300,8 @@ public class TestResourceManager {
 for (String filterInitializer : simpleFilterInitializers) {
   resourceManager = new ResourceManager();
   Configuration conf = new YarnConfiguration();
+  conf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   try {
 UserGroupInformation.setConfiguration(conf);
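
The fix applied at each setup site above is the same: pin YarnConfiguration.RM_SCHEDULER to the scheduler the assertions assume, so the test no longer inherits FairScheduler when a build configures it as the default. A minimal self-contained sketch of the idea, using only classes the patch itself touches (the wrapper class name is illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;

public class SchedulerPinningSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Pin the scheduler explicitly instead of relying on the
    // cluster-wide default, which may be FairScheduler.
    conf.set(YarnConfiguration.RM_SCHEDULER,
        CapacityScheduler.class.getCanonicalName());
    UserGroupInformation.setConfiguration(conf);
    ResourceManager rm = new ResourceManager();
    rm.init(conf);  // the RM now instantiates CapacityScheduler
    rm.stop();
  }
}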



[2/2] hadoop git commit: HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.

2016-02-29 Thread jing9
HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.

(cherry picked from commit 2151716832ad14932dd65b1a4e47e64d8d6cd767)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cb7ae11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cb7ae11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cb7ae11

Branch: refs/heads/branch-2
Commit: 3cb7ae11a839c01b8be629774874c1873f51b747
Parents: c5db4ab
Author: Jing Zhao 
Authored: Mon Feb 29 15:34:43 2016 -0800
Committer: Jing Zhao 
Committed: Mon Feb 29 15:45:23 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |  46 ++-
 .../src/test/resources/log4j.properties |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   4 +
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 109 ---
 .../hdfs/server/namenode/FSEditLogAsync.java| 322 +++
 .../hdfs/server/namenode/FSEditLogOp.java   | 215 +++--
 .../hdfs/server/namenode/FSEditLogOpCodes.java  | 108 ---
 .../hadoop/hdfs/server/namenode/FSImage.java|   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   1 -
 .../namenode/metrics/NameNodeMetrics.java   |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   3 +
 .../hdfs/server/namenode/TestAuditLogs.java |  15 +-
 .../hdfs/server/namenode/TestEditLog.java   |  59 +++-
 .../server/namenode/TestEditLogAutoroll.java|  26 ++
 .../namenode/TestEditLogJournalFailures.java|  35 +-
 .../hdfs/server/namenode/TestEditLogRace.java   | 144 +
 .../server/namenode/TestFSEditLogLoader.java|  37 ++-
 .../server/namenode/TestNameNodeRecovery.java   |  31 +-
 .../server/namenode/ha/TestEditLogTailer.java   |  39 ++-
 21 files changed, 904 insertions(+), 306 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb7ae11/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a8d451..5deae96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -70,6 +70,8 @@ Release 2.9.0 - UNRELEASED
 HDFS-9754. Avoid unnecessary getBlockCollection calls in BlockManager.
 (jing9)
 
+HDFS-7964. Add support for async edit logging. (Daryn Sharp)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb7ae11/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
index 5611bb8..ff8c00d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
@@ -24,6 +24,9 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
 
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
@@ -56,11 +59,14 @@ import org.apache.commons.logging.LogFactory;
 import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
 
 /**
  * Integration test to ensure that the BookKeeper JournalManager
  * works for HDFS Namenode HA
  */
+@RunWith(Parameterized.class)
 public class TestBookKeeperAsHASharedDir {
   static final Log LOG = LogFactory.getLog(TestBookKeeperAsHASharedDir.class);
 
@@ -69,6 +75,27 @@ public class TestBookKeeperAsHASharedDir {
 
   private static final String TEST_FILE_DATA = "HA BookKeeperJournalManager";
 
+  @Parameters
+  @Parameters
+  public static Collection<Object[]> data() {
+Collection<Object[]> params = new ArrayList<Object[]>();
+params.add(new Object[]{ Boolean.FALSE });
+params.add(new Object[]{ Boolean.TRUE });
+return params;
+  }
+
+  private static boolean useAsyncEditLog;
+  public TestBookKeeperAsHASharedDir(Boolean async) {
+useAsyncEditLog = async;
+  }
+
+  private sta
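
Every test class touched by HDFS-7964 adopts the same JUnit pattern visible above: a Parameterized runner that executes the whole suite twice, once with async edit logging off and once with it on. A stripped-down sketch of that pattern (ExampleTest is a hypothetical stand-in for the real classes):

import java.util.ArrayList;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ExampleTest {

  // JUnit instantiates the class once per element: first {false}, then {true}.
  @Parameters
  public static Collection<Object[]> data() {
    Collection<Object[]> params = new ArrayList<Object[]>();
    params.add(new Object[]{ Boolean.FALSE });
    params.add(new Object[]{ Boolean.TRUE });
    return params;
  }

  private final boolean useAsyncEditLog;

  public ExampleTest(Boolean async) {
    this.useAsyncEditLog = async;
  }

  @Test
  public void runsOncePerParameter() {
    System.out.println("async edit log enabled: " + useAsyncEditLog);
  }
}

The patch itself keeps the flag in a static field so that static helpers such as getConf() can read it; the instance field above is the more common form when no static helper is involved.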

[1/2] hadoop git commit: HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.

2016-02-29 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c5db4ab0b -> 3cb7ae11a


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb7ae11/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 885abc3..cb6a09d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -27,6 +27,8 @@ import static org.mockito.Mockito.spy;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -50,13 +52,38 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 import com.google.common.collect.Sets;
 
 /**
  * This tests data recovery mode for the NameNode.
  */
+
+@RunWith(Parameterized.class)
 public class TestNameNodeRecovery {
+  @Parameters
+  public static Collection<Object[]> data() {
+Collection<Object[]> params = new ArrayList<Object[]>();
+params.add(new Object[]{ Boolean.FALSE });
+params.add(new Object[]{ Boolean.TRUE });
+return params;
+  }
+
+  private static boolean useAsyncEditLog;
+  public TestNameNodeRecovery(Boolean async) {
+useAsyncEditLog = async;
+  }
+
+  private static Configuration getConf() {
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
+useAsyncEditLog);
+return conf;
+  }
+
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static final StartupOption recoverStartOpt = StartupOption.RECOVER;
   private static final File TEST_DIR = 
PathUtils.getTestDir(TestNameNodeRecovery.class);
@@ -73,7 +100,7 @@ public class TestNameNodeRecovery {
 EditLogFileOutputStream elfos = null;
 EditLogFileInputStream elfis = null;
 try {
-  elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 
0);
+  elfos = new EditLogFileOutputStream(getConf(), TEST_LOG_NAME, 0);
   elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
   elts.addTransactionsToLog(elfos, cache);
@@ -519,7 +546,7 @@ public class TestNameNodeRecovery {
 final boolean needRecovery = corruptor.needRecovery(finalize);
 
 // start a cluster
-Configuration conf = new HdfsConfiguration();
+Configuration conf = getConf();
 setupRecoveryTestConf(conf);
 MiniDFSCluster cluster = null;
 FileSystem fileSys = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb7ae11/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index d486920..c400a09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -22,6 +22,8 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -40,11 +43,31 @@ import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 import com.google.common.base.Supplier;
 
+@RunWith(Parameterized.class)
 public class Tes
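
The knob these parameterized tests toggle is the new DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING boolean introduced by this change, which routes NameNode edits through the new FSEditLogAsync rather than the synchronous FSEditLog path. Setting it outside the tests looks the same; a minimal sketch, assuming nothing beyond the key shown in the diffs:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class AsyncEditLogConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Enable the async edit log path added by HDFS-7964.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, true);
    System.out.println("async edits: "
        + conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
            false));
  }
}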

[2/2] hadoop git commit: HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.

2016-02-29 Thread jing9
HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21517168
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21517168
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21517168

Branch: refs/heads/trunk
Commit: 2151716832ad14932dd65b1a4e47e64d8d6cd767
Parents: 0fa54d4
Author: Jing Zhao 
Authored: Mon Feb 29 15:34:43 2016 -0800
Committer: Jing Zhao 
Committed: Mon Feb 29 15:34:43 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |  46 ++-
 .../src/test/resources/log4j.properties |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   4 +
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 109 ---
 .../hdfs/server/namenode/FSEditLogAsync.java| 322 +++
 .../hdfs/server/namenode/FSEditLogOp.java   | 213 ++--
 .../hdfs/server/namenode/FSEditLogOpCodes.java  | 108 ---
 .../hadoop/hdfs/server/namenode/FSImage.java|   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   1 -
 .../namenode/metrics/NameNodeMetrics.java   |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   3 +
 .../hdfs/server/namenode/TestAuditLogs.java |  15 +-
 .../hdfs/server/namenode/TestEditLog.java   |  59 +++-
 .../server/namenode/TestEditLogAutoroll.java|  26 ++
 .../namenode/TestEditLogJournalFailures.java|  35 +-
 .../hdfs/server/namenode/TestEditLogRace.java   | 144 +
 .../server/namenode/TestFSEditLogLoader.java|  37 ++-
 .../server/namenode/TestNameNodeRecovery.java   |  31 +-
 .../server/namenode/ha/TestEditLogTailer.java   |  39 ++-
 21 files changed, 904 insertions(+), 304 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21517168/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3d57efa..c3ea5ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1037,6 +1037,8 @@ Release 2.9.0 - UNRELEASED
 HDFS-9754. Avoid unnecessary getBlockCollection calls in BlockManager.
 (jing9)
 
+HDFS-7964. Add support for async edit logging. (Daryn Sharp)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21517168/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
index 5611bb8..ff8c00d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
@@ -24,6 +24,9 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
 
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
@@ -56,11 +59,14 @@ import org.apache.commons.logging.LogFactory;
 import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
 
 /**
  * Integration test to ensure that the BookKeeper JournalManager
  * works for HDFS Namenode HA
  */
+@RunWith(Parameterized.class)
 public class TestBookKeeperAsHASharedDir {
   static final Log LOG = LogFactory.getLog(TestBookKeeperAsHASharedDir.class);
 
@@ -69,6 +75,27 @@ public class TestBookKeeperAsHASharedDir {
 
   private static final String TEST_FILE_DATA = "HA BookKeeperJournalManager";
 
+  @Parameters
+  public static Collection<Object[]> data() {
+Collection<Object[]> params = new ArrayList<Object[]>();
+params.add(new Object[]{ Boolean.FALSE });
+params.add(new Object[]{ Boolean.TRUE });
+return params;
+  }
+
+  private static boolean useAsyncEditLog;
+  public TestBookKeeperAsHASharedDir(Boolean async) {
+useAsyncEditLog = async;
+  }
+
+  private static Configuration getConf() {
+Configuration conf = new Configurat

[1/2] hadoop git commit: HDFS-7964. Add support for async edit logging. Contributed by Daryn Sharp.

2016-02-29 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0fa54d45b -> 215171683


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21517168/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 0265a4d..87e2523 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -27,6 +27,8 @@ import static org.mockito.Mockito.spy;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -50,13 +52,38 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 import com.google.common.collect.Sets;
 
 /**
  * This tests data recovery mode for the NameNode.
  */
+
+@RunWith(Parameterized.class)
 public class TestNameNodeRecovery {
+  @Parameters
+  public static Collection<Object[]> data() {
+Collection<Object[]> params = new ArrayList<Object[]>();
+params.add(new Object[]{ Boolean.FALSE });
+params.add(new Object[]{ Boolean.TRUE });
+return params;
+  }
+
+  private static boolean useAsyncEditLog;
+  public TestNameNodeRecovery(Boolean async) {
+useAsyncEditLog = async;
+  }
+
+  private static Configuration getConf() {
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
+useAsyncEditLog);
+return conf;
+  }
+
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static final StartupOption recoverStartOpt = StartupOption.RECOVER;
   private static final File TEST_DIR = 
PathUtils.getTestDir(TestNameNodeRecovery.class);
@@ -73,7 +100,7 @@ public class TestNameNodeRecovery {
 EditLogFileOutputStream elfos = null;
 EditLogFileInputStream elfis = null;
 try {
-  elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 
0);
+  elfos = new EditLogFileOutputStream(getConf(), TEST_LOG_NAME, 0);
   elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
   elts.addTransactionsToLog(elfos, cache);
@@ -525,7 +552,7 @@ public class TestNameNodeRecovery {
 final boolean needRecovery = corruptor.needRecovery(finalize);
 
 // start a cluster
-Configuration conf = new HdfsConfiguration();
+Configuration conf = getConf();
 setupRecoveryTestConf(conf);
 MiniDFSCluster cluster = null;
 FileSystem fileSys = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21517168/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 5a104ad..30db429 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -22,6 +22,8 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -40,11 +43,31 @@ import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 import com.google.common.base.Supplier;
 
+@RunWith(Parameterized.class)
 public class TestEd

hadoop git commit: HDFS-9853. Ozone: Add container definitions. Contributed by Anu Engineer.

2016-02-29 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 1b7df69d3 -> f3fbae8b8


HDFS-9853. Ozone: Add container definitions. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3fbae8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3fbae8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3fbae8b

Branch: refs/heads/HDFS-7240
Commit: f3fbae8b84d4a9dcd9713d379e46fd2348c745df
Parents: 1b7df69
Author: Anu Engineer 
Authored: Mon Feb 29 14:26:49 2016 -0800
Committer: Anu Engineer 
Committed: Mon Feb 29 14:26:49 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../org/apache/hadoop/ozone/package-info.java   |  35 +++
 .../main/proto/DatanodeContainerProtocol.proto  | 314 +++
 3 files changed, 350 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fbae8b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index f1d5af9..4fc1588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -351,6 +351,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   QJournalProtocol.proto
   editlog.proto
   fsimage.proto
+  DatanodeContainerProtocol.proto
 
   
   
${project.build.directory}/generated-sources/java

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fbae8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/package-info.java
new file mode 100644
index 000..db399db
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/package-info.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+/**
+ This package contains classes that support the ozone implementation on the
+ datanode side.
+
+ Main parts of ozone on datanode are:
+
+ 1. REST Interface - This code lives under the web directory and listens to the
+ WebHDFS port.
+
+ 2. Datanode container classes: These support persistence of ozone objects on
+ the datanode. They live under the container directory.
+
+ 3. Client and Shell: We also support an ozone REST client lib and shell; they
+ live under web/client and web/ozShell.
+
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fbae8b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
new file mode 100644
index 000..0fba636
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions a

[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69b195d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69b195d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69b195d6

Branch: refs/heads/branch-2.8
Commit: 69b195d619fdc4b00c912e61879e689dd33d89e7
Parents: 7ddff4b
Author: Haohui Mai 
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai 
Committed: Mon Feb 29 14:15:25 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1071 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 1030 insertions(+), 724 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 03a844f..8ef0723 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1055,6 +1055,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;
 import com.google.protobuf.BlockingService;
 
-publ

[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7ddff4b37 -> 69b195d61


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto 
b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
index 722af89..abb3883 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
@@ -32,6 +32,13 @@ service TestProtobufRpcProto {
   rpc echo(EchoRequestProto) returns (EchoResponseProto);
   rpc error(EmptyRequestProto) returns (EmptyResponseProto);
   rpc error2(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc slowPing(SlowPingRequestProto) returns (EmptyResponseProto);
+  rpc echo2(EchoRequestProto2) returns (EchoResponseProto2);
+  rpc add(AddRequestProto) returns (AddResponseProto);
+  rpc add2(AddRequestProto2) returns (AddResponseProto);
+  rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
+  rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
 }
 
 service TestProtobufRpc2Proto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new RPC.Build

[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bd0f5085e -> c5db4ab0b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto 
b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
index 722af89..abb3883 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
@@ -32,6 +32,13 @@ service TestProtobufRpcProto {
   rpc echo(EchoRequestProto) returns (EchoResponseProto);
   rpc error(EmptyRequestProto) returns (EmptyResponseProto);
   rpc error2(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc slowPing(SlowPingRequestProto) returns (EmptyResponseProto);
+  rpc echo2(EchoRequestProto2) returns (EchoResponseProto2);
+  rpc add(AddRequestProto) returns (AddResponseProto);
+  rpc add2(AddRequestProto2) returns (AddResponseProto);
+  rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
+  rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
 }
 
 service TestProtobufRpc2Proto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new RPC.Builder

[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5db4ab0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5db4ab0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5db4ab0

Branch: refs/heads/branch-2
Commit: c5db4ab0b44e7feeb1afe2d9553665d2af3c9a34
Parents: bd0f508
Author: Haohui Mai 
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai 
Committed: Mon Feb 29 14:10:18 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1065 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 1021 insertions(+), 727 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 773edd0..ac31a0c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1127,6 +1127,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;
 import com.google.protobuf.BlockingService;
 
-public

[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1cb2f9345 -> 0fa54d45b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new RPC.Builder(conf)
+.setProtocol(TestRpcBase.TestRpcService.class)
+.setInstance(service).setBindAddress("0.0.0.0")
 .setPort(0).setNumHandlers(5).setVerbose(true).build();
 
 server.start();
@@ -213,11 +238,14 @@ public class TestNMAuditLogger {
 InetSocketAddress addr = NetUtils.getConnectAddress(server);
 
 // Make a client connection and test the audit log
-TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
+TestRpcService proxy = RPC.getProxy(TestRpcService.class,
TestProtocol.versionID, addr, conf);
 // Start the testcase
-proxy.ping();
+TestProtos.EmptyRequestProto pingRequest =
+TestProtos.EmptyRequestProto.newBuilder().build();
+proxy.ping(null, pingRequest);
 
 server.stop();
+RPC.stopProxy(proxy);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/res
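
Taken together, the TestNMAuditLogger hunks show the shape of the migration: the server wraps a protobuf implementation in a BlockingService and registers it with RPC.Builder, and the client talks through a TestRpcService proxy instead of the old versioned TestProtocol interface. A condensed sketch of that flow, reusing the TestRpcBase/TestProtos test classes this patch introduces (the standalone main() wrapper, and instantiating PBServerImpl directly, are illustrative only):

import java.net.InetSocketAddress;

import com.google.protobuf.BlockingService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.ipc.TestRpcBase;
import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestProtos;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
import org.apache.hadoop.net.NetUtils;

public class ProtobufRpcSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Declare that this protocol speaks protobuf.
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

    // Server side: wrap the implementation in a reflective BlockingService.
    TestRpcBase.PBServerImpl serverImpl = new TestRpcBase.PBServerImpl();
    BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
        .newReflectiveBlockingService(serverImpl);
    Server server = new RPC.Builder(conf)
        .setProtocol(TestRpcService.class)
        .setInstance(service).setBindAddress("0.0.0.0")
        .setPort(0).setNumHandlers(5).setVerbose(true).build();
    server.start();

    // Client side: a protobuf proxy; requests and responses are messages.
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    TestRpcService proxy = RPC.getProxy(TestRpcService.class,
        TestProtocol.versionID, addr, conf);
    proxy.ping(null, TestProtos.EmptyRequestProto.newBuilder().build());

    RPC.stopProxy(proxy);
    server.stop();
  }
}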

[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fa54d45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fa54d45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fa54d45

Branch: refs/heads/trunk
Commit: 0fa54d45b1cf8a29f089f64d24f35bd221b4803f
Parents: 1cb2f93
Author: Haohui Mai 
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai 
Committed: Mon Feb 29 11:41:00 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1013 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 987 insertions(+), 709 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index eb33464..f444b71 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1763,6 +1763,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;
 import com.google.protobuf.BlockingService;
 
-public cla

hadoop git commit: HDFS-9791: libhdfs++: ConfigurationLoader throws parse_exception on invalid input. Contributed by Bob Hansen.

2016-02-29 Thread bobhansen
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 8d215aabc -> eca19c154


HDFS-9791: libhdfs++: ConfigurationLoader throws parse_exception on invalid 
input.  Contributed by Bob Hansen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eca19c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eca19c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eca19c15

Branch: refs/heads/HDFS-8707
Commit: eca19c154c4a42ee14a10271d59b9306a32221b4
Parents: 8d215aa
Author: Bob Hansen 
Authored: Mon Feb 29 13:40:28 2016 -0500
Committer: Bob Hansen 
Committed: Mon Feb 29 13:40:28 2016 -0500

--
 .../lib/common/configuration_loader.cc  | 67 +++-
 1 file changed, 36 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca19c15/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc
index 1ffb773..9bcf7e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc
@@ -185,45 +185,50 @@ bool ConfigurationLoader::UpdateMapWithString(ConfigMap & map,
 
 bool ConfigurationLoader::UpdateMapWithBytes(ConfigMap& map,
                                              std::vector<char>& raw_bytes) {
-  rapidxml::xml_document<> dom;
-  dom.parse<0>(&raw_bytes[0]);
+  try {
+    rapidxml::xml_document<> dom;
+    dom.parse<0>(&raw_bytes[0]);
 
-  /* File must contain a single <configuration> stanza */
-  auto config_node = dom.first_node("configuration", 0, false);
-  if (!config_node) {
-    return false;
-  }
+    /* File must contain a single <configuration> stanza */
+    auto config_node = dom.first_node("configuration", 0, false);
+    if (!config_node) {
+      return false;
+    }
 
-  /* Walk all of the <property> nodes, ignoring the rest */
-  for (auto property_node = config_node->first_node("property", 0, false);
-       property_node;
-       property_node = property_node->next_sibling("property", 0, false)) {
-    auto name_node = property_node->first_node("name", 0, false);
-    auto value_node = property_node->first_node("value", 0, false);
-
-    if (name_node && value_node) {
-      std::string final_value;
-      auto final_node = property_node->first_node("final", 0, false);
-      if (final_node) {
-        final_value = final_node->value();
+    /* Walk all of the <property> nodes, ignoring the rest */
+    for (auto property_node = config_node->first_node("property", 0, false);
+         property_node;
+         property_node = property_node->next_sibling("property", 0, false)) {
+      auto name_node = property_node->first_node("name", 0, false);
+      auto value_node = property_node->first_node("value", 0, false);
+
+      if (name_node && value_node) {
+        std::string final_value;
+        auto final_node = property_node->first_node("final", 0, false);
+        if (final_node) {
+          final_value = final_node->value();
+        }
+        UpdateMapWithValue(map, name_node->value(), value_node->value(), final_value);
       }
-      UpdateMapWithValue(map, name_node->value(), value_node->value(), final_value);
-    }
 
-    auto name_attr = property_node->first_attribute("name", 0, false);
-    auto value_attr = property_node->first_attribute("value", 0, false);
+      auto name_attr = property_node->first_attribute("name", 0, false);
+      auto value_attr = property_node->first_attribute("value", 0, false);
 
-    if (name_attr && value_attr) {
-      std::string final_value;
-      auto final_attr = property_node->first_attribute("final", 0, false);
-      if (final_attr) {
-        final_value = final_attr->value();
+      if (name_attr && value_attr) {
+        std::string final_value;
+        auto final_attr = property_node->first_attribute("final", 0, false);
+        if (final_attr) {
+          final_value = final_attr->value();
+        }
+        UpdateMapWithValue(map, name_attr->value(), value_attr->value(), final_value);
       }
-      UpdateMapWithValue(map, name_attr->value(), value_attr->value(), final_value);
     }
-  }
 
-  return true;
+    return true;
+  } catch (const rapidxml::parse_error &e) {
+    // TODO: Capture the result in a Status object
+    return false;
+  }
 }
 
 bool ConfigurationLoader::UpdateMapWithValue(ConfigMap& map,
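The fix above hinges on one rapidxml behavior: dom.parse() reports malformed XML by throwing rapidxml::parse_error rather than returning an error code. Below is a minimal standalone sketch of the same guard, assuming only that rapidxml.hpp is on the include path; it is an illustration of the pattern, not the committed libhdfs++ code.

#include <iostream>
#include <string>
#include <vector>
#include "rapidxml.hpp"

// Returns true iff `xml` parses and has a <configuration> root element.
static bool looks_like_hadoop_config(const std::string &xml) {
  // rapidxml parses in place, so it needs a mutable, null-terminated buffer.
  std::vector<char> buf(xml.begin(), xml.end());
  buf.push_back('\0');
  try {
    rapidxml::xml_document<> dom;
    dom.parse<0>(&buf[0]);
    return dom.first_node("configuration", 0, false) != nullptr;
  } catch (const rapidxml::parse_error &e) {
    // Without the try block this exception would escape to the caller,
    // which is exactly what HDFS-9791 fixes in UpdateMapWithBytes.
    std::cerr << "parse_error: " << e.what() << std::endl;
    return false;
  }
}

int main() {
  std::cout << looks_like_hadoop_config("<configuration/>") << "\n";  // 1
  std::cout << looks_like_hadoop_config("<configuration") << "\n";    // 0, caught
}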



hadoop git commit: HADOOP-12850. pull shell code out of hadoop-dist

2016-02-29 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk c58a6d53c -> 1cb2f9345


HADOOP-12850. pull shell code out of hadoop-dist

Signed-off-by: Steve Loughran 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cb2f934
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cb2f934
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cb2f934

Branch: refs/heads/trunk
Commit: 1cb2f93451aa444fadd1b7ffa7825ba4a6ae74e3
Parents: c58a6d5
Author: Allen Wittenauer 
Authored: Sun Feb 28 16:51:51 2016 -0800
Committer: Allen Wittenauer 
Committed: Mon Feb 29 08:43:17 2016 -0800

--
 dev-support/bin/dist-layout-stitching | 140 +++
 dev-support/bin/dist-tar-stitching|  44 +
 hadoop-dist/pom.xml   | 148 -
 3 files changed, 202 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cb2f934/dev-support/bin/dist-layout-stitching
--
diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching
new file mode 100755
index 0000000..78533f9
--- /dev/null
+++ b/dev-support/bin/dist-layout-stitching
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# project.version
+VERSION=$1
+
+# project.build.directory
+BASEDIR=$2
+
+function run()
+{
+  declare res
+
+  echo "\$ ${*}"
+  "${@}"
+  res=$?
+  if [[ ${res} != 0 ]]; then
+echo
+echo "Failed!"
+echo
+exit "${res}"
+  fi
+}
+
+function findfileindir()
+{
+  declare file="$1"
+  declare dir="${2:-./share}"
+  declare count
+
+  count=$(find "${dir}" -iname "${file}" | wc -l)
+
+  #shellcheck disable=SC2086
+  echo ${count}
+}
+
+function copyifnotexists()
+{
+  declare src="$1"
+  declare dest="$2"
+
+  declare srcname
+  declare destdir
+
+  declare child
+  declare childpath
+
+  if [[ -f "${src}" ]]; then
+srcname=${src##*/}
+if [[ "${srcname}" != *.jar ||
+  $(findfileindir "${srcname}") -eq "0" ]]; then
+  destdir=$(dirname "${dest}")
+  mkdir -p "${destdir}"
+  cp -p "${src}" "${dest}"
+fi
+  else
+for childpath in "${src}"/*; do
+  child="${childpath##*/}"
+  if [[ "${child}" == "doc" ||
+"${child}" == "webapps" ]]; then
+mkdir -p "${dest}/${child}"
+cp -r "${src}/${child}"/* "${dest}/${child}"
+continue;
+  fi
+  copyifnotexists "${src}/${child}" "${dest}/${child}"
+done
+  fi
+}
+
+#Copy all contents as is except the lib.
+#for libs check for existence in share directory, if not exist then only copy.
+function copy()
+{
+  declare src="$1"
+  declare dest="$2"
+
+  declare child
+  declare childpath
+
+  if [[ -d "${src}" ]]; then
+for childpath in "${src}"/*; do
+  child="${childpath##*/}"
+
+  if [[ "${child}" == "share" ]]; then
+copyifnotexists "${src}/${child}" "${dest}/${child}"
+  else
+if [[ -d "${src}/${child}" ]]; then
+  mkdir -p "${dest}/${child}"
+  cp -pr "${src}/${child}"/* "${dest}/${child}"
+else
+  cp -pr "${src}/${child}" "${dest}/${child}"
+fi
+  fi
+done
+  fi
+}
+
+# shellcheck disable=SC2164
+ROOT=$(cd "${BASEDIR}"/../..;pwd)
+echo
+echo "Current directory $(pwd)"
+echo
+run rm -rf "hadoop-${VERSION}"
+run mkdir "hadoop-${VERSION}"
+run cd "hadoop-${VERSION}"
+run cp -p "${ROOT}/LICENSE.txt" .
+run cp -p "${ROOT}/NOTICE.txt" .
+run cp -p "${ROOT}/README.txt" .
+
+# Copy hadoop-common first so that it have always have all dependencies.
+# Remaining projects will copy only libraries which are not present already in 'share' directory.
+run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
+run copy "${ROOT}/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${VE

hadoop git commit: YARN-4731. container-executor should not follow symlinks in recursive_unlink_children. Contributed by Colin Patrick McCabe

2016-02-29 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8bc023b3b -> c58a6d53c


YARN-4731. container-executor should not follow symlinks in 
recursive_unlink_children. Contributed by Colin Patrick McCabe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58a6d53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58a6d53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58a6d53

Branch: refs/heads/trunk
Commit: c58a6d53c58209a8f78ff64e04e9112933489fb5
Parents: 8bc023b
Author: Jason Lowe 
Authored: Mon Feb 29 15:24:35 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 29 15:24:35 2016 +

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../impl/container-executor.c   | 54 ++-
 .../test/test-container-executor.c  | 99 +++-
 3 files changed, 153 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58a6d53/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3c91fbd..27eff2d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -239,6 +239,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4566. Fix test failure in TestMiniYarnClusterNodeUtilization.
 (Takashi Ohnishi via rohithsharmaks)
 
+YARN-4731. container-executor should not follow symlinks in
+recursive_unlink_children (Colin Patrick McCabe via jlowe)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58a6d53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 4bc8c78..44de2bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1579,9 +1579,9 @@ static int rmdir_as_nm(const char* path) {
 static int open_helper(int dirfd, const char *name) {
   int fd;
   if (dirfd >= 0) {
-fd = openat(dirfd, name, O_RDONLY);
+fd = openat(dirfd, name, O_RDONLY | O_NOFOLLOW);
   } else {
-fd = open(name, O_RDONLY);
+fd = open(name, O_RDONLY | O_NOFOLLOW);
   }
   if (fd >= 0) {
 return fd;
@@ -1615,6 +1615,34 @@ static int unlink_helper(int dirfd, const char *name, int flags) {
   return errno;
 }
 
+/**
+ * Determine if an entry in a directory is a symlink.
+ *
+ * @param dirfd The directory file descriptor, or -1 if there is none.
+ * @param name  If dirfd is -1, this is the path to examine.
+ *  Otherwise, this is the file name in the directory to
+ *  examine.
+ *
+ * @return  0 if the entry is not a symlink
+ *  1 if the entry is a symlink
+ *  A negative errno code if we couldn't access the entry.
+ */
+static int is_symlink_helper(int dirfd, const char *name)
+{
+  struct stat stat;
+
+  if (dirfd < 0) {
+if (lstat(name, &stat) < 0) {
+  return -errno;
+}
+  } else {
+if (fstatat(dirfd, name, &stat, AT_SYMLINK_NOFOLLOW) < 0) {
+  return -errno;
+}
+  }
+  return !!S_ISLNK(stat.st_mode);
+}
+
 static int recursive_unlink_helper(int dirfd, const char *name,
const char* fullpath)
 {
@@ -1622,6 +1650,28 @@ static int recursive_unlink_helper(int dirfd, const char *name,
   DIR *dfd = NULL;
   struct stat stat;
 
+  // Check to see if the file is a symlink.  If so, delete the symlink rather
+  // than what it points to.
+  ret = is_symlink_helper(dirfd, name);
+  if (ret < 0) {
+// is_symlink_helper failed.
+ret = -ret;
+fprintf(LOGFILE, "is_symlink_helper(%s) failed: %s\n",
+fullpath, strerror(ret));
+goto done;
+  } else if (ret == 1) {
+// is_symlink_helper determined that the path is a symlink.
+ret = unlink_helper(dirfd, name, 0);
+if (ret) {
+  fprintf(LOGFILE, "failed to unlink symlink %s: %s\n",
+  fullpath, strerror(ret));
+}
+goto done;
+  }
+
+  // Open the file.  We use O_NOFOLLOW here to ensure that we if a symlink was
+  // swapped in by an attacker, we will fail to follow 
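The two changes work together: the new is_symlink_helper classifies the entry without following links, and the O_NOFOLLOW flag closes the race in which an attacker swaps a symlink in between that check and the open. A compact sketch of the same pattern, assuming POSIX and a valid directory fd; this is illustrative, not the container-executor source:

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

// 1 if `name` is a symlink, 0 if not, -errno on failure -- the same
// contract as the is_symlink_helper added by this patch.
static int is_symlink(int dirfd, const char *name) {
  struct stat sb;
  int rc = (dirfd >= 0) ? fstatat(dirfd, name, &sb, AT_SYMLINK_NOFOLLOW)
                        : lstat(name, &sb);
  if (rc < 0) return -errno;
  return S_ISLNK(sb.st_mode) ? 1 : 0;
}

// Delete a directory entry that is expected to be a regular file,
// without ever following a symlink.
int remove_file_entry(int dirfd, const char *name) {
  int ret = is_symlink(dirfd, name);
  if (ret < 0) {
    fprintf(stderr, "is_symlink(%s): %s\n", name, strerror(-ret));
    return -ret;
  }
  if (ret == 1) {
    // Remove the link itself; never touch whatever it points to.
    return unlinkat(dirfd, name, 0) == 0 ? 0 : errno;
  }
  // O_NOFOLLOW closes the window between the check above and this open:
  // if a symlink was swapped in meanwhile, the open fails with ELOOP.
  int fd = openat(dirfd, name, O_RDONLY | O_NOFOLLOW);
  if (fd < 0) return errno;
  close(fd);
  return unlinkat(dirfd, name, 0) == 0 ? 0 : errno;
}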

hadoop git commit: YARN-4731. container-executor should not follow symlinks in recursive_unlink_children. Contributed by Colin Patrick McCabe (cherry picked from commit c58a6d53c58209a8f78ff64e04e9112

2016-02-29 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 531a081b0 -> bd0f5085e


YARN-4731. container-executor should not follow symlinks in 
recursive_unlink_children. Contributed by Colin Patrick McCabe
(cherry picked from commit c58a6d53c58209a8f78ff64e04e9112933489fb5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd0f5085
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd0f5085
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd0f5085

Branch: refs/heads/branch-2
Commit: bd0f5085e373cc0ae438524e479749c552be2096
Parents: 531a081
Author: Jason Lowe 
Authored: Mon Feb 29 15:24:35 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 29 15:26:26 2016 +

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../impl/container-executor.c   | 54 ++-
 .../test/test-container-executor.c  | 99 +++-
 3 files changed, 153 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd0f5085/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index edde68a..fe2fd5e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -178,6 +178,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4566. Fix test failure in TestMiniYarnClusterNodeUtilization.
 (Takashi Ohnishi via rohithsharmaks)
 
+YARN-4731. container-executor should not follow symlinks in
+recursive_unlink_children (Colin Patrick McCabe via jlowe)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd0f5085/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index e3ab1fe..9146483 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1579,9 +1579,9 @@ static int rmdir_as_nm(const char* path) {
 static int open_helper(int dirfd, const char *name) {
   int fd;
   if (dirfd >= 0) {
-fd = openat(dirfd, name, O_RDONLY);
+fd = openat(dirfd, name, O_RDONLY | O_NOFOLLOW);
   } else {
-fd = open(name, O_RDONLY);
+fd = open(name, O_RDONLY | O_NOFOLLOW);
   }
   if (fd >= 0) {
 return fd;
@@ -1615,6 +1615,34 @@ static int unlink_helper(int dirfd, const char *name, int flags) {
   return errno;
 }
 
+/**
+ * Determine if an entry in a directory is a symlink.
+ *
+ * @param dirfd The directory file descriptor, or -1 if there is none.
+ * @param name  If dirfd is -1, this is the path to examine.
+ *  Otherwise, this is the file name in the directory to
+ *  examine.
+ *
+ * @return  0 if the entry is not a symlink
+ *  1 if the entry is a symlink
+ *  A negative errno code if we couldn't access the entry.
+ */
+static int is_symlink_helper(int dirfd, const char *name)
+{
+  struct stat stat;
+
+  if (dirfd < 0) {
+if (lstat(name, &stat) < 0) {
+  return -errno;
+}
+  } else {
+if (fstatat(dirfd, name, &stat, AT_SYMLINK_NOFOLLOW) < 0) {
+  return -errno;
+}
+  }
+  return !!S_ISLNK(stat.st_mode);
+}
+
 static int recursive_unlink_helper(int dirfd, const char *name,
const char* fullpath)
 {
@@ -1622,6 +1650,28 @@ static int recursive_unlink_helper(int dirfd, const char *name,
   DIR *dfd = NULL;
   struct stat stat;
 
+  // Check to see if the file is a symlink.  If so, delete the symlink rather
+  // than what it points to.
+  ret = is_symlink_helper(dirfd, name);
+  if (ret < 0) {
+// is_symlink_helper failed.
+ret = -ret;
+fprintf(LOGFILE, "is_symlink_helper(%s) failed: %s\n",
+fullpath, strerror(ret));
+goto done;
+  } else if (ret == 1) {
+// is_symlink_helper determined that the path is a symlink.
+ret = unlink_helper(dirfd, name, 0);
+if (ret) {
+  fprintf(LOGFILE, "failed to unlink symlink %s: %s\n",
+  fullpath, strerror(ret));
+}
+goto done;
+  }
+
+  // Open the file.  We use O_NOFOLLOW here to ensure that w

hadoop git commit: HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN in webhdfs doc. Contributed by Brahma Reddy Battula.

2016-02-29 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 048181738 -> e14ab939b


HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN 
in webhdfs doc. Contributed by Brahma Reddy Battula.

(cherry picked from commit 056f9013122dc3f0effc62d620261c312dd7f8ed)
(cherry picked from commit 6a9f8d95b7c4f9c66253d975ebbca1d186267d4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e14ab939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e14ab939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e14ab939

Branch: refs/heads/branch-2.7
Commit: e14ab939bfe3fefa0f6db7e02c5181d4c0262eca
Parents: 0481817
Author: Akira Ajisaka 
Authored: Mon Feb 29 18:40:28 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Feb 29 18:41:57 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e14ab939/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f094728..2fd8905 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -116,6 +116,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9765. TestBlockScanner#testVolumeIteratorWithCaching fails
 intermittently. (aajisaka)
 
+HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and
+CANCELDELEGATIONTOKEN in webhdfs doc. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e14ab939/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index d2ce6d4..efab9eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -160,8 +160,8 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`SETOWNER`](#Set_Owner) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setOwner)
* [`SETPERMISSION`](#Set_Permission) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setPermission)
* [`SETTIMES`](#Set_Access_or_Modification_Time) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setTimes)
-* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken)
-* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken)
+* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken)
+* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken)
* [`CREATESNAPSHOT`](#Create_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).createSnapshot)
* [`RENAMESNAPSHOT`](#Rename_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot)
* [`SETXATTR`](#Set_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
@@ -974,7 +974,7 @@ See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/Fil

{"long": 1320962673997}   //the new expiration time

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken

### Cancel Delegation Token

@@ -987,7 +987,7 @@ See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSys
HTTP/1.1 200 OK
Content-Length: 0

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken
 
 Error Responses
 ---
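For context on what the corrected links document: token renewal is a single authenticated HTTP PUT against the NameNode, and the JSON body in the diff above is its entire response. A hedged sketch with libcurl, where the host, port, and token value are placeholders (the URL shape follows the WebHDFS.md text above, not a new API):

#include <curl/curl.h>
#include <iostream>
#include <string>

static size_t collect(char *data, size_t size, size_t nmemb, void *out) {
  static_cast<std::string *>(out)->append(data, size * nmemb);
  return size * nmemb;
}

int main() {
  // Placeholder endpoint and token; real calls also need authentication.
  const std::string url =
      "http://namenode.example.com:50070/webhdfs/v1/"
      "?op=RENEWDELEGATIONTOKEN&token=PLACEHOLDER_TOKEN";
  CURL *curl = curl_easy_init();
  if (!curl) return 1;
  std::string body;
  curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT");  // the op is an HTTP PUT
  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collect);
  curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);
  CURLcode rc = curl_easy_perform(curl);
  if (rc == CURLE_OK) std::cout << body << "\n";  // e.g. {"long": <expiration>}
  curl_easy_cleanup(curl);
  return rc == CURLE_OK ? 0 : 1;
}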



hadoop git commit: HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN in webhdfs doc. Contributed by Brahma Reddy Battula.

2016-02-29 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c7824e8e9 -> 7ddff4b37


HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN 
in webhdfs doc. Contributed by Brahma Reddy Battula.

(cherry picked from commit 056f9013122dc3f0effc62d620261c312dd7f8ed)
(cherry picked from commit 6a9f8d95b7c4f9c66253d975ebbca1d186267d4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ddff4b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ddff4b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ddff4b3

Branch: refs/heads/branch-2.8
Commit: 7ddff4b37ab136b00196d6ccba3da08ff4dfe23e
Parents: c7824e8
Author: Akira Ajisaka 
Authored: Mon Feb 29 18:40:28 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Feb 29 18:42:48 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ddff4b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b03649..0789be1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1914,6 +1914,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9765. TestBlockScanner#testVolumeIteratorWithCaching fails
 intermittently. (aajisaka)
 
+HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and
+CANCELDELEGATIONTOKEN in webhdfs doc. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ddff4b3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 9df9179..7283e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -163,8 +163,8 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`SETOWNER`](#Set_Owner) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setOwner)
* [`SETPERMISSION`](#Set_Permission) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setPermission)
* [`SETTIMES`](#Set_Access_or_Modification_Time) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setTimes)
-* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken)
-* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken)
+* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken)
+* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken)
* [`CREATESNAPSHOT`](#Create_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).createSnapshot)
* [`RENAMESNAPSHOT`](#Rename_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot)
* [`SETXATTR`](#Set_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
@@ -1072,7 +1072,7 @@ See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/Fil

{"long": 1320962673997}   //the new expiration time

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken

### Cancel Delegation Token

@@ -1085,7 +1085,7 @@ See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSys
HTTP/1.1 200 OK
Content-Length: 0

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken
 
 Error Responses
 ---



hadoop git commit: HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN in webhdfs doc. Contributed by Brahma Reddy Battula.

2016-02-29 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 32c7791a6 -> 531a081b0


HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN 
in webhdfs doc. Contributed by Brahma Reddy Battula.

(cherry picked from commit 056f9013122dc3f0effc62d620261c312dd7f8ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/531a081b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/531a081b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/531a081b

Branch: refs/heads/branch-2
Commit: 531a081b01702bb4a78bb88a74376e70ed140355
Parents: 32c7791
Author: Akira Ajisaka 
Authored: Mon Feb 29 18:40:28 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Feb 29 18:42:35 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/531a081b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 38eac6c..5a8d451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2004,6 +2004,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9765. TestBlockScanner#testVolumeIteratorWithCaching fails
 intermittently. (aajisaka)
 
+HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and
+CANCELDELEGATIONTOKEN in webhdfs doc. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/531a081b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 9df9179..7283e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -163,8 +163,8 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`SETOWNER`](#Set_Owner) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setOwner)
* [`SETPERMISSION`](#Set_Permission) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setPermission)
* [`SETTIMES`](#Set_Access_or_Modification_Time) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setTimes)
-* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken)
-* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken)
+* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken)
+* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken)
* [`CREATESNAPSHOT`](#Create_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).createSnapshot)
* [`RENAMESNAPSHOT`](#Rename_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot)
* [`SETXATTR`](#Set_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
@@ -1072,7 +1072,7 @@ See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/Fil

{"long": 1320962673997}   //the new expiration time

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken

### Cancel Delegation Token

@@ -1085,7 +1085,7 @@ See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSys
HTTP/1.1 200 OK
Content-Length: 0

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken
 
 Error Responses
 ---



hadoop git commit: HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN in webhdfs doc. Contributed by Brahma Reddy Battula.

2016-02-29 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 809ebc0b1 -> 8bc023b3b


HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN 
in webhdfs doc. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bc023b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bc023b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bc023b3

Branch: refs/heads/trunk
Commit: 8bc023b3b17754bf422eb9a8e749e8ea01768ac2
Parents: 809ebc0
Author: Akira Ajisaka 
Authored: Mon Feb 29 18:40:28 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Feb 29 18:42:21 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bc023b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 78d2721..3d57efa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2949,6 +2949,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9765. TestBlockScanner#testVolumeIteratorWithCaching fails
 intermittently. (aajisaka)
 
+HDFS-9864. Correct reference for RENEWDELEGATIONTOKEN and
+CANCELDELEGATIONTOKEN in webhdfs doc. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bc023b3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 2d3d361..79e79b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -163,8 +163,8 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`SETOWNER`](#Set_Owner) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setOwner)
* [`SETPERMISSION`](#Set_Permission) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setPermission)
* [`SETTIMES`](#Set_Access_or_Modification_Time) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setTimes)
-* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken)
-* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken)
+* [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken)
+* [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken)
* [`CREATESNAPSHOT`](#Create_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).createSnapshot)
* [`RENAMESNAPSHOT`](#Rename_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot)
* [`SETXATTR`](#Set_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
@@ -1071,7 +1071,7 @@ See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/Fil

{"long": 1320962673997}   //the new expiration time

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renewDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken

### Cancel Delegation Token

@@ -1084,7 +1084,7 @@ See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSys
HTTP/1.1 200 OK
Content-Length: 0

-See also: [`token`](#Token), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).cancelDelegationToken
+See also: [`token`](#Token), [DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken
 
 Error Responses
 ---



hadoop git commit: HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)

2016-02-29 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2cb1fd3bd -> c7824e8e9


HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)

(cherry picked from commit 809ebc0b146135d86433e9c7bfa17e294b7928f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7824e8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7824e8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7824e8e

Branch: refs/heads/branch-2.8
Commit: c7824e8e9e57ac007807afbf7e9ff5a6be68e254
Parents: 2cb1fd3
Author: Masatake Iwasaki 
Authored: Mon Feb 29 18:20:14 2016 +0900
Committer: Masatake Iwasaki 
Committed: Mon Feb 29 18:26:09 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-common-project/hadoop-common/pom.xml | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 3 files changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7824e8e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1fe6259..03a844f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -9,6 +9,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12416. Trash messages should be handled by Logger instead of being
 delivered on System.out. (Mingliang Liu via aajisaka)
 
+HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)
+
   NEW FEATURES
 
 HADOOP-11226. Add a configuration to set ipc.Client's traffic class with

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7824e8e/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index ccbe0d6..28fcdaf 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -64,8 +64,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7824e8e/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index 3e6ed6f..18d754f 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -113,8 +113,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.httpcomponents</groupId>
-      <artifactId>httpclient</artifactId>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>



hadoop git commit: HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)

2016-02-29 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2a1bb6cb6 -> 32c7791a6


HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)

(cherry picked from commit 809ebc0b146135d86433e9c7bfa17e294b7928f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32c7791a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32c7791a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32c7791a

Branch: refs/heads/branch-2
Commit: 32c7791a6c9e74af0a6ea80c8429a3eddc96a94c
Parents: 2a1bb6c
Author: Masatake Iwasaki 
Authored: Mon Feb 29 18:20:14 2016 +0900
Committer: Masatake Iwasaki 
Committed: Mon Feb 29 18:23:33 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-common-project/hadoop-common/pom.xml | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 3 files changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32c7791a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 17dc1f0..773edd0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -80,6 +80,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12416. Trash messages should be handled by Logger instead of being
 delivered on System.out. (Mingliang Liu via aajisaka)
 
+HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)
+
   NEW FEATURES
 
 HADOOP-11226. Add a configuration to set ipc.Client's traffic class with

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32c7791a/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 3cfcfa2..947cff3 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -64,8 +64,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32c7791a/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index cd88d13..ea2e75d 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -113,8 +113,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.httpcomponents</groupId>
-      <artifactId>httpclient</artifactId>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>



hadoop git commit: HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)

2016-02-29 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk e3ac231f7 -> 809ebc0b1


HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809ebc0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809ebc0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809ebc0b

Branch: refs/heads/trunk
Commit: 809ebc0b146135d86433e9c7bfa17e294b7928f2
Parents: e3ac231
Author: Masatake Iwasaki 
Authored: Mon Feb 29 18:20:14 2016 +0900
Committer: Masatake Iwasaki 
Committed: Mon Feb 29 18:20:14 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-common-project/hadoop-common/pom.xml | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 3 files changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809ebc0b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 64ec531..eb33464 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -733,6 +733,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12416. Trash messages should be handled by Logger instead of being
 delivered on System.out. (Mingliang Liu via aajisaka)
 
+HADOOP-12552. Fix undeclared/unused dependency to httpclient (iwasakims)
+
   NEW FEATURES
 
 HADOOP-11226. Add a configuration to set ipc.Client's traffic class with

http://git-wip-us.apache.org/repos/asf/hadoop/blob/809ebc0b/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 7e4d090..503f312 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -66,8 +66,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/809ebc0b/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index 1b541e2..542a523 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -113,8 +113,8 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.httpcomponents</groupId>
-      <artifactId>httpclient</artifactId>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
       <scope>compile</scope>