hadoop git commit: HDFS-9438. TestPipelinesFailover assumes Linux ifconfig. (John Zhuge via Yongjun Zhang)

2015-11-25 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 177975e96 -> 8176ea7dc


HDFS-9438. TestPipelinesFailover assumes Linux ifconfig. (John Zhuge via 
Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8176ea7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8176ea7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8176ea7d

Branch: refs/heads/trunk
Commit: 8176ea7dc694841a993f2bfc30669fe22f9ec1d2
Parents: 177975e
Author: Yongjun Zhang 
Authored: Wed Nov 25 07:40:16 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 07:40:16 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../namenode/ha/TestPipelinesFailover.java  | 45 ++--
 2 files changed, 26 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8176ea7d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index db49e54..ce0e74f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1684,6 +1684,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8807.  dfs.datanode.data.dir does not handle spaces between
 storageType and URI correctly.  (Anu Engineer via szetszwo)
 
+HDFS-9438. TestPipelinesFailover assumes Linux ifconfig.
+(John Zhuge via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8176ea7d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 3da37f5..f1858a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -429,28 +430,28 @@ public class TestPipelinesFailover {
 // The following section of code is to help debug HDFS-6694 about
 // this test that fails from time to time due to "too many open files".
 //
-String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
-ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
-sce.execute();
-
-System.out.println("HDFS-6694 Debug Data BEGIN===");
-System.out.println("'ulimit -a' output:\n" + sce.getOutput());
-
-scmd = new String[] {"hostname"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'hostname' output:\n" + sce.getOutput());
-
-scmd = new String[] {"ifconfig"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'ifconfig' output:\n" + sce.getOutput());
-
-scmd = new String[] {"whoami"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'whoami' output:\n" + sce.getOutput());
-System.out.println("===HDFS-6694 Debug Data END");
+
+// Only collect debug data on these OSes.
+if (Shell.LINUX || Shell.SOLARIS || Shell.MAC) {
+  System.out.println("HDFS-6694 Debug Data BEGIN===");
+  
+  String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
+  ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+
+  scmd = new String[] {"hostname"};
+  sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'hostname' output:\n" + sce.getOutput());
+
+  scmd = new String[] {"ifconfig", "-a"};
+  sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'ifconfig' output:\n" + sce.getOutput());
+
+  System.out.println("===HDFS-6694 Debug Data END");
+}
 
 HAStressTestHarness harness = new HAStressTestHarness();
 // Disable permissions so that another 

hadoop git commit: YARN-4380. TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails intermittently. Contributed by Varun Saxena.

2015-11-25 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5794dc83b -> d76b523b0


YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails 
intermittently. Contributed by Varun Saxena.

(cherry picked from commit 0656d2dc83af6a48a8d8d0e37cdf1f813124f366)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d76b523b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d76b523b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d76b523b

Branch: refs/heads/branch-2
Commit: d76b523b020fcd76e9abdd661967ca4a931d9863
Parents: 5794dc8
Author: Tsuyoshi Ozawa 
Authored: Thu Nov 26 01:10:02 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Thu Nov 26 01:10:41 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../TestResourceLocalizationService.java  | 18 +-
 2 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76b523b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index feef3a3..ac03f82 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1054,6 +1054,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4365. FileSystemNodeLabelStore should check for root dir existence on
 startup (Kuhu Shukla via jlowe)
 
+YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill
+fails intermittently. (Varun Saxena via ozawa)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76b523b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index c14ec7f..64d3d68 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.Options;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
@@ -1101,14 +1102,21 @@ public class TestResourceLocalizationService {
 
   private static class DummyExecutor extends DefaultContainerExecutor {
 private volatile boolean stopLocalization = false;
+private AtomicInteger numLocalizers = new AtomicInteger(0);
 @Override
 public void startLocalizer(LocalizerStartContext ctx)
 throws IOException, InterruptedException {
+  numLocalizers.incrementAndGet();
   while (!stopLocalization) {
 Thread.yield();
   }
 }
-void setStopLocalization() {
+private void waitForLocalizers(int num) {
+  while (numLocalizers.intValue() < num) {
+Thread.yield();
+  }
+}
+private void setStopLocalization() {
   stopLocalization = true;
 }
   }
@@ -1251,6 +1259,10 @@ public class TestResourceLocalizationService {
   spyService.handle(new ContainerLocalizationRequestEvent(c2, rsrcs1));
 
   dispatcher.await();
+  // Wait for localizers of both container c1 and c2 to begin.
+  exec.waitForLocalizers(2);
+  LocalizerRunner locC1 =
+  spyService.getLocalizerRunner(c1.getContainerId().toString());
   final String containerIdStr = c1.getContainerId().toString();
   // Heartbeats from container localizer
   LocalResourceStatus rsrc1success = mock(LocalResourceStatus.class);
@@ -1318,6 +1330,10 @@ public class TestResourceLocalizationService {
  Set<Path> paths =
   Sets.newHashSet(new Path(locPath1), new Path(locPath1 + "_tmp"),
   new Path(locPath2), new Path(locPath2 + "_tmp"));
+  // Wait 

hadoop git commit: HDFS-6694. Addendum. Update CHANGES.txt for cherry-picking to 2.8.

2015-11-25 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk bab03f3ee -> 84d01ad7f


HDFS-6694. Addendum. Update CHANGES.txt for cherry-picking to 2.8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84d01ad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84d01ad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84d01ad7

Branch: refs/heads/trunk
Commit: 84d01ad7f43bc498bc2e9d3afe68aed7f4a4d462
Parents: bab03f3
Author: Yongjun Zhang 
Authored: Wed Nov 25 08:27:26 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 08:41:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d01ad7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ce0e74f..8100c4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -319,10 +319,6 @@ Trunk (Unreleased)
 HDFS-5794. Fix the inconsistency of layout version number of 
 ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
-HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
-intermittently with various symptoms - debugging patch. (Yongjun Zhang via
-Arpit Agarwal)
-
 HDFS-6893. crypto subcommand is not sorted properly in hdfs's hadoop_usage
 (David Luo via aw)
 
@@ -2388,6 +2384,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-8335. FSNamesystem should construct FSPermissionChecker only if
 permission is enabled. (Gabor Liptak via wheat9)
 
+HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
+intermittently with various symptoms - debugging patch. (Yongjun Zhang via
+Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: Tests in mapreduce-client-app are writing outside of target. Contributed by Akira AJISAKA.

2015-11-25 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 84d01ad7f -> 15d577bfb


Tests in mapreduce-client-app are writing outside of target. Contributed by 
Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15d577bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15d577bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15d577bf

Branch: refs/heads/trunk
Commit: 15d577bfbb3f18fc95251d22378b53aa4210115f
Parents: 84d01ad
Author: Junping Du 
Authored: Wed Nov 25 09:15:26 2015 -0800
Committer: Junping Du 
Committed: Wed Nov 25 09:15:26 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/v2/app/TestMRAppMaster.java   | 24 +---
 2 files changed, 19 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15d577bf/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 26a0776..c6e80e7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6553. Replace '\u2b05' with '<-' in rendering job configuration.
 (Gabor Liptak via aajisaka)
 
+MAPREDUCE-6557. Tests in mapreduce-client-app are writing outside of
+target. (Akira AJISAKA via junping_du)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15d577bf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index 86fa33e..5116491 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -95,11 +96,13 @@ import org.mockito.Mockito;
 
 public class TestMRAppMaster {
   private static final Log LOG = LogFactory.getLog(TestMRAppMaster.class);
-  static String stagingDir = "staging/";
+  private static final Path TEST_ROOT_DIR =
+  new Path(System.getProperty("test.build.data", "target/test-dir"));
+  private static final Path testDir = new Path(TEST_ROOT_DIR,
+  TestMRAppMaster.class.getName() + "-tmpDir");
+  static String stagingDir = new Path(testDir, "staging").toString();
   private static FileContext localFS = null;
-  private static final File testDir = new File("target",
-TestMRAppMaster.class.getName() + "-tmpDir").getAbsoluteFile();
-  
+
   @BeforeClass
   public static void setup() throws AccessControlException,
   FileNotFoundException, IllegalArgumentException, IOException {
@@ -108,12 +111,12 @@ public class TestMRAppMaster {
 File dir = new File(stagingDir);
 stagingDir = dir.getAbsolutePath();
 localFS = FileContext.getLocalFSFileContext();
-localFS.delete(new Path(testDir.getAbsolutePath()), true);
-testDir.mkdir();
+localFS.delete(testDir, true);
+new File(testDir.toString()).mkdir();
   }
   
   @Before
-  public void cleanup() throws IOException {
+  public void prepare() throws IOException {
 File dir = new File(stagingDir);
 if(dir.exists()) {
   FileUtils.deleteDirectory(dir);
@@ -121,6 +124,11 @@ public class TestMRAppMaster {
 dir.mkdirs();
   }
 
+  @AfterClass
+  public static void cleanup() throws IOException {
+localFS.delete(testDir, true);
+  }
+
   @Test
   public void testMRAppMasterForDifferentUser() throws IOException,
   InterruptedException {
@@ -427,7 +435,7 @@ public class TestMRAppMaster {
 
 JobConf conf = new JobConf();
 
-Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
+Path tokenFilePath = new Path(testDir, "tokens-file");
Map<String, String> newEnv = new HashMap<String, String>();
 

hadoop git commit: HADOOP-12598. Add XML namespace declarations for some hadoop/tools modules. Contributed by Xin Wang.

2015-11-25 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0656d2dc8 -> bab03f3ee


HADOOP-12598. Add XML namespace declarations for some hadoop/tools modules. 
Contributed by Xin Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bab03f3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bab03f3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bab03f3e

Branch: refs/heads/trunk
Commit: bab03f3ee69f887c23776da5f882bbabad596b4f
Parents: 0656d2d
Author: Akira Ajisaka 
Authored: Thu Nov 26 01:20:35 2015 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 26 01:21:20 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-tools/hadoop-datajoin/pom.xml| 5 -
 hadoop-tools/hadoop-gridmix/pom.xml | 5 -
 hadoop-tools/hadoop-pipes/pom.xml   | 5 -
 4 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab03f3e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 70295be..d586057 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1362,6 +1362,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11954. Solaris does not support RLIMIT_MEMLOCK as in Linux
 (Alan Burlison via aw)
 
+HADOOP-12598. Add XML namespace declarations for some hadoop/tools modules.
+(Xin Wang via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab03f3e/hadoop-tools/hadoop-datajoin/pom.xml
--
diff --git a/hadoop-tools/hadoop-datajoin/pom.xml 
b/hadoop-tools/hadoop-datajoin/pom.xml
index ff511f1..9469521 100644
--- a/hadoop-tools/hadoop-datajoin/pom.xml
+++ b/hadoop-tools/hadoop-datajoin/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd">
   4.0.0
   
 org.apache.hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab03f3e/hadoop-tools/hadoop-gridmix/pom.xml
--
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml 
b/hadoop-tools/hadoop-gridmix/pom.xml
index baa72ca..db84f8f 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd">
   4.0.0
   
 org.apache.hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab03f3e/hadoop-tools/hadoop-pipes/pom.xml
--
diff --git a/hadoop-tools/hadoop-pipes/pom.xml 
b/hadoop-tools/hadoop-pipes/pom.xml
index cf3a1d3..e463cb5 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -12,7 +12,10 @@
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd">
   4.0.0
   
 org.apache.hadoop



hadoop git commit: YARN-4380. TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails intermittently. Contributed by Varun Saxena.

2015-11-25 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3e85542a7 -> 0656d2dc8


YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails 
intermittently. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0656d2dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0656d2dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0656d2dc

Branch: refs/heads/trunk
Commit: 0656d2dc83af6a48a8d8d0e37cdf1f813124f366
Parents: 3e85542
Author: Tsuyoshi Ozawa 
Authored: Thu Nov 26 01:10:02 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Thu Nov 26 01:10:02 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../TestResourceLocalizationService.java  | 18 +-
 2 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0656d2dc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e036335..d0b31dd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1106,6 +1106,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4365. FileSystemNodeLabelStore should check for root dir existence on
 startup (Kuhu Shukla via jlowe)
 
+YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill
+fails intermittently. (Varun Saxena via ozawa)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0656d2dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index c14ec7f..64d3d68 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.Options;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
@@ -1101,14 +1102,21 @@ public class TestResourceLocalizationService {
 
   private static class DummyExecutor extends DefaultContainerExecutor {
 private volatile boolean stopLocalization = false;
+private AtomicInteger numLocalizers = new AtomicInteger(0);
 @Override
 public void startLocalizer(LocalizerStartContext ctx)
 throws IOException, InterruptedException {
+  numLocalizers.incrementAndGet();
   while (!stopLocalization) {
 Thread.yield();
   }
 }
-void setStopLocalization() {
+private void waitForLocalizers(int num) {
+  while (numLocalizers.intValue() < num) {
+Thread.yield();
+  }
+}
+private void setStopLocalization() {
   stopLocalization = true;
 }
   }
@@ -1251,6 +1259,10 @@ public class TestResourceLocalizationService {
   spyService.handle(new ContainerLocalizationRequestEvent(c2, rsrcs1));
 
   dispatcher.await();
+  // Wait for localizers of both container c1 and c2 to begin.
+  exec.waitForLocalizers(2);
+  LocalizerRunner locC1 =
+  spyService.getLocalizerRunner(c1.getContainerId().toString());
   final String containerIdStr = c1.getContainerId().toString();
   // Heartbeats from container localizer
   LocalResourceStatus rsrc1success = mock(LocalResourceStatus.class);
@@ -1318,6 +1330,10 @@ public class TestResourceLocalizationService {
  Set<Path> paths =
   Sets.newHashSet(new Path(locPath1), new Path(locPath1 + "_tmp"),
   new Path(locPath2), new Path(locPath2 + "_tmp"));
+  // Wait for localizer runner thread for container c1 to finish.
+  while 

hadoop git commit: YARN-4380. TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails intermittently. Contributed by Varun Saxena.

2015-11-25 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b68f527b9 -> f50f889c1


YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails 
intermittently. Contributed by Varun Saxena.

(cherry picked from commit 0656d2dc83af6a48a8d8d0e37cdf1f813124f366)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f50f889c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f50f889c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f50f889c

Branch: refs/heads/branch-2.7
Commit: f50f889c1789b08765eeb9b86a92891dcd54c6ff
Parents: b68f527
Author: Tsuyoshi Ozawa 
Authored: Thu Nov 26 01:10:02 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Thu Nov 26 01:22:11 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../TestResourceLocalizationService.java  | 18 +-
 2 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f50f889c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a2e9798..84037d2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -34,6 +34,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4365. FileSystemNodeLabelStore should check for root dir existence on
 startup (Kuhu Shukla via jlowe)
 
+YARN-4380. 
TestResourceLocalizationService.testDownloadingResourcesOnContainerKill
+fails intermittently. (Varun Saxena via ozawa)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f50f889c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index cfed5cd..ca0c29f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.Options;
 import org.junit.Assert;
@@ -1091,16 +1092,23 @@ public class TestResourceLocalizationService {
 
   private static class DummyExecutor extends DefaultContainerExecutor {
 private volatile boolean stopLocalization = false;
+private AtomicInteger numLocalizers = new AtomicInteger(0);
 @Override
 public void startLocalizer(Path nmPrivateContainerTokensPath,
 InetSocketAddress nmAddr, String user, String appId, String locId,
 LocalDirsHandlerService dirsHandler) throws IOException,
 InterruptedException {
+  numLocalizers.incrementAndGet();
   while (!stopLocalization) {
 Thread.yield();
   }
 }
-void setStopLocalization() {
+private void waitForLocalizers(int num) {
+  while (numLocalizers.intValue() < num) {
+Thread.yield();
+  }
+}
+private void setStopLocalization() {
   stopLocalization = true;
 }
   }
@@ -1243,6 +1251,10 @@ public class TestResourceLocalizationService {
   spyService.handle(new ContainerLocalizationRequestEvent(c2, rsrcs1));
 
   dispatcher.await();
+  // Wait for localizers of both container c1 and c2 to begin.
+  exec.waitForLocalizers(2);
+  LocalizerRunner locC1 =
+  spyService.getLocalizerRunner(c1.getContainerId().toString());
   final String containerIdStr = c1.getContainerId().toString();
   // Heartbeats from container localizer
   

[1/3] hadoop git commit: HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail intermittently with various symptoms - debugging patch (Contributed by Yongjun Zhang)

2015-11-25 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 425799125 -> 48b294c58


HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail 
intermittently with various symptoms - debugging patch (Contributed by Yongjun 
Zhang)

(cherry picked from commit c5d9a4a91e4e0faae3a8530408da35b591396060)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/566ceac1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/566ceac1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/566ceac1

Branch: refs/heads/branch-2
Commit: 566ceac1ff07ac1ebb4286853416c1426b17fc04
Parents: 4257991
Author: arp 
Authored: Wed Aug 27 09:52:33 2014 -0700
Committer: Yongjun Zhang 
Committed: Wed Nov 25 08:45:59 2015 -0800

--
 .../namenode/ha/TestPipelinesFailover.java  | 28 
 1 file changed, 28 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/566ceac1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index e988a7e..76a62ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -414,6 +415,33 @@ public class TestPipelinesFailover {
*/
   @Test(timeout=STRESS_RUNTIME*3)
   public void testPipelineRecoveryStress() throws Exception {
+
+// The following section of code is to help debug HDFS-6694 about
+// this test that fails from time to time due to "too many open files".
+//
+String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
+ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+sce.execute();
+
+System.out.println("HDFS-6694 Debug Data BEGIN===");
+System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+
+scmd = new String[] {"hostname"};
+sce = new ShellCommandExecutor(scmd);
+sce.execute();
+System.out.println("'hostname' output:\n" + sce.getOutput());
+
+scmd = new String[] {"ifconfig"};
+sce = new ShellCommandExecutor(scmd);
+sce.execute();
+System.out.println("'ifconfig' output:\n" + sce.getOutput());
+
+scmd = new String[] {"whoami"};
+sce = new ShellCommandExecutor(scmd);
+sce.execute();
+System.out.println("'whoami' output:\n" + sce.getOutput());
+System.out.println("===HDFS-6694 Debug Data END");
+
 HAStressTestHarness harness = new HAStressTestHarness();
 // Disable permissions so that another user can recover the lease.
 harness.conf.setBoolean(



[2/3] hadoop git commit: HDFS-6694. Addendum. Update CHANGES.txt for cherry-picking to 2.8.

2015-11-25 Thread yjzhangal
HDFS-6694. Addendum. Update CHANGES.txt for cherry-picking to 2.8.

(cherry picked from commit 84d01ad7f43bc498bc2e9d3afe68aed7f4a4d462)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ac8d8f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ac8d8f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ac8d8f5

Branch: refs/heads/branch-2
Commit: 7ac8d8f510306eaf51a7f6096d85419e031b528b
Parents: 566ceac
Author: Yongjun Zhang 
Authored: Wed Nov 25 08:27:26 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 08:49:26 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac8d8f5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0732893..4ff439b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1522,6 +1522,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-8335. FSNamesystem should construct FSPermissionChecker only if
 permission is enabled. (Gabor Liptak via wheat9)
 
+HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
+intermittently with various symptoms - debugging patch. (Yongjun Zhang via
+Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9117. Config file reader / options classes for libhdfs++. Contributed by Bob Hansen.

2015-11-25 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 8b7769557 -> 8c6a84fdd


HDFS-9117.  Config file reader / options classes for libhdfs++.  Contributed by 
Bob Hansen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c6a84fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c6a84fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c6a84fd

Branch: refs/heads/HDFS-8707
Commit: 8c6a84fdd55350791268bb4624b1f2b3163bc90b
Parents: 8b77695
Author: James 
Authored: Wed Nov 25 12:09:13 2015 -0500
Committer: James 
Committed: Wed Nov 25 12:09:13 2015 -0500

--
 .../src/main/native/libhdfspp/CMakeLists.txt|   2 +
 .../native/libhdfspp/lib/common/CMakeLists.txt  |   2 +-
 .../libhdfspp/lib/common/configuration.cc   | 231 
 .../native/libhdfspp/lib/common/configuration.h |  94 +
 .../main/native/libhdfspp/tests/CMakeLists.txt  |   4 +
 .../libhdfspp/tests/configuration_test.cc   | 363 +++
 .../native/libhdfspp/tests/configuration_test.h |  73 
 7 files changed, 768 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6a84fd/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
index 18e4cb0..d1b60be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -49,7 +49,9 @@ include_directories(
   lib
   ${PROJECT_BINARY_DIR}/lib/proto
   third_party/asio-1.10.2/include
+  third_party/rapidxml-1.13
   third_party/gmock-1.7.0
+  third_party/tr2
   ${OPENSSL_INCLUDE_DIR}
   ../libhdfs/include
 )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6a84fd/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
index b03f00b..8dbcd03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt
@@ -1 +1 @@
-add_library(common base64.cc options.cc status.cc sasl_digest_md5.cc 
hdfs_public_api.cc)
+add_library(common base64.cc status.cc sasl_digest_md5.cc hdfs_public_api.cc 
options.cc configuration.cc)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6a84fd/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc
new file mode 100644
index 000..2baf84b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The following features are not currently implemented
+ * - Deprecated values
+ * - Make filename and config file contents unicode-safe
+ * - Config redirection/environment substitution
+ *
+ * - getInts (comma separated)
+ * - getStrings (comma separated)
+ * - getIntegerRange
+ * - getSocketAddr
+ * - getTimeDuration
+ * - getBytes (e.g. 1M or 1G)
+ * - hex values
+ */
+
+#include "configuration.h"
+
+#include 
+#include 
+#include 

hadoop git commit: MAPREDUCE-6555. TestMRAppMaster fails on trunk. (Junping Du via ozawa)

2015-11-25 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8176ea7dc -> 3e85542a7


MAPREDUCE-6555. TestMRAppMaster fails on trunk. (Junping Du via ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e85542a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e85542a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e85542a

Branch: refs/heads/trunk
Commit: 3e85542a7afb8bbbc19f2aa59da04ec824168c0e
Parents: 8176ea7
Author: Tsuyoshi Ozawa 
Authored: Thu Nov 26 01:02:27 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Thu Nov 26 01:02:27 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java   | 2 ++
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e85542a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 1631668..26a0776 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -231,6 +231,8 @@ Trunk (Unreleased)
 
 MAPREDUCE-6540. TestMRTimelineEventHandling fails (sjlee)
 
+MAPREDUCE-6555. TestMRAppMaster fails on trunk. (Junping Du via ozawa)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
 MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e85542a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index 9e0dafc..86fa33e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -151,6 +151,8 @@ public class TestMRAppMaster {
 String userName = "TestAppMasterUser";
 JobConf conf = new JobConf();
 conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
+conf.setInt(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.
+FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 1);
 ApplicationAttemptId applicationAttemptId = ConverterUtils
 .toApplicationAttemptId(applicationAttemptIdStr);
 JobId jobId =  TypeConverter.toYarn(



hadoop git commit: Tests in mapreduce-client-app are writing outside of target. Contributed by Akira AJISAKA. (cherry picked from commit 15d577bfbb3f18fc95251d22378b53aa4210115f)

2015-11-25 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 48b294c58 -> 628c78287


Tests in mapreduce-client-app are writing outside of target. Contributed by 
Akira AJISAKA.
(cherry picked from commit 15d577bfbb3f18fc95251d22378b53aa4210115f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/628c7828
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/628c7828
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/628c7828

Branch: refs/heads/branch-2
Commit: 628c782870c5c1e6fd5f89e342ae1a2d74b2338d
Parents: 48b294c
Author: Junping Du 
Authored: Wed Nov 25 09:15:26 2015 -0800
Committer: Junping Du 
Committed: Wed Nov 25 09:16:22 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/v2/app/TestMRAppMaster.java   | 24 +---
 2 files changed, 19 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/628c7828/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8046e88..1999126 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -355,6 +355,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6553. Replace '\u2b05' with '<-' in rendering job configuration.
 (Gabor Liptak via aajisaka)
 
+MAPREDUCE-6557. Tests in mapreduce-client-app are writing outside of
+target. (Akira AJISAKA via junping_du)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/628c7828/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index 9e0dafc..78a6178 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -95,11 +96,13 @@ import org.mockito.Mockito;
 
 public class TestMRAppMaster {
   private static final Log LOG = LogFactory.getLog(TestMRAppMaster.class);
-  static String stagingDir = "staging/";
+  private static final Path TEST_ROOT_DIR =
+  new Path(System.getProperty("test.build.data", "target/test-dir"));
+  private static final Path testDir = new Path(TEST_ROOT_DIR,
+  TestMRAppMaster.class.getName() + "-tmpDir");
+  static String stagingDir = new Path(testDir, "staging").toString();
   private static FileContext localFS = null;
-  private static final File testDir = new File("target",
-TestMRAppMaster.class.getName() + "-tmpDir").getAbsoluteFile();
-  
+
   @BeforeClass
   public static void setup() throws AccessControlException,
   FileNotFoundException, IllegalArgumentException, IOException {
@@ -108,12 +111,12 @@ public class TestMRAppMaster {
 File dir = new File(stagingDir);
 stagingDir = dir.getAbsolutePath();
 localFS = FileContext.getLocalFSFileContext();
-localFS.delete(new Path(testDir.getAbsolutePath()), true);
-testDir.mkdir();
+localFS.delete(testDir, true);
+new File(testDir.toString()).mkdir();
   }
   
   @Before
-  public void cleanup() throws IOException {
+  public void prepare() throws IOException {
 File dir = new File(stagingDir);
 if(dir.exists()) {
   FileUtils.deleteDirectory(dir);
@@ -121,6 +124,11 @@ public class TestMRAppMaster {
 dir.mkdirs();
   }
 
+  @AfterClass
+  public static void cleanup() throws IOException {
+localFS.delete(testDir, true);
+  }
+
   @Test
   public void testMRAppMasterForDifferentUser() throws IOException,
   InterruptedException {
@@ -425,7 +433,7 @@ public class TestMRAppMaster {
 
 JobConf conf = new JobConf();
 
-Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
+Path tokenFilePath = new Path(testDir, "tokens-file");
 Map newEnv = new 

[3/3] hadoop git commit: HDFS-9438. TestPipelinesFailover assumes Linux ifconfig. (John Zhuge via Yongjun Zhang)

2015-11-25 Thread yjzhangal
HDFS-9438. TestPipelinesFailover assumes Linux ifconfig. (John Zhuge via 
Yongjun Zhang)

(cherry picked from commit 8176ea7dc694841a993f2bfc30669fe22f9ec1d2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48b294c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48b294c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48b294c5

Branch: refs/heads/branch-2
Commit: 48b294c58ef8b688c2a1973d870e83ba4b003cf5
Parents: 7ac8d8f
Author: Yongjun Zhang 
Authored: Wed Nov 25 07:40:16 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 08:50:02 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../namenode/ha/TestPipelinesFailover.java  | 45 ++--
 2 files changed, 26 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b294c5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ff439b..8387571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -817,6 +817,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8807.  dfs.datanode.data.dir does not handle spaces between
 storageType and URI correctly.  (Anu Engineer via szetszwo)
 
+HDFS-9438. TestPipelinesFailover assumes Linux ifconfig.
+(John Zhuge via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b294c5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 76a62ff..47b3817 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -419,28 +420,28 @@ public class TestPipelinesFailover {
 // The following section of code is to help debug HDFS-6694 about
 // this test that fails from time to time due to "too many open files".
 //
-String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
-ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
-sce.execute();
-
-System.out.println("HDFS-6694 Debug Data BEGIN===");
-System.out.println("'ulimit -a' output:\n" + sce.getOutput());
-
-scmd = new String[] {"hostname"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'hostname' output:\n" + sce.getOutput());
-
-scmd = new String[] {"ifconfig"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'ifconfig' output:\n" + sce.getOutput());
-
-scmd = new String[] {"whoami"};
-sce = new ShellCommandExecutor(scmd);
-sce.execute();
-System.out.println("'whoami' output:\n" + sce.getOutput());
-System.out.println("===HDFS-6694 Debug Data END");
+
+// Only collect debug data on these OSes.
+if (Shell.LINUX || Shell.SOLARIS || Shell.MAC) {
+  System.out.println("HDFS-6694 Debug Data BEGIN===");
+  
+  String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
+  ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+
+  scmd = new String[] {"hostname"};
+  sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'hostname' output:\n" + sce.getOutput());
+
+  scmd = new String[] {"ifconfig", "-a"};
+  sce = new ShellCommandExecutor(scmd);
+  sce.execute();
+  System.out.println("'ifconfig' output:\n" + sce.getOutput());
+
+  System.out.println("===HDFS-6694 Debug Data END");
+}
 
 HAStressTestHarness harness = new HAStressTestHarness();
 // Disable permissions so that another user can 

hadoop git commit: MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause duplicate records (wilfreds via rkanter)

2015-11-25 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 288cf8437 -> 8c6273d11


MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause duplicate 
records (wilfreds via rkanter)

(cherry picked from commit 7fd00b3db4b7d73afd41276ba9a06ec06a0e1762)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c6273d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c6273d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c6273d1

Branch: refs/heads/branch-2
Commit: 8c6273d117fda28e4ac6e64faba9a087745a13a5
Parents: 288cf84
Author: Robert Kanter 
Authored: Wed Nov 25 17:03:38 2015 -0800
Committer: Robert Kanter 
Committed: Wed Nov 25 17:03:52 2015 -0800

--
 .../java/org/apache/hadoop/util/LineReader.java |   9 +
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../lib/input/UncompressedSplitLineReader.java  |   5 +
 .../hadoop/mapred/TestLineRecordReader.java | 230 +-
 .../lib/input/TestLineRecordReader.java | 237 ++-
 5 files changed, 361 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6273d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
index 900215a..153953d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
@@ -333,6 +333,10 @@ public class LineReader implements Closeable {
 //appending the ambiguous characters (refer case 2.2)
 str.append(recordDelimiterBytes, 0, ambiguousByteCount);
 ambiguousByteCount = 0;
+// since it is now certain that the split did not split a delimiter we
+// should not read the next record: clear the flag otherwise duplicate
+// records could be generated
+unsetNeedAdditionalRecordAfterSplit();
   }
   if (appendLength > 0) {
 str.append(buffer, startPosn, appendLength);
@@ -380,4 +384,9 @@ public class LineReader implements Closeable {
   protected int getBufferSize() {
 return bufferSize;
   }
+
+  protected void unsetNeedAdditionalRecordAfterSplit() {
+// needed for custom multi byte line delimiters only
+// see MAPREDUCE-6549 for details
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6273d1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 1999126..012c87b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -358,6 +358,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6557. Tests in mapreduce-client-app are writing outside of
 target. (Akira AJISAKA via junping_du)
 
+MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
+duplicate records (wilfreds via rkanter)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6273d1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
index 38491b0..6d495ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
@@ -97,4 +97,9 @@ public class UncompressedSplitLineReader extends 
SplitLineReader {
   public boolean needAdditionalRecordAfterSplit() {
 return !finished && needAdditionalRecord;
   }
+
+  @Override
+  protected void unsetNeedAdditionalRecordAfterSplit() {
+needAdditionalRecord = false;
+  }
 }


hadoop git commit: MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause duplicate records (wilfreds via rkanter)

2015-11-25 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk e556c35b0 -> 7fd00b3db


MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause duplicate 
records (wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd00b3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd00b3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd00b3d

Branch: refs/heads/trunk
Commit: 7fd00b3db4b7d73afd41276ba9a06ec06a0e1762
Parents: e556c35
Author: Robert Kanter 
Authored: Wed Nov 25 17:03:38 2015 -0800
Committer: Robert Kanter 
Committed: Wed Nov 25 17:03:38 2015 -0800

--
 .../java/org/apache/hadoop/util/LineReader.java |   9 +
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../lib/input/UncompressedSplitLineReader.java  |   5 +
 .../hadoop/mapred/TestLineRecordReader.java | 230 +-
 .../lib/input/TestLineRecordReader.java | 237 ++-
 5 files changed, 361 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd00b3d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
index 900215a..153953d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
@@ -333,6 +333,10 @@ public class LineReader implements Closeable {
 //appending the ambiguous characters (refer case 2.2)
 str.append(recordDelimiterBytes, 0, ambiguousByteCount);
 ambiguousByteCount = 0;
+// since it is now certain that the split did not split a delimiter we
+// should not read the next record: clear the flag otherwise duplicate
+// records could be generated
+unsetNeedAdditionalRecordAfterSplit();
   }
   if (appendLength > 0) {
 str.append(buffer, startPosn, appendLength);
@@ -380,4 +384,9 @@ public class LineReader implements Closeable {
   protected int getBufferSize() {
 return bufferSize;
   }
+
+  protected void unsetNeedAdditionalRecordAfterSplit() {
+// needed for custom multi byte line delimiters only
+// see MAPREDUCE-6549 for details
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd00b3d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c6e80e7..503e687 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -650,6 +650,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6557. Tests in mapreduce-client-app are writing outside of
 target. (Akira AJISAKA via junping_du)
 
+MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
+duplicate records (wilfreds via rkanter)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd00b3d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
index 38491b0..6d495ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
@@ -97,4 +97,9 @@ public class UncompressedSplitLineReader extends 
SplitLineReader {
   public boolean needAdditionalRecordAfterSplit() {
 return !finished && needAdditionalRecord;
   }
+
+  @Override
+  protected void unsetNeedAdditionalRecordAfterSplit() {
+needAdditionalRecord = false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd00b3d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java

hadoop git commit: Adding release 2.8.0 to CHANGES.txt

2015-11-25 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6d84cc16b -> d57fd181c


Adding release 2.8.0 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d57fd181
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d57fd181
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d57fd181

Branch: refs/heads/trunk
Commit: d57fd181c75c2578775fda0ec575b66da416adff
Parents: 6d84cc1
Author: Vinod Kumar Vavilapalli 
Authored: Wed Nov 25 17:33:26 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Nov 25 17:33:26 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d57fd181/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d586057..2bda09c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -627,6 +627,18 @@ Trunk (Unreleased)
   HADOOP-12544. Erasure Coding: create dummy raw coder to isolate 
performance
   issues in testing. (Rui Li via zhz)
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d57fd181/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b085e67..97801c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -871,6 +871,18 @@ Trunk (Unreleased)
  HDFS-9451. Clean up deprecated umasks and related unit tests.
   (Wei-Chiu Chuang via wheat9)
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d57fd181/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index aadb0c6..dc37bf1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -303,6 +303,18 @@ Trunk (Unreleased)
 MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart.
 (Masatake Iwasaki via aajisaka)
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d57fd181/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d0b31dd..eef241b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -55,6 +55,18 @@ Trunk - Unreleased
 
 YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[4/4] hadoop git commit: HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed by Kihwal Lee)

2015-11-25 Thread vinayakumarb
HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed 
by Kihwal Lee)

(cherry picked from commit c62d42cd8bb09a5ffc0c5eefa2d87913e71b9e7e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c48c17a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c48c17a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c48c17a

Branch: refs/heads/branch-2.7
Commit: 0c48c17ae940b2b850b349ccc38dd7b6953f5a3c
Parents: f50f889
Author: Vinayakumar B 
Authored: Thu Nov 26 09:33:21 2015 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 26 09:42:26 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  6 +-
 .../DatanodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto   |  1 +
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c48c17a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89aede2..af6f8ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -120,6 +120,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9413. getContentSummary() on standby should throw StandbyException.
 (Brahma Reddy Battula via mingma)
 
+HDFS-9426. Rollingupgrade finalization is not backward compatible
+(Kihwal Lee via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c48c17a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 825e835..86422e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -161,7 +161,11 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   index++;
 }
 RollingUpgradeStatus rollingUpdateStatus = null;
-if (resp.hasRollingUpgradeStatus()) {
+
+// Use v2 semantics if available.
+if (resp.hasRollingUpgradeStatusV2()) {
+  rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatusV2());
+} else if (resp.hasRollingUpgradeStatus()) {
   rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatus());
 }
 return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c48c17a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 873eb6d..ca9e4df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -132,8 +133,15 @@ public class DatanodeProtocolServerSideTranslatorPB 
implements
 RollingUpgradeStatus rollingUpdateStatus = response
 .getRollingUpdateStatus();
 if (rollingUpdateStatus 

[2/4] hadoop git commit: HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed by Kihwal Lee)

2015-11-25 Thread vinayakumarb
HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed 
by Kihwal Lee)

(cherry picked from commit c62d42cd8bb09a5ffc0c5eefa2d87913e71b9e7e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f256d1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f256d1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f256d1d

Branch: refs/heads/trunk
Commit: 9f256d1d716a7e17606245fcfc619901a8fa299a
Parents: 0348e76
Author: Vinayakumar B 
Authored: Thu Nov 26 09:33:21 2015 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 26 09:39:47 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../DatanodeProtocolClientSideTranslatorPB.java|  5 -
 .../DatanodeProtocolServerSideTranslatorPB.java| 13 +++--
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto  |  1 +
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f256d1d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 97801c8..d73dbd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2514,6 +2514,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
 length of storageIDs. (szetszwo via Arpit Agarwal)
 
+HDFS-9426. Rollingupgrade finalization is not backward compatible
+(Kihwal Lee via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f256d1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 6b20af5..fd421c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -163,7 +163,10 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   index++;
 }
 RollingUpgradeStatus rollingUpdateStatus = null;
-if (resp.hasRollingUpgradeStatus()) {
+// Use v2 semantics if available.
+if (resp.hasRollingUpgradeStatusV2()) {
+  rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatusV2());
+} else if (resp.hasRollingUpgradeStatus()) {
   rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatus());
 }
 return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f256d1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 93c11ba..63fd1ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
 import 

[3/4] hadoop git commit: HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed by Kihwal Lee)

2015-11-25 Thread vinayakumarb
HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed 
by Kihwal Lee)

(cherry picked from commit c62d42cd8bb09a5ffc0c5eefa2d87913e71b9e7e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto

(cherry picked from commit 9f256d1d716a7e17606245fcfc619901a8fa299a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57e0c735
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57e0c735
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57e0c735

Branch: refs/heads/branch-2
Commit: 57e0c735f807b9f2ddaa55a0ca919c9540be0e4f
Parents: 165abe6
Author: Vinayakumar B 
Authored: Thu Nov 26 09:33:21 2015 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 26 09:40:44 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../DatanodeProtocolClientSideTranslatorPB.java|  5 -
 .../DatanodeProtocolServerSideTranslatorPB.java| 13 +++--
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto  |  1 +
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e0c735/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6e8a45d..16e6036 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1652,6 +1652,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
 length of storageIDs. (szetszwo via Arpit Agarwal)
 
+HDFS-9426. Rollingupgrade finalization is not backward compatible
+(Kihwal Lee via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e0c735/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 705d573..388680b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -163,7 +163,10 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   index++;
 }
 RollingUpgradeStatus rollingUpdateStatus = null;
-if (resp.hasRollingUpgradeStatus()) {
+// Use v2 semantics if available.
+if (resp.hasRollingUpgradeStatusV2()) {
+  rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatusV2());
+} else if (resp.hasRollingUpgradeStatus()) {
   rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatus());
 }
 return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e0c735/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 4b9f7c4..4f8f44f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import 

[1/4] hadoop git commit: HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed by Kihwal Lee)

2015-11-25 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 165abe6c4 -> 57e0c735f
  refs/heads/branch-2.7 f50f889c1 -> 0c48c17ae
  refs/heads/branch-2.7.2 4e8799fff -> c62d42cd8
  refs/heads/trunk 0348e769a -> 9f256d1d7


HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed 
by Kihwal Lee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c62d42cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c62d42cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c62d42cd

Branch: refs/heads/branch-2.7.2
Commit: c62d42cd8bb09a5ffc0c5eefa2d87913e71b9e7e
Parents: 4e8799f
Author: Vinayakumar B 
Authored: Thu Nov 26 09:33:21 2015 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 26 09:33:21 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  6 +-
 .../DatanodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto   |  1 +
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62d42cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d2e1b7d..4fefe52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -95,6 +95,9 @@ Release 2.7.2 - 2015-11-11
 HDFS-9413. getContentSummary() on standby should throw StandbyException.
 (Brahma Reddy Battula via mingma)
 
+HDFS-9426. Rollingupgrade finalization is not backward compatible
+(Kihwal Lee via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62d42cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 825e835..86422e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -161,7 +161,11 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   index++;
 }
 RollingUpgradeStatus rollingUpdateStatus = null;
-if (resp.hasRollingUpgradeStatus()) {
+
+// Use v2 semantics if available.
+if (resp.hasRollingUpgradeStatusV2()) {
+  rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatusV2());
+} else if (resp.hasRollingUpgradeStatus()) {
   rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatus());
 }
 return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62d42cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 873eb6d..ca9e4df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -132,8 +133,15 @@ public class 

hadoop git commit: Preparing for 2.9.0 development: mvn versions:set -DnewVersion=2.9.0

2015-11-25 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 767815f31 -> b20e77eba


Preparing for 2.9.0 development: mvn versions:set -DnewVersion=2.9.0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b20e77eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b20e77eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b20e77eb

Branch: refs/heads/branch-2
Commit: b20e77eba3d4972299b49af96da979643c877b6d
Parents: 767815f
Author: Vinod Kumar Vavilapalli 
Authored: Wed Nov 25 17:47:22 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Nov 25 17:47:22 2015 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client/pom.xml| 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml| 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-ant/pom.xml  | 4 ++--
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml  | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml| 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 4 ++--
 

hadoop git commit: MAPREDUCE-6550. archive-logs tool changes log ownership to the Yarn user when using DefaultContainerExecutor (rkanter)

2015-11-25 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7fd00b3db -> 6d84cc16b


MAPREDUCE-6550. archive-logs tool changes log ownership to the Yarn user when 
using DefaultContainerExecutor (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d84cc16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d84cc16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d84cc16

Branch: refs/heads/trunk
Commit: 6d84cc16b3e0685fef01d0e3526b0f7556ceff51
Parents: 7fd00b3
Author: Robert Kanter 
Authored: Wed Nov 25 17:12:40 2015 -0800
Committer: Robert Kanter 
Committed: Wed Nov 25 17:12:40 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../apache/hadoop/tools/HadoopArchiveLogs.java  | 18 +-
 .../hadoop/tools/HadoopArchiveLogsRunner.java   | 66 
 .../src/site/markdown/HadoopArchiveLogs.md  | 17 +
 .../hadoop/tools/TestHadoopArchiveLogs.java | 34 +++---
 .../tools/TestHadoopArchiveLogsRunner.java  | 21 ++-
 6 files changed, 139 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d84cc16/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 503e687..aadb0c6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
 duplicate records (wilfreds via rkanter)
 
+MAPREDUCE-6550. archive-logs tool changes log ownership to the Yarn
+user when using DefaultContainerExecutor (rkanter)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d84cc16/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
--
diff --git 
a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
 
b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
index 363e287..6b8af97 100644
--- 
a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
+++ 
b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
@@ -77,6 +77,7 @@ public class HadoopArchiveLogs implements Tool {
   private static final String MEMORY_OPTION = "memory";
   private static final String VERBOSE_OPTION = "verbose";
   private static final String FORCE_OPTION = "force";
+  private static final String NO_PROXY_OPTION = "noProxy";
 
   private static final int DEFAULT_MAX_ELIGIBLE = -1;
   private static final int DEFAULT_MIN_NUM_LOG_FILES = 20;
@@ -94,6 +95,8 @@ public class HadoopArchiveLogs implements Tool {
   private boolean verbose = false;
   @VisibleForTesting
   boolean force = false;
+  @VisibleForTesting
+  boolean proxy = true;
 
   @VisibleForTesting
   Set eligibleApplications;
@@ -208,6 +211,12 @@ public class HadoopArchiveLogs implements Tool {
 "Force recreating the working directory if an existing one is found. " 
+
 "This should only be used if you know that another instance is " +
 "not currently running");
+Option noProxyOpt = new Option(NO_PROXY_OPTION, false,
+"When specified, all processing will be done as the user running this" 
+
+" command (or the Yarn user if DefaultContainerExecutor is in " +
+"use). When not specified, all processing will be done as the " +
+"user who owns that application; if the user running this command" 
+
+" is not allowed to impersonate that user, it will fail");
 opts.addOption(helpOpt);
 opts.addOption(maxEligibleOpt);
 opts.addOption(minNumLogFilesOpt);
@@ -215,6 +224,7 @@ public class HadoopArchiveLogs implements Tool {
 opts.addOption(memoryOpt);
 opts.addOption(verboseOpt);
 opts.addOption(forceOpt);
+opts.addOption(noProxyOpt);
 
 try {
   CommandLineParser parser = new GnuParser();
@@ -252,6 +262,9 @@ public class HadoopArchiveLogs implements Tool {
   if (commandLine.hasOption(FORCE_OPTION)) {
 force = true;
   }
+  if (commandLine.hasOption(NO_PROXY_OPTION)) {
+proxy = false;
+  }
 } catch (ParseException pe) {
   HelpFormatter formatter = new HelpFormatter();
   formatter.printHelp("mapred archive-logs", opts);
@@ -274,7 +287,7 @@ public class HadoopArchiveLogs implements Tool {
 }
 fs.mkdirs(workingDir);
 fs.setPermission(workingDir,
-new 

hadoop git commit: HADOOP-12468. Partial group resolution failure should not result in user lockout. (Wei-Chiu Chuang via Yongjun Zhang)

2015-11-25 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b20e77eba -> 165abe6c4


HADOOP-12468. Partial group resolution failure should not result in user 
lockout. (Wei-Chiu Chuang via Yongjun Zhang)

(cherry picked from commit 0348e769abc507c69d644db7bc56d31d971c51d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/165abe6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/165abe6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/165abe6c

Branch: refs/heads/branch-2
Commit: 165abe6c4fe18ab3ba6d34b19b940a16e11e7242
Parents: b20e77e
Author: Yongjun Zhang 
Authored: Wed Nov 25 18:37:52 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 18:48:10 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../security/ShellBasedUnixGroupsMapping.java   | 181 ++--
 .../main/java/org/apache/hadoop/util/Shell.java |  19 +-
 .../TestShellBasedUnixGroupsMapping.java| 213 +++
 4 files changed, 393 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/165abe6c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6a753b2..22a40c5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -753,6 +753,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12598. Add XML namespace declarations for some hadoop/tools modules.
 (Xin Wang via aajisaka)
 
+HADOOP-12468. Partial group resolution failure should not result in user
+lockout. (Wei-Chiu Chuang via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165abe6c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index da6e434..9b80be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -21,12 +21,14 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -40,16 +42,34 @@ public class ShellBasedUnixGroupsMapping
   
   private static final Log LOG =
 LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
-  
+
+  @SuppressWarnings("serial")
+  private static class PartialGroupNameException extends IOException {
+public PartialGroupNameException(String message) {
+  super(message);
+}
+
+public PartialGroupNameException(String message, Throwable err) {
+  super(message, err);
+}
+
+@Override
+public String toString() {
+  final StringBuilder sb =
+  new StringBuilder("PartialGroupNameException ");
+  sb.append(super.getMessage());
+  return sb.toString();
+}
+  }
   /**
* Returns list of groups for a user
*
-   * @param user get groups for this user
+   * @param userName get groups for this user
* @return list of groups for a given user
*/
  @Override
-  public List<String> getGroups(String user) throws IOException {
-    return getUnixGroups(user);
+  public List<String> getGroups(String userName) throws IOException {
+    return getUnixGroups(userName);
  }
 
   /**
@@ -70,30 +90,52 @@ public class ShellBasedUnixGroupsMapping
 // does nothing in this provider of user to groups mapping
   }
 
-  /** 
+  /**
+   * Create a ShellCommandExecutor object using the user's name.
+   *
+   * @param userName user's name
+   * @return a ShellCommandExecutor object
+   */
+  protected ShellCommandExecutor createGroupExecutor(String userName) {
+return new ShellCommandExecutor(
+

hadoop git commit: Reverting previous commit which added 2.9.0 entries. Don't need that in branch-2.8. Revert "Adding release 2.8.0 to CHANGES.txt"

2015-11-25 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 [created] efaef45a2


Reverting previous commit which added 2.9.0 entries. Don't need that in 
branch-2.8.
Revert "Adding release 2.8.0 to CHANGES.txt"

This reverts commit 767815f3176c4776ea828994a662404df303f890.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efaef45a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efaef45a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efaef45a

Branch: refs/heads/branch-2.8
Commit: efaef45a2422a79d721e31b86bc032d37cb2aaa0
Parents: 767815f
Author: Vinod Kumar Vavilapalli 
Authored: Wed Nov 25 17:49:48 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Nov 25 17:49:48 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efaef45a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6a753b2..594a6ec 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,17 +1,5 @@
 Hadoop Change Log
 
-Release 2.9.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efaef45a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6e8a45d..3f421ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,17 +1,5 @@
  Hadoop HDFS Change Log
 
-Release 2.9.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efaef45a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2ecd661..4fafa78 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,17 +1,5 @@
 Hadoop MapReduce Change Log
 
-Release 2.9.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efaef45a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6c338b6..ac03f82 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,17 +1,5 @@
 Hadoop YARN Change Log
 
-Release 2.9.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-12468. Partial group resolution failure should not result in user lockout. (Wei-Chiu Chuang via Yongjun Zhang)

2015-11-25 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d57fd181c -> 0348e769a


HADOOP-12468. Partial group resolution failure should not result in user 
lockout. (Wei-Chiu Chuang via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0348e769
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0348e769
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0348e769

Branch: refs/heads/trunk
Commit: 0348e769abc507c69d644db7bc56d31d971c51d1
Parents: d57fd18
Author: Yongjun Zhang 
Authored: Wed Nov 25 18:37:52 2015 -0800
Committer: Yongjun Zhang 
Committed: Wed Nov 25 18:37:52 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../security/ShellBasedUnixGroupsMapping.java   | 181 ++--
 .../main/java/org/apache/hadoop/util/Shell.java |  19 +-
 .../TestShellBasedUnixGroupsMapping.java| 213 +++
 4 files changed, 393 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0348e769/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2bda09c..7cdf21b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1377,6 +1377,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12598. Add XML namespace declarations for some hadoop/tools modules.
 (Xin Wang via aajisaka)
 
+HADOOP-12468. Partial group resolution failure should not result in user
+lockout. (Wei-Chiu Chuang via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0348e769/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index da6e434..9b80be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -21,12 +21,14 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -40,16 +42,34 @@ public class ShellBasedUnixGroupsMapping
   
   private static final Log LOG =
 LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
-  
+
+  @SuppressWarnings("serial")
+  private static class PartialGroupNameException extends IOException {
+public PartialGroupNameException(String message) {
+  super(message);
+}
+
+public PartialGroupNameException(String message, Throwable err) {
+  super(message, err);
+}
+
+@Override
+public String toString() {
+  final StringBuilder sb =
+  new StringBuilder("PartialGroupNameException ");
+  sb.append(super.getMessage());
+  return sb.toString();
+}
+  }
   /**
* Returns list of groups for a user
*
-   * @param user get groups for this user
+   * @param userName get groups for this user
* @return list of groups for a given user
*/
   @Override
-  public List<String> getGroups(String userName) throws IOException {
-    return getUnixGroups(userName);
+  public List<String> getGroups(String userName) throws IOException {
+    return getUnixGroups(userName);
   }
 
   /**
@@ -70,30 +90,52 @@ public class ShellBasedUnixGroupsMapping
 // does nothing in this provider of user to groups mapping
   }
 
-  /** 
+  /**
+   * Create a ShellCommandExecutor object using the user's name.
+   *
+   * @param userName user's name
+   * @return a ShellCommandExecutor object
+   */
+  protected ShellCommandExecutor createGroupExecutor(String userName) {
+return new ShellCommandExecutor(
+Shell.getGroupsForUserCommand(userName), null, null, 0L);
+  }
+
+  /**
+   * Create a 

hadoop git commit: Adding release 2.8.0 to CHANGES.txt

2015-11-25 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7b4bf23b2 -> 767815f31


Adding release 2.8.0 to CHANGES.txt

(cherry picked from commit d57fd181c75c2578775fda0ec575b66da416adff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/767815f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/767815f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/767815f3

Branch: refs/heads/branch-2
Commit: 767815f3176c4776ea828994a662404df303f890
Parents: 7b4bf23
Author: Vinod Kumar Vavilapalli 
Authored: Wed Nov 25 17:33:26 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Nov 25 17:37:47 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/767815f3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 594a6ec..6a753b2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop Change Log
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/767815f3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3f421ba..6e8a45d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,5 +1,17 @@
  Hadoop HDFS Change Log
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/767815f3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4fafa78..2ecd661 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop MapReduce Change Log
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/767815f3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ac03f82..6c338b6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop YARN Change Log
 
+Release 2.9.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-4297. TestJobHistoryEventHandler and TestRMContainerAllocator failing on YARN-2928 branch (Varun Saxena via sjlee)

2015-11-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/feature-YARN-2928 a434b77ae -> b1dc7ced3


YARN-4297. TestJobHistoryEventHandler and TestRMContainerAllocator failing on 
YARN-2928 branch (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1dc7ced
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1dc7ced
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1dc7ced

Branch: refs/heads/feature-YARN-2928
Commit: b1dc7ced3606d1af5b508d6538866af5d84117f0
Parents: a434b77
Author: Sangjin Lee 
Authored: Wed Nov 25 11:28:00 2015 -0800
Committer: Sangjin Lee 
Committed: Wed Nov 25 11:28:00 2015 -0800

--
 .../jobhistory/TestJobHistoryEventHandler.java  | 28 +++-
 .../v2/app/rm/TestRMContainerAllocator.java |  3 ++-
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../collector/NodeTimelineCollectorManager.java |  2 --
 4 files changed, 26 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1dc7ced/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 71e1ce4..0550222 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.util.JobHistoryEventUtils;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
@@ -66,6 +67,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.junit.After;
@@ -487,7 +489,7 @@ public class TestJobHistoryEventHandler {
   // stored to the Timeline store
   @Test (timeout=5)
   public void testTimelineEventHandling() throws Exception {
-TestParams t = new TestParams(false);
+TestParams t = new TestParams(RunningAppContext.class, false);
 Configuration conf = new YarnConfiguration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
 MiniYARNCluster yarnCluster = null;
@@ -742,21 +744,30 @@ public class TestJobHistoryEventHandler {
 }
   }
 
-  private AppContext mockAppContext(ApplicationId appId, boolean 
isLastAMRetry) {
-JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
-AppContext mockContext = mock(AppContext.class);
+  private Job mockJob() {
 Job mockJob = mock(Job.class);
 when(mockJob.getAllCounters()).thenReturn(new Counters());
 when(mockJob.getTotalMaps()).thenReturn(10);
 when(mockJob.getTotalReduces()).thenReturn(10);
 when(mockJob.getName()).thenReturn("mockjob");
+return mockJob;
+  }
+
+  private AppContext mockAppContext(Class contextClass,
+  ApplicationId appId, boolean isLastAMRetry) {
+JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
+AppContext mockContext = mock(contextClass);
+Job mockJob = mockJob();
 when(mockContext.getJob(jobId)).thenReturn(mockJob);
 when(mockContext.getApplicationID()).thenReturn(appId);
 when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry);
+if (mockContext instanceof RunningAppContext) {
+  when(((RunningAppContext)mockContext).getTimelineClient()).
+  thenReturn(TimelineClient.createTimelineClient());
+}
 return mockContext;
   }
 
-
   private class TestParams {
 boolean isLastAMRetry;
 String workDir = setupTestWorkDir();
@@ 

hadoop git commit: HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after transition to ctest. Contributed by Chris Nauroth.

2015-11-25 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 628c78287 -> 15caaa7d0


HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after 
transition to ctest. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15caaa7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15caaa7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15caaa7d

Branch: refs/heads/branch-2
Commit: 15caaa7d0a13044f2908594ace9196e00c9b
Parents: 628c782
Author: Haohui Mai 
Authored: Wed Nov 25 10:31:39 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 25 10:32:13 2015 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   | 16 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15caaa7d/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index cc59a28..85c0fe4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -162,15 +162,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-
-  
-  
-  
-
-
-
-  
-
+
+  
+  
+  
+  
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15caaa7d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8387571..7564a38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1529,6 +1529,9 @@ Release 2.8.0 - UNRELEASED
 intermittently with various symptoms - debugging patch. (Yongjun Zhang via
 Arpit Agarwal)
 
+HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
+transition to ctest. (Chris Nauroth via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after transition to ctest. Contributed by Chris Nauroth.

2015-11-25 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 15d577bfb -> 95d5227c7


HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after 
transition to ctest. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95d5227c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95d5227c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95d5227c

Branch: refs/heads/trunk
Commit: 95d5227c75f430da7c77847f31734b34b36157d2
Parents: 15d577b
Author: Haohui Mai 
Authored: Wed Nov 25 10:31:39 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 25 10:32:48 2015 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   | 16 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95d5227c/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index a966a28..9fa5fbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -162,15 +162,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-
-  
-  
-  
-
-
-
-  
-
+
+  
+  
+  
+  
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95d5227c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8100c4d..707684b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2388,6 +2388,9 @@ Release 2.8.0 - UNRELEASED
 intermittently with various symptoms - debugging patch. (Yongjun Zhang via
 Arpit Agarwal)
 
+HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
+transition to ctest. (Chris Nauroth via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9407. TestFileTruncate should not use fixed NN port. Contributed by Brahma Reddy Battula.

2015-11-25 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk e3d673901 -> fc799ab16


HDFS-9407. TestFileTruncate should not use fixed NN port. Contributed by Brahma 
Reddy Battula.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc799ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc799ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc799ab1

Branch: refs/heads/trunk
Commit: fc799ab16cc62b04fef5a416af30e4ae845dd7a5
Parents: e3d6739
Author: Konstantin V Shvachko 
Authored: Wed Nov 25 13:52:09 2015 -0800
Committer: Konstantin V Shvachko 
Committed: Wed Nov 25 13:52:09 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 7 +--
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java  | 6 --
 2 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc799ab1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 46a286b..8fc911c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2378,8 +2378,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9435. TestBlockRecovery#testRBWReplicas is failing intermittently.
 (Rakesh R via waltersu4549)
 
-HDFS-9433. DFS getEZForPath API on a non-existent file should throw 
FileNotFoundException
-(Rakesh R via umamahesh)
+HDFS-9433. DFS getEZForPath API on a non-existent file should throw
+FileNotFoundException (Rakesh R via umamahesh)
 
 HDFS-6101. TestReplaceDatanodeOnFailure fails occasionally.
 (Wei-Chiu Chuang via cnauroth)
@@ -2397,6 +2397,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
 transition to ctest. (Chris Nauroth via wheat9)
 
+HDFS-9407. TestFileTruncate should not use fixed NN port.
+(Brahma Reddy Battula via shv)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc799ab1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index a642492..1c739fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -60,7 +59,6 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
-import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -104,7 +102,6 @@ public class TestFileTruncate {
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
-.nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
 .waitSafeMode(true)
 .build();
 fs = cluster.getFileSystem();
@@ -1230,9 +1227,6 @@ public class TestFileTruncate {
   NameNode.doRollback(conf, false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
 .format(false)
-.nameNodePort(
-ServerSocketUtil.getPort(
-HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10))
 .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
 .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
 .build();



hadoop git commit: HDFS-9407. TestFileTruncate should not use fixed NN port. Contributed by Brahma Reddy Battula.

2015-11-25 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4f1f9f7ef -> 9c1ef648a


HDFS-9407. TestFileTruncate should not use fixed NN port. Contributed by Brahma 
Reddy Battula.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c1ef648
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c1ef648
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c1ef648

Branch: refs/heads/branch-2
Commit: 9c1ef648a9f57d4f938c789fc19bc44aef89d72d
Parents: 4f1f9f7
Author: Konstantin V Shvachko 
Authored: Wed Nov 25 14:16:03 2015 -0800
Committer: Konstantin V Shvachko 
Committed: Wed Nov 25 14:16:03 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 7 +--
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java  | 6 --
 2 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c1ef648/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 796f7d1..976ab2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1519,8 +1519,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9435. TestBlockRecovery#testRBWReplicas is failing intermittently.
 (Rakesh R via waltersu4549)
 
-HDFS-9433. DFS getEZForPath API on a non-existent file should throw 
FileNotFoundException
-(Rakesh R via umamahesh)
+HDFS-9433. DFS getEZForPath API on a non-existent file should throw
+FileNotFoundException (Rakesh R via umamahesh)
 
 HDFS-6101. TestReplaceDatanodeOnFailure fails occasionally.
 (Wei-Chiu Chuang via cnauroth)
@@ -1535,6 +1535,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
 transition to ctest. (Chris Nauroth via wheat9)
 
+HDFS-9407. TestFileTruncate should not use fixed NN port.
+(Brahma Reddy Battula via shv)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c1ef648/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 657dc56..7dbf548 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -59,7 +58,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
-import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -103,7 +101,6 @@ public class TestFileTruncate {
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
-.nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
 .waitSafeMode(true)
 .build();
 fs = cluster.getFileSystem();
@@ -1228,9 +1225,6 @@ public class TestFileTruncate {
   NameNode.doRollback(conf, false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
 .format(false)
-.nameNodePort(
-ServerSocketUtil.getPort(
-HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10))
 .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
 .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
 .build();



hadoop git commit: HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. Contributed by Mingliang Liu.

2015-11-25 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk fc799ab16 -> e556c35b0


HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e556c35b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e556c35b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e556c35b

Branch: refs/heads/trunk
Commit: e556c35b0596700f9ec9d0a51cf5027259d531b5
Parents: fc799ab
Author: Jing Zhao 
Authored: Wed Nov 25 14:21:06 2015 -0800
Committer: Jing Zhao 
Committed: Wed Nov 25 14:22:12 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 11 +--
 2 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e556c35b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8fc911c..b085e67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2400,6 +2400,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9407. TestFileTruncate should not use fixed NN port.
 (Brahma Reddy Battula via shv)
 
+HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem.
+(Mingliang Liu via jing9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e556c35b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0559288..89df008 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1498,14 +1498,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   public void writeUnlock() {
 final boolean needReport = fsLock.getWriteHoldCount() == 1 &&
 fsLock.isWriteLockedByCurrentThread();
+final long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
+
 this.fsLock.writeLock().unlock();
 
-if (needReport) {
-  long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
-  if (writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
-LOG.info("FSNamesystem write lock held for " + writeLockInterval +
-" ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
-  }
+if (needReport && writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
+  LOG.info("FSNamesystem write lock held for " + writeLockInterval +
+  " ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
 }
   }
   @Override



hadoop git commit: HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. Contributed by Mingliang Liu.

2015-11-25 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9c1ef648a -> 288cf8437


HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem. 
Contributed by Mingliang Liu.

(cherry picked from commit e556c35b0596700f9ec9d0a51cf5027259d531b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/288cf843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/288cf843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/288cf843

Branch: refs/heads/branch-2
Commit: 288cf8437b7e03f071e95eb05e83a26e58fff26b
Parents: 9c1ef64
Author: Jing Zhao 
Authored: Wed Nov 25 14:21:06 2015 -0800
Committer: Jing Zhao 
Committed: Wed Nov 25 14:23:06 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 11 +--
 2 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/288cf843/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 976ab2c..3f421ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1538,6 +1538,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9407. TestFileTruncate should not use fixed NN port.
 (Brahma Reddy Battula via shv)
 
+HDFS-9467. Fix data race accessing writeLockHeldTimeStamp in FSNamesystem.
+(Mingliang Liu via jing9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/288cf843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9b33ffc..f0737ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1484,14 +1484,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   public void writeUnlock() {
 final boolean needReport = fsLock.getWriteHoldCount() == 1 &&
 fsLock.isWriteLockedByCurrentThread();
+final long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
+
 this.fsLock.writeLock().unlock();
 
-if (needReport) {
-  long writeLockInterval = monotonicNow() - writeLockHeldTimeStamp;
-  if (writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
-LOG.info("FSNamesystem write lock held for " + writeLockInterval +
-" ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
-  }
+if (needReport && writeLockInterval >= WRITELOCK_REPORTING_THRESHOLD) {
+  LOG.info("FSNamesystem write lock held for " + writeLockInterval +
+  " ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
 }
   }
   @Override



[Hadoop Wiki] Update of "Roadmap" by VinodKumarVavilapalli

2015-11-25 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "Roadmap" page has been changed by VinodKumarVavilapalli:
https://wiki.apache.org/hadoop/Roadmap?action=diff&rev1=58&rev2=59

Comment:
Updating 2.8 content

* [[https://issues.apache.org/jira/browse/HDFS-6200|HDFS-6200]] Create a 
separate jar for hdfs-client
* [[https://issues.apache.org/jira/browse/HDFS-8008|HDFS-8008]] Support 
client-side back off when the datanodes are congested
* [[https://issues.apache.org/jira/browse/HDFS-8009|HDFS-8009]] Signal 
congestion on the DataNode
-   * [[https://issues.apache.org/jira/browse/HDFS-8155|HDFS-8155]] Support 
OAuth2 in WebHDFS: Alpha / Early feature?
   * YARN
* [[https://issues.apache.org/jira/browse/YARN-4233|YARN-4233]] YARN 
Timeline Service v1.5
* [[https://issues.apache.org/jira/browse/YARN-3214|YARN-3214]] Supporting 
non-exclusive node-labels: Alpha feature


hadoop git commit: HDFS-9451. Clean up depreated umasks and related unit tests. Contributed by Wei-Chiu Chuang.

2015-11-25 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 95d5227c7 -> b21dffb1f


HDFS-9451. Clean up depreated umasks and related unit tests. Contributed by 
Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b21dffb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b21dffb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b21dffb1

Branch: refs/heads/trunk
Commit: b21dffb1fec376784810af89cfc9cc05e5f781ce
Parents: 95d5227
Author: Haohui Mai 
Authored: Wed Nov 25 12:47:24 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 25 12:47:24 2015 -0800

--
 .../hadoop/fs/permission/FsPermission.java  |  7 --
 .../hadoop/fs/permission/TestFsPermission.java  | 10 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/security/TestPermission.java  | 23 +---
 4 files changed, 8 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b21dffb1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index d4adbb5..b535fd6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -215,9 +215,6 @@ public class FsPermission implements Writable {
 otheraction.and(umask.otheraction.not()));
   }
 
-  /** umask property label deprecated key and code in getUMask method
-   *  to accommodate it may be removed in version .23 */
-  public static final String DEPRECATED_UMASK_LABEL = "dfs.umask"; 
   public static final String UMASK_LABEL = 
   CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY;
   public static final int DEFAULT_UMASK = 
@@ -236,8 +233,6 @@ public class FsPermission implements Writable {
* '-' sets bits in the mask.
* 
* Octal umask, the specified bits are set in the file mode creation mask.
-   * 
-   * {@code DEPRECATED_UMASK_LABEL} config param has umask value set to 
decimal.
*/
   public static FsPermission getUMask(Configuration conf) {
 int umask = DEFAULT_UMASK;
@@ -246,7 +241,6 @@ public class FsPermission implements Writable {
 // If the deprecated key is not present then check for the new key
 if(conf != null) {
   String confUmask = conf.get(UMASK_LABEL);
-  int oldUmask = conf.getInt(DEPRECATED_UMASK_LABEL, Integer.MIN_VALUE);
   try {
 if(confUmask != null) {
   umask = new UmaskParser(confUmask).getUMask();
@@ -290,7 +284,6 @@ public class FsPermission implements Writable {
   /** Set the user file creation mask (umask) */
   public static void setUMask(Configuration conf, FsPermission umask) {
 conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort()));
-conf.setInt(DEPRECATED_UMASK_LABEL, umask.toShort());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b21dffb1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index 45d6e1a..04dbe01 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -695,14 +695,4 @@ public class TestFsPermission extends TestCase {
msg.contains(umask) &&
msg.contains("octal or symbolic");
   }
-  
-  // Ensure that when the deprecated decimal umask key is used, it is correctly
-  // parsed as such and converted correctly to an FsPermission value
-  public void testDeprecatedUmask() {
-Configuration conf = new Configuration();
-conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456
-FsPermission umask = FsPermission.getUMask(conf);
-
-assertEquals(0456, umask.toShort());
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b21dffb1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 

hadoop git commit: HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage type info. Contributed by Xiaoyu Yao.

2015-11-25 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk b21dffb1f -> e3d673901


HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage type 
info. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3d67390
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3d67390
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3d67390

Branch: refs/heads/trunk
Commit: e3d673901b396cf5bbede5ed6f607ce68301ec0a
Parents: b21dffb
Author: Xiaoyu Yao 
Authored: Wed Nov 25 13:40:43 2015 -0800
Committer: Xiaoyu Yao 
Committed: Wed Nov 25 13:41:06 2015 -0800

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 21 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 16 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 31 
 4 files changed, 70 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d67390/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 756f2aa..baebff2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -268,6 +268,23 @@ class JsonUtilClient {
 }
   }
 
+  /** Convert an Object[] to a StorageType[]. */
+  static StorageType[] toStorageTypeArray(final List<?> objects)
+  throws IOException {
+if (objects == null) {
+  return null;
+} else if (objects.isEmpty()) {
+  return StorageType.EMPTY_ARRAY;
+} else {
+  final StorageType[] array = new StorageType[objects.size()];
+  int i = 0;
+  for (Object object : objects) {
+array[i++] = StorageType.parseStorageType(object.toString());
+  }
+  return array;
+}
+  }
+
   /** Convert a Json map to LocatedBlock. */
   static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
 if (m == null) {
@@ -282,8 +299,10 @@ class JsonUtilClient {
 final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
 getList(m, "cachedLocations"));
 
+final StorageType[] storageTypes = toStorageTypeArray(
+getList(m, "storageTypes"));
 final LocatedBlock locatedblock = new LocatedBlock(b, locations,
-null, null, startOffset, isCorrupt, cachedLocations);
+null, storageTypes, startOffset, isCorrupt, cachedLocations);
 locatedblock.setBlockToken(toBlockToken((Map)m.get("blockToken")));
 return locatedblock;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d67390/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 957087e..46a286b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1686,6 +1686,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9438. TestPipelinesFailover assumes Linux ifconfig.
 (John Zhuge via Yongjun Zhang)
 
+HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage
+type info. (xyao)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d67390/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index f107e66..1f5eaf6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -190,6 +190,21 @@ public class JsonUtil {
 }
   }
 
+  /** Convert a StorageType[] to a Json array. */
+  private static Object[] toJsonArray(final StorageType[] array) {
+if (array == null) {
+  return null;
+} else if (array.length == 0) {
+  return EMPTY_OBJECT_ARRAY;
+} else {
+  final Object[] a = new Object[array.length];
+  for(int i = 0; i < array.length; i++) {
+a[i] = array[i];
+  }
+  return a;
+}
+  }
+
   

hadoop git commit: HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage type info. Contributed by Xiaoyu Yao.

2015-11-25 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 15caaa7d0 -> 4f1f9f7ef


HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage type 
info. Contributed by Xiaoyu Yao.

(cherry picked from commit e3d673901b396cf5bbede5ed6f607ce68301ec0a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f1f9f7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f1f9f7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f1f9f7e

Branch: refs/heads/branch-2
Commit: 4f1f9f7efffc8638fe6e8ff1502693317d9f1630
Parents: 15caaa7
Author: Xiaoyu Yao 
Authored: Wed Nov 25 13:40:43 2015 -0800
Committer: Xiaoyu Yao 
Committed: Wed Nov 25 13:54:22 2015 -0800

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 21 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 16 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 31 
 4 files changed, 70 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f9f7e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 3f4899d..1166991 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -268,6 +268,23 @@ class JsonUtilClient {
 }
   }
 
+  /** Convert an Object[] to a StorageType[]. */
+  static StorageType[] toStorageTypeArray(final List<?> objects)
+  throws IOException {
+if (objects == null) {
+  return null;
+} else if (objects.isEmpty()) {
+  return StorageType.EMPTY_ARRAY;
+} else {
+  final StorageType[] array = new StorageType[objects.size()];
+  int i = 0;
+  for (Object object : objects) {
+array[i++] = StorageType.parseStorageType(object.toString());
+  }
+  return array;
+}
+  }
+
   /** Convert a Json map to LocatedBlock. */
   static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
 if (m == null) {
@@ -282,8 +299,10 @@ class JsonUtilClient {
 final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
 getList(m, "cachedLocations"));
 
+final StorageType[] storageTypes = toStorageTypeArray(
+getList(m, "storageTypes"));
 final LocatedBlock locatedblock = new LocatedBlock(b, locations,
-null, null, startOffset, isCorrupt, cachedLocations);
+null, storageTypes, startOffset, isCorrupt, cachedLocations);
 locatedblock.setBlockToken(toBlockToken((Map)m.get("blockToken")));
 return locatedblock;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f9f7e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7564a38..796f7d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -820,6 +820,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9438. TestPipelinesFailover assumes Linux ifconfig.
 (John Zhuge via Yongjun Zhang)
 
+HDFS-8512. WebHDFS : GETFILESTATUS should return LocatedBlock with storage
+type info. (xyao)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f9f7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index b8153dc..b3cae6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -190,6 +190,21 @@ public class JsonUtil {
 }
   }
 
+  /** Convert a StorageType[] to a Json array. */
+  private static Object[] toJsonArray(final StorageType[] array) {
+if (array == null) {
+  return null;
+} else if (array.length == 0) {
+  return EMPTY_OBJECT_ARRAY;
+} else {
+  final Object[] a = new Object[array.length];
+  for(int i = 0; i < array.length;