hadoop git commit: YARN-5333. Some recovered apps are put into default queue when RM HA. Contributed by Jun Gong.

2017-05-31 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1903665b2 -> fd112535c


YARN-5333. Some recovered apps are put into default queue when RM HA. 
Contributed by Jun Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd112535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd112535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd112535

Branch: refs/heads/branch-2.7
Commit: fd112535c81bc08d305503284aaa6c89f30518dd
Parents: 1903665
Author: Sunil G 
Authored: Thu Jun 1 11:19:37 2017 +0530
Committer: Sunil G 
Committed: Thu Jun 1 11:19:37 2017 +0530

--
 .../server/resourcemanager/AdminService.java| 103 ---
 .../scheduler/fair/TestFairScheduler.java   |  70 -
 2 files changed, 134 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd112535/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 5248adb..306bd86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -302,15 +302,7 @@ public class AdminService extends CompositeService 
implements
 
 UserGroupInformation user = checkAccess("transitionToActive");
 checkHaStateChange(reqInfo);
-try {
-  rm.transitionToActive();
-} catch (Exception e) {
-  RMAuditLogger.logFailure(user.getShortUserName(), "transitionToActive",
-  "", "RMHAProtocolService",
-  "Exception transitioning to active");
-  throw new ServiceFailedException(
-  "Error when transitioning to Active mode", e);
-}
+
 try {
   // call all refresh*s for active RM to get the updated configurations.
   refreshAll();
@@ -320,10 +312,22 @@ public class AdminService extends CompositeService 
implements
   .getDispatcher()
   .getEventHandler()
   .handle(
-  new RMFatalEvent(RMFatalEventType.TRANSITION_TO_ACTIVE_FAILED, e));
+  new RMFatalEvent(RMFatalEventType.TRANSITION_TO_ACTIVE_FAILED,
+  e));
+  throw new ServiceFailedException(
+  "Error on refreshAll during transition to Active", e);
+}
+
+try {
+  rm.transitionToActive();
+} catch (Exception e) {
+  RMAuditLogger.logFailure(user.getShortUserName(), "transitionToActive",
+  "", "RM",
+  "Exception transitioning to active");
   throw new ServiceFailedException(
-  "Error on refreshAll during transistion to Active", e);
+  "Error when transitioning to Active mode", e);
 }
+
 RMAuditLogger.logSuccess(user.getShortUserName(), "transitionToActive",
 "RMHAProtocolService");
   }
@@ -378,12 +382,7 @@ public class AdminService extends CompositeService 
implements
 RefreshQueuesResponse response =
 recordFactory.newRecordInstance(RefreshQueuesResponse.class);
 try {
-  rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
-  // refresh the reservation system
-  ReservationSystem rSystem = rmContext.getReservationSystem();
-  if (rSystem != null) {
-rSystem.reinitialize(getConfig(), rmContext);
-  }
+  refreshQueues();
   RMAuditLogger.logSuccess(user.getShortUserName(), argName,
   "AdminService");
   return response;
@@ -392,6 +391,15 @@ public class AdminService extends CompositeService 
implements
 }
   }
 
+  private void refreshQueues() throws IOException, YarnException {
+rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
+// refresh the reservation system
+ReservationSystem rSystem = rmContext.getReservationSystem();
+if (rSystem != null) {
+  rSystem.reinitialize(getConfig(), rmContext);
+}
+  }
+
   @Override
   public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
   throws YarnException, StandbyException {
@@ -414,6 +422,13 @@ public class AdminService extends CompositeService 
implements
 }
   }
 
+  private void refreshNodes() throws IOException, YarnException {
+   
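
In essence, the two hunks above swap the order of operations: refreshAll()
now runs before rm.transitionToActive(), so the scheduler is reinitialized
with the current queue configuration before recovered applications are
re-submitted to it; previously they could arrive while the default queue
configuration was still in place. A condensed sketch of the resulting flow,
with audit logging and the exact exception signatures elided (an
illustration, not the patched method verbatim):

    // Condensed sketch of AdminService.transitionToActive() after the patch.
    void transitionToActive(HAServiceStateChangeRequestInfo reqInfo)
        throws Exception {
      UserGroupInformation user = checkAccess("transitionToActive");
      checkHaStateChange(reqInfo);

      // 1) Refresh queues/nodes/ACLs first, so the scheduler holds the
      //    up-to-date queue configuration.
      try {
        refreshAll();
      } catch (Exception e) {
        throw new ServiceFailedException(
            "Error on refreshAll during transition to Active", e);
      }

      // 2) Only now become active: apps recovered during the transition are
      //    submitted to their recorded queues rather than to "default".
      rm.transitionToActive();
    }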

hadoop git commit: HADOOP-13921. Remove log4j classes from JobConf.

2017-05-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e6e96583 -> f5517a820


HADOOP-13921. Remove log4j classes from JobConf.

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5517a82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5517a82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5517a82

Branch: refs/heads/trunk
Commit: f5517a82001eea2207a93d3b70d42ad8f4ddeccb
Parents: 6e6e965
Author: Sean Busbey 
Authored: Thu May 4 11:16:25 2017 -0500
Committer: Akira Ajisaka 
Committed: Thu Jun 1 13:35:14 2017 +0900

--
 hadoop-client-modules/hadoop-client-runtime/pom.xml   | 1 -
 .../main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java | 4 ++--
 .../src/main/java/org/apache/hadoop/mapred/JobConf.java   | 7 +++
 3 files changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index dc0f005..3c8364c 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -96,7 +96,6 @@
 
 
 
   log4j

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 5fd66ac..a43da65 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -650,12 +650,12 @@ public class MRApps extends Apps {
 if (isMap) {
   return conf.get(
   MRJobConfig.MAP_LOG_LEVEL,
-  JobConf.DEFAULT_LOG_LEVEL.toString()
+  JobConf.DEFAULT_LOG_LEVEL
   );
 } else {
   return conf.get(
   MRJobConfig.REDUCE_LOG_LEVEL,
-  JobConf.DEFAULT_LOG_LEVEL.toString()
+  JobConf.DEFAULT_LOG_LEVEL
   );
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index f286a96..be8fa9e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
-import org.apache.log4j.Level;
 
 /** 
  * A map/reduce job configuration.
@@ -333,7 +332,7 @@ public class JobConf extends Configuration {
   private Credentials credentials = new Credentials();
   
   /**
-   * Configuration key to set the logging {@link Level} for the map task.
+   * Configuration key to set the logging level for the map task.
*
* The allowed logging levels are:
* OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -342,7 +341,7 @@ public class JobConf extends Configuration {
 JobContext.MAP_LOG_LEVEL;
   
   /**
-   * Configuration key to set the logging {@link Level} for the reduce task.
+   * Configuration key to set the logging level for the reduce task.
*
* The allowed logging levels are:
* OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -353,7 +352,7 @@ public class JobConf extends Configuration {
   /**
* Default logging level for map/reduce tasks.
*/
-  public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
+  public static final String DEFAULT_LOG_LEVEL = JobContext.DEFAULT_LOG_LEVEL;
 
   /
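
With DEFAULT_LOG_LEVEL now a plain String (delegating to
JobContext.DEFAULT_LOG_LEVEL), callers can look up task log levels without
pulling org.apache.log4j onto their classpath, which is exactly what the
MRApps change above does. A minimal self-contained sketch of the same
lookup (constant names as in the diff; the wrapper class is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class LogLevelLookup {
      // Returns the configured map-task log level, falling back to the
      // string default; no log4j type appears on this code path.
      public static String mapLogLevel(Configuration conf) {
        return conf.get(MRJobConfig.MAP_LOG_LEVEL, JobConf.DEFAULT_LOG_LEVEL);
      }
    }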

hadoop git commit: HADOOP-14466. Remove useless document from TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.

2017-05-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 92243484f -> 6e6e96583


HADOOP-14466. Remove useless document from 
TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e6e9658
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e6e9658
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e6e9658

Branch: refs/heads/trunk
Commit: 6e6e96583f1ee3c8a8775480f41c3adcdd5e2c45
Parents: 9224348
Author: Akira Ajisaka 
Authored: Thu Jun 1 13:08:01 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 1 13:08:01 2017 +0900

--
 .../hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6e9658/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 321e958..46ab339 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -38,11 +38,6 @@ import static org.junit.Assume.assumeTrue;
 
 /**
  * Tests a live Aliyun OSS system.
- *
- * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from
- * TestCase which uses the old Junit3 runner that doesn't ignore assumptions
- * properly making it impossible to skip the tests if we don't have a valid
- * bucket.
  */
 public class TestAliyunOSSFileSystemContract
 extends FileSystemContractBaseTest {





hadoop git commit: HADOOP-9849. License information is missing for native CRC32 code (Contributed by Andrew Wang via Daniel Templeton)

2017-05-31 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 10b7a5fa9 -> 406d8afdd


HADOOP-9849. License information is missing for native CRC32 code
(Contributed by Andrew Wang via Daniel Templeton)

(cherry picked from commit 92243484f9b868aa18618759145e9dd554e245c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/406d8afd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/406d8afd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/406d8afd

Branch: refs/heads/branch-2
Commit: 406d8afdd4955ec00c7f29e03df927cf7d2d094b
Parents: 10b7a5f
Author: Daniel Templeton 
Authored: Wed May 31 15:57:48 2017 -0700
Committer: Daniel Templeton 
Committed: Wed May 31 15:59:29 2017 -0700

--
 LICENSE.txt | 47 ++-
 1 file changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/406d8afd/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 2cad31e..65b9d42 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -246,11 +246,48 @@ For the org.apache.hadoop.util.bloom.* classes:
 For portions of the native implementation of slicing-by-8 CRC calculation
 in src/main/native/src/org/apache/hadoop/util:
 
-/**
- *   Copyright 2008,2009,2010 Massachusetts Institute of Technology.
- *   All rights reserved. Use of this source code is governed by a
- *   BSD-style license that can be found in the LICENSE file.
- */
+Copyright (c) 2008,2009,2010 Massachusetts Institute of Technology.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Massachusetts Institute of Technology nor
+  the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other portions are under the same license from Intel:
+http://sourceforge.net/projects/slicing-by-8/
+/*++
+ *
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ * This software program is licensed subject to the BSD License, 
+ * available at http://www.opensource.org/licenses/bsd-license.html
+ *
+ * Abstract: The main routine
+ * 
+ --*/
 
 For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 





hadoop git commit: HADOOP-9849. License information is missing for native CRC32 code (Contributed by Andrew Wang via Daniel Templeton)

2017-05-31 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk d5b71e417 -> 92243484f


HADOOP-9849. License information is missing for native CRC32 code
(Contributed by Andrew Wang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92243484
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92243484
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92243484

Branch: refs/heads/trunk
Commit: 92243484f9b868aa18618759145e9dd554e245c5
Parents: d5b71e4
Author: Daniel Templeton 
Authored: Wed May 31 15:57:48 2017 -0700
Committer: Daniel Templeton 
Committed: Wed May 31 15:57:48 2017 -0700

--
 LICENSE.txt | 47 ++-
 1 file changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92243484/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 969708f..5391fd5 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -246,11 +246,48 @@ For the org.apache.hadoop.util.bloom.* classes:
 For portions of the native implementation of slicing-by-8 CRC calculation
 in src/main/native/src/org/apache/hadoop/util:
 
-/**
- *   Copyright 2008,2009,2010 Massachusetts Institute of Technology.
- *   All rights reserved. Use of this source code is governed by a
- *   BSD-style license that can be found in the LICENSE file.
- */
+Copyright (c) 2008,2009,2010 Massachusetts Institute of Technology.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Massachusetts Institute of Technology nor
+  the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other portions are under the same license from Intel:
+http://sourceforge.net/projects/slicing-by-8/
+/*++
+ *
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ * This software program is licensed subject to the BSD License, 
+ * available at http://www.opensource.org/licenses/bsd-license.html
+ *
+ * Abstract: The main routine
+ * 
+ --*/
 
 For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 





hadoop git commit: YARN-6246. Identifying starved apps does not need the scheduler writelock (Contributed by Karthik Kambatla via Daniel Templeton)

2017-05-31 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 212a56608 -> 10b7a5fa9


YARN-6246. Identifying starved apps does not need the scheduler writelock
(Contributed by Karthik Kambatla via Daniel Templeton)

(cherry picked from commit d5b71e4175c13679d451710be150fc461a661263)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10b7a5fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10b7a5fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10b7a5fa

Branch: refs/heads/branch-2
Commit: 10b7a5fa98e90066a7a0fb50b184d99b9b41b537
Parents: 212a566
Author: Daniel Templeton 
Authored: Wed May 31 15:48:04 2017 -0700
Committer: Daniel Templeton 
Committed: Wed May 31 15:50:11 2017 -0700

--
 .../scheduler/fair/FSLeafQueue.java |  9 +++
 .../scheduler/fair/FSParentQueue.java   |  4 +--
 .../resourcemanager/scheduler/fair/FSQueue.java | 19 +-
 .../scheduler/fair/FairScheduler.java   | 27 ++--
 4 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10b7a5fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 7785a5c..0fad8be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -198,13 +198,10 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(runnableApps, getFairShare());
-  if (checkStarvation) {
-updateStarvedApps();
-  }
 } finally {
   readLock.unlock();
 }
@@ -283,8 +280,10 @@ public class FSLeafQueue extends FSQueue {
* If this queue is starving due to fairshare, there must be at least
* one application that is starved. And, even if the queue is not
* starved due to fairshare, there might still be starved applications.
+   *
+   * Caller does not need read/write lock on the leaf queue.
*/
-  private void updateStarvedApps() {
+  void updateStarvedApps() {
 // Fetch apps with pending demand
    TreeSet<FSAppAttempt> appsWithDemand = fetchAppsWithDemand(false);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10b7a5fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index 6050ab5..3bc81ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -79,13 +79,13 @@ public class FSParentQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(childQueues, getFairShare());
   for (FSQueue childQueue : childQueues) {
 childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-childQueue.updateInternal(checkStarvation);
+childQueue.updateInternal();
   }
 } finally {
   readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10b7a5fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/ma

hadoop git commit: YARN-6246. Identifying starved apps does not need the scheduler writelock (Contributed by Karthik Kambatla via Daniel Templeton)

2017-05-31 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4369690ce -> d5b71e417


YARN-6246. Identifying starved apps does not need the scheduler writelock
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5b71e41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5b71e41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5b71e41

Branch: refs/heads/trunk
Commit: d5b71e4175c13679d451710be150fc461a661263
Parents: 4369690
Author: Daniel Templeton 
Authored: Wed May 31 15:48:04 2017 -0700
Committer: Daniel Templeton 
Committed: Wed May 31 15:48:04 2017 -0700

--
 .../scheduler/fair/FSLeafQueue.java |  9 +++
 .../scheduler/fair/FSParentQueue.java   |  4 +--
 .../resourcemanager/scheduler/fair/FSQueue.java | 19 +-
 .../scheduler/fair/FairScheduler.java   | 27 ++--
 4 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 10f1e28..1de0e30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -198,13 +198,10 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(runnableApps, getFairShare());
-  if (checkStarvation) {
-updateStarvedApps();
-  }
 } finally {
   readLock.unlock();
 }
@@ -283,8 +280,10 @@ public class FSLeafQueue extends FSQueue {
* If this queue is starving due to fairshare, there must be at least
* one application that is starved. And, even if the queue is not
* starved due to fairshare, there might still be starved applications.
+   *
+   * Caller does not need read/write lock on the leaf queue.
*/
-  private void updateStarvedApps() {
+  void updateStarvedApps() {
 // Fetch apps with pending demand
    TreeSet<FSAppAttempt> appsWithDemand = fetchAppsWithDemand(false);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index b062c58..5b4e4dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -79,13 +79,13 @@ public class FSParentQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(childQueues, getFairShare());
   for (FSQueue childQueue : childQueues) {
 childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-childQueue.updateInternal(checkStarvation);
+childQueue.updateInternal();
   }
 } finally {
   readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue
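
Taken together, the hunks decouple the two halves of a scheduler update:
updateInternal() recomputes fair shares under the queue read lock, while
updateStarvedApps() (now package-private, with a javadoc noting that no
read/write lock is needed) identifies starved applications separately. A
rough sketch of the resulting update loop on the scheduler side; the locking
layout and helper names here are assumptions based on the commit message,
not a copy of FairScheduler.java:

    // Sketch: compute shares under the scheduler write lock, then detect
    // starvation outside it.
    void update() {
      writeLock.lock();
      try {
        rootQueue.updateInternal();   // recompute fair shares top-down
      } finally {
        writeLock.unlock();
      }
      // Starvation detection only reads shares and per-app demand, so it
      // no longer needs the scheduler write lock.
      for (FSLeafQueue leaf : queueMgr.getLeafQueues()) {
        leaf.updateStarvedApps();
      }
    }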

[02/50] [abbrv] hadoop git commit: HDFS-11866. JournalNode Sync should be off by default in hdfs-default.xml. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11866. JournalNode Sync should be off by default in hdfs-default.xml. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca6bcc3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca6bcc3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca6bcc3c

Branch: refs/heads/YARN-1011
Commit: ca6bcc3c76babb2f7def1fd413d0917783224110
Parents: 8e0f83e
Author: Arpit Agarwal 
Authored: Mon May 22 17:53:47 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon May 22 17:53:47 2017 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca6bcc3c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f0f2220..9ddd343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3839,7 +3839,7 @@
 
 
   dfs.journalnode.enable.sync
-  true
+  false
   
    If true, the journal nodes will sync with each other. The journal nodes
 will periodically gossip with other journal nodes to compare edit log
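
Since the shipped default is now false, deployments (or
MiniDFSCluster-based tests) that rely on JournalNode syncing must opt in
explicitly. A hedged sketch: the property name comes from the diff above,
the wrapper class is hypothetical, and in production the same override
would normally live in hdfs-site.xml rather than in code:

    import org.apache.hadoop.conf.Configuration;

    public class JournalSyncConfig {
      // Returns a Configuration with JournalNode syncing re-enabled,
      // overriding the new shipped default of false.
      public static Configuration withJournalSyncEnabled() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.journalnode.enable.sync", true);
        return conf;
      }
    }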





[26/50] [abbrv] hadoop git commit: HDFS-11817. A faulty node can cause a lease leak and NPE on accessing data. Contributed by Kihwal Lee.

2017-05-31 Thread haibochen
HDFS-11817. A faulty node can cause a lease leak and NPE on accessing data. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b5ad487
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b5ad487
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b5ad487

Branch: refs/heads/YARN-1011
Commit: 2b5ad48762587abbcd8bdb50d0ae98f8080d926c
Parents: 8759009
Author: Kihwal Lee 
Authored: Thu May 25 17:17:38 2017 -0500
Committer: Kihwal Lee 
Committed: Thu May 25 17:17:38 2017 -0500

--
 .../BlockUnderConstructionFeature.java  |  9 +++-
 .../server/blockmanagement/DatanodeManager.java |  3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../hdfs/server/namenode/LeaseManager.java  | 15 +--
 .../TestBlockUnderConstructionFeature.java  |  8 ++--
 .../namenode/TestBlockUnderConstruction.java| 45 
 .../TestCommitBlockSynchronization.java |  2 +-
 8 files changed, 73 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index 7453184..61390d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -223,10 +223,17 @@ public class BlockUnderConstructionFeature {
* Initialize lease recovery for this block.
* Find the first alive data-node starting from the previous primary and
* make it primary.
+   * @param blockInfo Block to be recovered
+   * @param recoveryId Recovery ID (new gen stamp)
+   * @param startRecovery Issue recovery command to datanode if true.
*/
-  public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId) {
+  public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId,
+  boolean startRecovery) {
 setBlockUCState(BlockUCState.UNDER_RECOVERY);
 blockRecoveryId = recoveryId;
+if (!startRecovery) {
+  return;
+}
 if (replicas.length == 0) {
   NameNode.blockStateChangeLog.warn("BLOCK*" +
   " BlockUnderConstructionFeature.initializeBlockRecovery:" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 7dcc9fd..c303594 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -642,10 +642,11 @@ public class DatanodeManager {
   String format, Object... args) throws UnregisteredNodeException {
 storageIDs = storageIDs == null ? new String[0] : storageIDs;
 if (datanodeID.length != storageIDs.length) {
+  // Error for pre-2.0.0-alpha clients.
   final String err = (storageIDs.length == 0?
   "Missing storageIDs: It is likely that the HDFS client,"
   + " who made this call, is running in an older version of Hadoop"
-  + " which does not support storageIDs."
+  + "(pre-2.0.0-alpha)  which does not support storageIDs."
   : "Length mismatched: storageIDs.length=" + storageIDs.length + " != 
"
   ) + " datanodeID.length=" + datanodeID.length;
   throw new HadoopIllegalArgumentException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index a6305
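
The central API change is in the first hunk: initializeBlockRecovery gains
a startRecovery flag, letting callers move a block to UNDER_RECOVERY and
record the new generation stamp without immediately electing a primary
datanode and issuing a recovery command. A hedged caller-side sketch (the
wrapper method is hypothetical; only the three-argument call mirrors the
diff):

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;

    final class RecoverySketch {
      static void markUnderRecovery(BlockUnderConstructionFeature uc,
          BlockInfo block, long newGenStamp, boolean issueCommandNow) {
        // With issueCommandNow == false, only the UC state and recovery id
        // are updated; no datanode recovery work is scheduled yet.
        uc.initializeBlockRecovery(block, newGenStamp, issueCommandNow);
      }
    }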

[12/50] [abbrv] hadoop git commit: Revert "HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang."

2017-05-31 Thread haibochen
Revert "HDFS-11515. -du throws ConcurrentModificationException. Contributed by 
Istvan Fajth, Wei-Chiu Chuang."

This reverts commit bc7aff7cec07bbc3fed63a00c8f1584c34670998.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cba5612
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cba5612
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cba5612

Branch: refs/heads/YARN-1011
Commit: 2cba5612282509001a221b9751e1fd36c084807f
Parents: 0e83ed5
Author: Wei-Chiu Chuang 
Authored: Wed May 24 17:20:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed May 24 17:20:27 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 --
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 2 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9840679..9addbfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,11 +633,6 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
-if (deletedNode.isDirectory()){
-  DirectoryWithSnapshotFeature sf =
-  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
-  sf.computeContentSummary4Snapshot(context);
-}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d06c384..d1b3aa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,7 +26,6 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
-import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2430,7 +2429,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
+File tempFile = File.createTempFile("testDu-", ".tmp");
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2540,8 +2539,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
-getTestDir());
+File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 7926e44..ca53788 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-proj

[10/50] [abbrv] hadoop git commit: HDFS-11823. Extend TestDFSStripedInputStream/TestDFSStripedOutputStream with a random EC policy. Contributed by Takanobu Asanuma.

2017-05-31 Thread haibochen
HDFS-11823. Extend TestDFSStripedInputStream/TestDFSStripedOutputStream with a 
random EC policy. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c8dd6d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c8dd6d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c8dd6d3

Branch: refs/heads/YARN-1011
Commit: 1c8dd6d3d10773f281538e1dea0ffdca9db34bfe
Parents: dcf4559
Author: Jing Zhao 
Authored: Wed May 24 11:14:19 2017 -0700
Committer: Jing Zhao 
Committed: Wed May 24 11:14:19 2017 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 12 ++
 .../TestDFSRSDefault10x4StripedInputStream.java | 34 ---
 ...TestDFSRSDefault10x4StripedOutputStream.java | 35 ---
 ...fault10x4StripedOutputStreamWithFailure.java | 35 ---
 ...DFSStripedInputStreamWithRandomECPolicy.java | 45 
 ...tputStreamWithFailureWithRandomECPolicy.java | 45 
 ...FSStripedOutputStreamWithRandomECPolicy.java | 45 
 .../hdfs/TestDFSXORStripedInputStream.java  | 32 --
 .../hdfs/TestDFSXORStripedOutputStream.java | 34 ---
 ...estDFSXORStripedOutputStreamWithFailure.java | 35 ---
 10 files changed, 147 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8dd6d3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1bab5db..057e94a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -49,6 +49,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -568,4 +569,15 @@ public class StripedFileTestUtil {
   public static ErasureCodingPolicy getDefaultECPolicy() {
 return SystemErasureCodingPolicies.getPolicies().get(0);
   }
+
+  /**
+   * Get non-default Erasure Coding Policy randomly.
+   * @return ErasureCodingPolicy
+   */
+  public static ErasureCodingPolicy getRandomNonDefaultECPolicy() {
+Random rand = new Random();
+List<ErasureCodingPolicy> policies = SystemErasureCodingPolicies
+.getPolicies();
+return policies.get(1 + rand.nextInt(policies.size() - 1));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8dd6d3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
deleted file mode 100644
index 1d09a6c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-
-/**
- * This tests read operation of DFS striped file with RS-10-4-64k
- *  erasure code policy.
- */
-public class TestDFSRSDefault10x4StripedInputStream extends
-TestDFSStripedInputStream {
-
-  public ErasureCodingPolicy getEcPolicy() {
-return SystemErasureCodingPolicies.getByID(
-SystemErasureCodingPo
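
Note the index arithmetic in the new helper: policies.get(1 +
rand.nextInt(policies.size() - 1)) draws uniformly from indices 1 through
size - 1, so the system default policy at index 0 can never be returned. A
sketch of how the new *WithRandomECPolicy tests listed in the diffstat
presumably use it, mirroring the shape of the deleted
TestDFSRSDefault10x4StripedInputStream above:

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    /**
     * Runs the striped input stream tests against a randomly chosen
     * non-default erasure coding policy instead of a hard-coded one.
     */
    public class TestDFSStripedInputStreamWithRandomECPolicy extends
        TestDFSStripedInputStream {

      public ErasureCodingPolicy getEcPolicy() {
        return StripedFileTestUtil.getRandomNonDefaultECPolicy();
      }
    }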

[39/50] [abbrv] hadoop git commit: HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.

2017-05-31 Thread haibochen
HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07e60f85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07e60f85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07e60f85

Branch: refs/heads/YARN-1011
Commit: 07e60f85d87ca9a585d351a308ee0ecfa9293750
Parents: d4015f8
Author: Akira Ajisaka 
Authored: Tue May 30 15:11:10 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue May 30 15:11:10 2017 +0900

--
 .../fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07e60f85/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 419ddee..321e958 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -20,17 +20,22 @@ package org.apache.hadoop.fs.aliyun.oss;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assume.*;
-import org.apache.hadoop.fs.FileStatus;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+import static org.junit.Assume.assumeTrue;
+
 /**
  * Tests a live Aliyun OSS system.
  *





[25/50] [abbrv] hadoop git commit: YARN-6582. FSAppAttempt demand can be updated atomically in updateDemand(). (Karthik Kambatla via Yufei Gu)

2017-05-31 Thread haibochen
YARN-6582. FSAppAttempt demand can be updated atomically in updateDemand(). 
(Karthik Kambatla via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87590090
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87590090
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87590090

Branch: refs/heads/YARN-1011
Commit: 87590090c887829e874a7132be9cf8de061437d6
Parents: 3fd6a2d
Author: Yufei Gu 
Authored: Thu May 25 14:22:13 2017 -0700
Committer: Yufei Gu 
Committed: Thu May 25 14:22:13 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 23 +---
 1 file changed, 10 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87590090/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 4f7e164..a5772ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1286,24 +1286,21 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
   @Override
   public void updateDemand() {
-demand = Resources.createResource(0);
 // Demand is current consumption plus outstanding requests
-Resources.addTo(demand, getCurrentConsumption());
+Resource tmpDemand = Resources.clone(getCurrentConsumption());
 
 // Add up outstanding resource requests
-try {
-  writeLock.lock();
-  for (SchedulerRequestKey k : getSchedulerKeys()) {
-PendingAsk pendingAsk = getPendingAsk(k, ResourceRequest.ANY);
-if (pendingAsk.getCount() > 0) {
-  Resources.multiplyAndAddTo(demand,
-  pendingAsk.getPerAllocationResource(),
-  pendingAsk.getCount());
-}
+for (SchedulerRequestKey k : getSchedulerKeys()) {
+  PendingAsk pendingAsk = getPendingAsk(k, ResourceRequest.ANY);
+  if (pendingAsk.getCount() > 0) {
+Resources.multiplyAndAddTo(tmpDemand,
+pendingAsk.getPerAllocationResource(),
+pendingAsk.getCount());
   }
-} finally {
-  writeLock.unlock();
 }
+
+// Update demand
+demand = tmpDemand;
   }
 
   @Override
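
The refactor replaces lock-protected mutation of the shared demand field
with a build-then-publish pattern: all arithmetic happens on a local
tmpDemand, and the only write to shared state is a single reference
assignment, which readers observe atomically without taking the attempt's
write lock. A generic sketch of the idiom (a hypothetical class, not YARN
code):

    import java.util.List;

    class DemandTracker {
      // volatile so readers see the freshly published value without locking.
      private volatile long demand;

      void updateDemand(long currentConsumption, List<Long> outstandingAsks) {
        long tmp = currentConsumption;   // start from current consumption
        for (long ask : outstandingAsks) {
          tmp += ask;                    // add up outstanding requests
        }
        demand = tmp;                    // single atomic publish
      }
    }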





[19/50] [abbrv] hadoop git commit: HADOOP-14430 the accessTime of FileStatus returned by SFTPFileSystem's getFileStatus method is always 0. Contributed by Hongyuan Li.

2017-05-31 Thread haibochen
HADOOP-14430 the accessTime of FileStatus returned by SFTPFileSystem's
getFileStatus method is always 0.
Contributed by Hongyuan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf0e2d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf0e2d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf0e2d6

Branch: refs/heads/YARN-1011
Commit: 8bf0e2d6b38a2cbd3c3d45557ede7575c1f18312
Parents: 1ba9704
Author: Steve Loughran 
Authored: Thu May 25 15:19:58 2017 +0100
Committer: Steve Loughran 
Committed: Thu May 25 15:19:58 2017 +0100

--
 .../org/apache/hadoop/fs/sftp/SFTPFileSystem.java |  2 +-
 .../org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java | 14 ++
 2 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf0e2d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 30cf4d3..d91d391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -278,7 +278,7 @@ public class SFTPFileSystem extends FileSystem {
 // block sizes on server. The assumption could be less than ideal.
 long blockSize = DEFAULT_BLOCK_SIZE;
 long modTime = attr.getMTime() * 1000; // convert to milliseconds
-long accessTime = 0;
+long accessTime = attr.getATime() * 1000L;
 FsPermission permission = getPermissions(sftpFile);
 // not be able to get the real user group name, just use the user and group
 // id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf0e2d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
index 8dc5324..9b514e1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.fs.sftp;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -305,4 +308,15 @@ public class TestSFTPFileSystem {
 sftpFs.rename(file1, file2);
   }
 
+  @Test
+  public void testGetAccessTime() throws IOException {
+Path file = touch(localFs, name.getMethodName().toLowerCase());
+LocalFileSystem local = (LocalFileSystem)localFs;
+java.nio.file.Path path = (local).pathToFile(file).toPath();
+long accessTime1 = Files.readAttributes(path, BasicFileAttributes.class)
+.lastAccessTime().toMillis();
+long accessTime2 = sftpFs.getFileStatus(file).getAccessTime();
+assertEquals(accessTime1, accessTime2);
+  }
+
 }
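
The L suffix in attr.getATime() * 1000L is doing real work: JSch's
SftpATTRS reports times as 32-bit epoch seconds, and an unsuffixed * 1000
would be evaluated in int arithmetic and overflow before widening to long.
A minimal illustration of the safe conversion:

    final class TimeConv {
      // Convert 32-bit epoch seconds to epoch milliseconds. The long
      // literal forces 64-bit multiplication; a plain "* 1000" would wrap
      // in int arithmetic for timestamps later than ~24 days into 1970.
      static long secondsToMillis(int epochSeconds) {
        return epochSeconds * 1000L;
      }
    }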





[27/50] [abbrv] hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)

2017-05-31 Thread haibochen
YARN-6555. Store application flow context in NM state store for work-preserving 
restart. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47474fff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47474fff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47474fff

Branch: refs/heads/YARN-1011
Commit: 47474fffac085e0e5ea46336bf80ccd0677017a3
Parents: 2b5ad48
Author: Haibo Chen 
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Haibo Chen 
Committed: Thu May 25 21:15:27 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 71 +---
 .../application/ApplicationImpl.java| 27 ++--
 .../yarn_server_nodemanager_recovery.proto  |  7 ++
 .../TestContainerManagerRecovery.java   | 40 +--
 4 files changed, 111 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f65f1ac..50268b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -381,10 +382,20 @@ public class ContainerManagerImpl extends 
CompositeService implements
   new LogAggregationContextPBImpl(p.getLogAggregationContext());
 }
 
+FlowContext fc = null;
+if (p.getFlowContext() != null) {
+  FlowContextProto fcp = p.getFlowContext();
+  fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+  fcp.getFlowRunId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"Recovering Flow context: " + fc + " for an application " + appId);
+  }
+}
+
 LOG.info("Recovering application " + appId);
-//TODO: Recover flow and flow run ID
-ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-creds, context, p.getAppLogAggregationInitedTime());
+ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+appId, creds, context, p.getAppLogAggregationInitedTime());
 context.getApplications().put(appId, app);
 app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -936,7 +947,7 @@ public class ContainerManagerImpl extends CompositeService implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
   String user, Credentials credentials,
   Map<ApplicationAccessType, String> appAcls,
-  LogAggregationContext logAggregationContext) {
+  LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
 ContainerManagerApplicationProto.Builder builder =
 ContainerManagerApplicationProto.newBuilder();
@@ -971,6 +982,16 @@ public class ContainerManagerImpl extends CompositeService implements
   }
 }
 
+builder.clearFlowContext();
+if (flowContext != null && flowContext.getFlowName() != null
+&& flowContext.getFlowVersion() != null) {
+  FlowContextProto fcp =
+  FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+  .setFlowVersion(flowContext.getFlowVersion())
+  .setFlowRunId(flowContext.getFlowRunId()).build();
+  builder.setFlowContext(fcp);
+}
+
 return builder.build();
   }
 
@@ -1016,25 +1037,29 @@ public class ContainerManagerImpl extends CompositeService implements
 this.readLock.lock();
 try {
   if (!isServic
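The recovery path above mirrors the change to buildAppProto() shown earlier: the flow fields are persisted only when present, and rebuilt on restart only when stored. A minimal, self-contained sketch of that round trip (the class and method names below are hypothetical stand-ins, not NodeManager code):

    // Hypothetical stand-in for FlowContextProto; not part of the patch.
    class FlowRecord {
        final String name;
        final String version;
        final long runId;

        FlowRecord(String name, String version, long runId) {
            this.name = name;
            this.version = version;
            this.runId = runId;
        }
    }

    public class FlowContextRoundTrip {
        // Persist only when the mandatory fields are present, mirroring the
        // null checks in buildAppProto(); null means "no context stored".
        static FlowRecord toRecord(String name, String version, long runId) {
            if (name == null || version == null) {
                return null;
            }
            return new FlowRecord(name, version, runId);
        }

        public static void main(String[] args) {
            FlowRecord saved = toRecord("distributed_grep", "1", 42L);
            if (saved != null) {
                System.out.println("Recovering flow context for " + saved.name);
            } else {
                // An app persisted before this change simply has no context.
                System.out.println("No flow context stored");
            }
        }
    }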

[11/50] [abbrv] hadoop git commit: HDFS-11877. FileJournalManager#getLogFile should ignore in progress edit logs during JN sync. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11877. FileJournalManager#getLogFile should ignore in progress edit logs 
during JN sync. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e83ed5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e83ed5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e83ed5e

Branch: refs/heads/YARN-1011
Commit: 0e83ed5e7372c801c9fee01df91b6b56de467ab1
Parents: 1c8dd6d
Author: Arpit Agarwal 
Authored: Wed May 24 16:09:00 2017 -0700
Committer: Arpit Agarwal 
Committed: Wed May 24 16:09:00 2017 -0700

--
 .../hdfs/qjournal/client/IPCLoggerChannel.java|  2 +-
 .../qjournal/server/GetJournalEditServlet.java| 16 +---
 .../hdfs/qjournal/server/JournalNodeSyncer.java   |  2 +-
 .../hdfs/server/namenode/FileJournalManager.java  | 18 +++---
 4 files changed, 30 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e83ed5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index cbfe5b8..6cd892c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -276,7 +276,7 @@ public class IPCLoggerChannel implements AsyncLogger {
 
 try {
   String path = GetJournalEditServlet.buildPath(
-  journalId, segmentTxId, nsInfo);
+  journalId, segmentTxId, nsInfo, true);
   return new URL(httpServerURL, path);
 } catch (MalformedURLException e) {
   // should never get here.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e83ed5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
index 2335ea5..e96fd4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
@@ -72,6 +72,7 @@ public class GetJournalEditServlet extends HttpServlet {
   static final String STORAGEINFO_PARAM = "storageInfo";
   static final String JOURNAL_ID_PARAM = "jid";
   static final String SEGMENT_TXID_PARAM = "segmentTxId";
+  static final String IN_PROGRESS_OK = "inProgressOk";
 
   protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
   throws IOException {
@@ -186,6 +187,14 @@ public class GetJournalEditServlet extends HttpServlet {
   final Configuration conf = (Configuration) getServletContext()
   .getAttribute(JspHelper.CURRENT_CONF);
   final String journalId = request.getParameter(JOURNAL_ID_PARAM);
+  final String inProgressOkStr = request.getParameter(IN_PROGRESS_OK);
+  final boolean inProgressOk;
+  if (inProgressOkStr != null &&
+  inProgressOkStr.equalsIgnoreCase("false")) {
+inProgressOk = false;
+  } else {
+inProgressOk = true;
+  }
   QuorumJournalManager.checkJournalId(journalId);
   final JNStorage storage = JournalNodeHttpServer
   .getJournalFromContext(context, journalId).getStorage();
@@ -210,8 +219,7 @@ public class GetJournalEditServlet extends HttpServlet {
 // Synchronize on the FJM so that the file doesn't get finalized
 // out from underneath us while we're in the process of opening
 // it up.
-EditLogFile elf = fjm.getLogFile(
-segmentTxId);
+EditLogFile elf = fjm.getLogFile(segmentTxId, inProgressOk);
 if (elf == null) {
   response.sendError(HttpServletResponse.SC_NOT_FOUND,
   "No edit log found starting at txid " + segmentTxId);
@@ -239,7 +247,7 @@ public class GetJournalEditServlet extends HttpServlet {
   }
 
   public static String buildPath(String journalId, long segmentTxId,
-  NamespaceInfo nsInfo) {
+  NamespaceInfo nsInfo, boolean inProgressOk) {
 StringBuilder path = new StringBuilder("/getJournal?");
 try {
   path.append(JOURNAL_ID_PARAM).append("=")
@@ -248,6 +256,8 @@
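The new inProgressOk parameter is default-true: only an explicit, case-insensitive "false" disables it, matching the branch added to the servlet above. A standalone sketch of the rule (the helper name is ours, not from the patch):

    public class ParamParsing {
        // Absent, or anything other than a case-insensitive "false", is true.
        static boolean parseInProgressOk(String raw) {
            return raw == null || !raw.equalsIgnoreCase("false");
        }

        public static void main(String[] args) {
            System.out.println(parseInProgressOk(null));    // true
            System.out.println(parseInProgressOk("FALSE")); // false
            System.out.println(parseInProgressOk("yes"));   // true (lenient)
        }
    }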

[49/50] [abbrv] hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails at runtime. Contributed by Jon Eagles.

2017-05-31 Thread haibochen
YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object 
decoding ever fails at runtime. Contributed by Jon Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4369690c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4369690c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4369690c

Branch: refs/heads/YARN-1011
Commit: 4369690ce63566131aee28696bf2683a3cb20205
Parents: 1543d0f
Author: Nathan Roberts 
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts 
Committed: Wed May 31 11:32:32 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 29 
 1 file changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4369690c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 }
   } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
 if (otherInfo) {
-  entity.addOtherInfo(
-  parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-  fstConf.asObject(iterator.peekNext().getValue()));
+  Object o = null;
+  String keyStr = parseRemainingKey(key,
+  prefixlen + OTHER_INFO_COLUMN.length);
+  try {
+o = fstConf.asObject(iterator.peekNext().getValue());
+entity.addOtherInfo(keyStr, o);
+  } catch (Exception e) {
+LOG.warn("Error while decoding "
++ entityId + ":otherInfo:" + keyStr, e);
+  }
 }
   } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
 if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   TimelineEvent event = new TimelineEvent();
   event.setTimestamp(ts);
   event.setEventType(tstype);
-  Object o = fstConf.asObject(value);
+  Object o = null;
+  try {
+o = fstConf.asObject(value);
+  } catch (Exception e) {
+LOG.warn("Error while decoding " + tstype, e);
+  }
   if (o == null) {
 event.setEventInfo(null);
   } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 KeyParser kp = new KeyParser(key, offset);
 String name = kp.getNextString();
 byte[] bytes = kp.getRemainingBytes();
-Object value = fstConf.asObject(bytes);
-entity.addPrimaryFilter(name, value);
+Object value = null;
+try {
+  value = fstConf.asObject(bytes);
+  entity.addPrimaryFilter(name, value);
+} catch (Exception e) {
+  LOG.warn("Error while decoding " + name, e);
+}
   }
 
   /**
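All three hunks apply the same defensive pattern: a single undecodable value is logged and skipped instead of letting a RuntimeException abort the whole read path. A self-contained sketch of the pattern (the decode method is a hypothetical stand-in for fstConf.asObject):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.logging.Logger;

    public class TolerantScan {
        private static final Logger LOG = Logger.getLogger("TolerantScan");

        // Hypothetical decoder standing in for fstConf.asObject(byte[]).
        static Object decode(byte[] value) {
            if (value.length == 0) {
                throw new RuntimeException("corrupt record");
            }
            return new String(value);
        }

        public static void main(String[] args) {
            byte[][] store = { "ok".getBytes(), {}, "also ok".getBytes() };
            List<Object> decoded = new ArrayList<>();
            for (byte[] value : store) {
                try {
                    decoded.add(decode(value));
                } catch (Exception e) {
                    // One bad entry is skipped; the scan continues.
                    LOG.warning("Error while decoding record: " + e.getMessage());
                }
            }
            System.out.println(decoded); // [ok, also ok]
        }
    }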





[35/50] [abbrv] hadoop git commit: HDFS-11446. TestMaintenanceState#testWithNNAndDNRestart fails intermittently. Contributed by Yiqun Lin.

2017-05-31 Thread haibochen
HDFS-11446. TestMaintenanceState#testWithNNAndDNRestart fails intermittently. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31058b24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31058b24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31058b24

Branch: refs/heads/YARN-1011
Commit: 31058b243e9575d90f038bb2fdf5a556710f6f7f
Parents: 89bb8bf
Author: Yiqun Lin 
Authored: Sun May 28 11:23:32 2017 +0800
Committer: Yiqun Lin 
Committed: Sun May 28 11:23:32 2017 +0800

--
 .../hadoop/hdfs/TestMaintenanceState.java   | 128 ++-
 1 file changed, 66 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31058b24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index a37bdb8..b49fba0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,12 +29,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -48,8 +42,16 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
 
 /**
  * This class tests node maintenance.
@@ -125,8 +127,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
 // When node is in ENTERING_MAINTENANCE state, it can still serve read
 // requests
-assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
-nodeOutofService));
+checkWithRetry(ns, fileSys, file, replicas, null,
+nodeOutofService);
 
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
@@ -387,8 +389,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
 // The block should be replicated to another datanode to meet
 // expected replication count.
-assertNull(checkWithRetry(ns, fileSys, file, expectedReplicasInRead,
-nodeOutofService));
+checkWithRetry(ns, fileSys, file, expectedReplicasInRead,
+nodeOutofService);
 
 cleanupFile(fileSys, file);
 teardown();
@@ -548,19 +550,19 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 client.datanodeReport(DatanodeReportType.LIVE).length);
 
 // test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
-assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-nodeOutofService));
+checkWithRetry(ns, fileSys, file, replicas - 1,
+nodeOutofService);
 
 takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), 0, null,
 AdminStates.DECOMMISSIONED);
 
 // test 2 after decommission has completed, the replication count is
 // replicas + 1 which includes the decommissioned node.
-assertNull(checkWithRetry(ns, fileSys, file, replicas + 1, null));
+checkWithRetry(ns, fileSys, file, replicas + 1, null);
 
 // test 3, put the node in service, replication count should restore.
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
-assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
+checkWithRetry(ns, fileSys, file, replicas, null);
 
 cleanupFile(fileSys, file);
   }
@@ -587,8 +589,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), L
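The added imports (GenericTestUtils, Supplier) indicate the one-shot assertNull calls became polling checks that retry until the cluster converges, which is what removes the intermittent failures. A sketch of the idiom; waitFor below is a simplified stand-in for GenericTestUtils.waitFor(Supplier<Boolean>, int, int):

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    public class WaitForSketch {
        // Poll until the condition holds or the deadline passes.
        static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
                int waitForMillis) throws TimeoutException, InterruptedException {
            long deadline = System.currentTimeMillis() + waitForMillis;
            while (!check.get()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new TimeoutException("condition never held");
                }
                Thread.sleep(checkEveryMillis);
            }
        }

        public static void main(String[] args) throws Exception {
            AtomicInteger reportedReplicas = new AtomicInteger(0);
            new Thread(() -> reportedReplicas.set(3)).start();
            // Retry instead of asserting once; the replica count catches up.
            waitFor(() -> reportedReplicas.get() == 3, 100, 60000);
            System.out.println("replicas converged");
        }
    }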

[30/50] [abbrv] hadoop git commit: Fix NPE in LazyPersistFileScrubber. Contributed by Inigo Goiri.

2017-05-31 Thread haibochen
Fix NPE in LazyPersistFileScrubber. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/303c8dc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/303c8dc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/303c8dc9

Branch: refs/heads/YARN-1011
Commit: 303c8dc9b6c853c0939ea9ba14388897cc258071
Parents: d81372d
Author: Inigo Goiri 
Authored: Fri May 26 13:15:44 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Fri May 26 13:16:01 2017 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/303c8dc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 11b62d9..997fd92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3870,9 +3870,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 while (it.hasNext()) {
   Block b = it.next();
   BlockInfo blockInfo = blockManager.getStoredBlock(b);
-  BlockCollection bc = getBlockCollection(blockInfo);
-  if (bc.getStoragePolicyID() == lpPolicy.getId()) {
-filesToDelete.add(bc);
+  if (blockInfo == null) {
+LOG.info("Cannot find block info for block " + b);
+  } else {
+BlockCollection bc = getBlockCollection(blockInfo);
+if (bc.getStoragePolicyID() == lpPolicy.getId()) {
+  filesToDelete.add(bc);
+}
   }
 }
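The fix is a plain look-up-then-guard: a block can be deleted between being queued and being scrubbed, so a missing BlockInfo is logged and skipped rather than dereferenced. Reduced to its essentials (the map stands in for the block manager):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    public class ScrubberGuard {
        public static void main(String[] args) {
            Map<Long, String> storedBlocks = new HashMap<>();
            storedBlocks.put(1L, "file-a"); // block 2 was deleted meanwhile
            for (Long b : Arrays.asList(1L, 2L)) {
                String info = storedBlocks.get(b); // may be null by now
                if (info == null) {
                    System.out.println("Cannot find block info for block " + b);
                } else {
                    System.out.println("Scrubbing " + info);
                }
            }
        }
    }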
 





[46/50] [abbrv] hadoop git commit: HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

2017-05-31 Thread haibochen
HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by 
ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13de636b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13de636b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13de636b

Branch: refs/heads/YARN-1011
Commit: 13de636b4079b077890ad10389ff350dcf8086a2
Parents: 547f18c
Author: Brahma Reddy Battula 
Authored: Wed May 31 23:09:08 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed May 31 23:09:08 2017 +0800

--
 .../java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java  | 4 ++--
 .../src/main/java/org/apache/hadoop/lib/server/Server.java   | 2 +-
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../src/test/java/org/apache/hadoop/lib/lang/TestXException.java | 2 +-
 .../src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java   | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java| 2 +-
 .../hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/Content.java | 2 +-
 .../hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 2 +-
 .../hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java   | 2 +-
 .../hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java| 2 +-
 .../hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/Diff.java  | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java   | 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java   | 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 055a57e..5922958 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -139,7 +139,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
-  public static enum FILE_TYPE {
+  public enum FILE_TYPE {
 FILE, DIRECTORY, SYMLINK;
 
 public static FILE_TYPE getType(FileStatus fileStatus) {
@@ -210,7 +210,7 @@ public class HttpFSFileSystem extends FileSystem
   private static final String HTTP_DELETE = "DELETE";
 
   @InterfaceAudience.Private
-  public static enum Operation {
+  public enum Operation {
 OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
 GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
 GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
index 82be027..57f651a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
@@ -101,7 +101,7 @@ public class Server {
* Enumeration that defines the server status.
*/
   @InterfaceAudience.Private
-  public static enum Status {
+  public enum Status {
 UNDEF(false, false),
 BOOTING(false, true),
 HALTED(true, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/
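Per the Java Language Specification (§8.9), a nested enum is implicitly static, so dropping the modifier changes nothing at runtime; the cleanup is purely cosmetic. For example (the enum constants echo the hunk above; the enclosing class is ours):

    public class StatusHolder {
        // "public static enum Status" and "public enum Status" declare the
        // exact same type: nested enums are implicitly static.
        public enum Status { UNDEF, BOOTING, HALTED }

        public static void main(String[] args) {
            // Usable without an enclosing instance either way.
            System.out.println(Status.BOOTING);
        }
    }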

[47/50] [abbrv] hadoop git commit: YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)

2017-05-31 Thread haibochen
YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely 
Novák via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbfed0e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbfed0e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbfed0e8

Branch: refs/heads/YARN-1011
Commit: cbfed0e82f57e96b8d5309e0613057963840554f
Parents: 13de636
Author: Jonathan Eagles 
Authored: Wed May 31 10:18:09 2017 -0500
Committer: Jonathan Eagles 
Committed: Wed May 31 10:18:42 2017 -0500

--
 .../server/resourcemanager/ResourceManager.java | 27 ++--
 1 file changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbfed0e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8f2c121..f727f55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
@@ -238,13 +239,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 rmContext.setConfigurationProvider(configurationProvider);
 
 // load core-site.xml
-InputStream coreSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-if (coreSiteXMLInputStream != null) {
-  this.conf.addResource(coreSiteXMLInputStream,
-  YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
 // Do refreshUserToGroupsMappings with loaded core-site.xml
 Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
@@ -257,13 +252,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);
 
 // load yarn-site.xml
-InputStream yarnSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-if (yarnSiteXMLInputStream != null) {
-  this.conf.addResource(yarnSiteXMLInputStream,
-  YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
 
 validateConfigs(this.conf);
 
@@ -339,6 +328,16 @@ public class ResourceManager extends CompositeService implements Recoverable {
 super.serviceInit(this.conf);
   }
 
+  private void loadConfigurationXml(String configurationFile)
+  throws YarnException, IOException {
+InputStream configurationInputStream =
+this.configurationProvider.getConfigurationInputStream(this.conf,
+configurationFile);
+if (configurationInputStream != null) {
+  this.conf.addResource(configurationInputStream, configurationFile);
+}
+  }
+
   protected EmbeddedElector createEmbeddedElector() throws IOException {
 EmbeddedElector elector;
 curatorEnabled =





[44/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 5f9b883..c1df562 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -28,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,19 +58,17 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentMatcher;
 
 public class TestNodeManagerReboot {
 
@@ -195,19 +193,18 @@ public class TestNodeManagerReboot {
 // restart the NodeManager
 restartNM(MAX_TRIES);
 checkNumOfLocalDirs();
-
-verify(delService, times(1)).delete(
-  (String) isNull(),
-  argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
-  + "_DEL_")));
-verify(delService, times(1)).delete((String) isNull(),
-  argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(user, null,
-new String[] { destinationFile })));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
-  + "_DEL_", new String[] {})));
+
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null,
+new Path(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"), null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.FILECACHE + "_DEL_"),
+null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, user, null, Arrays.asList(new Path(destinationFile)))));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.USERCACHE + "_DEL_"),
+new ArrayList<Path>())));
 
 // restart the NodeManager again
 // this time usercache directory should be empty
@@ -329,72 +326,4 @@ public class TestNodeManagerReboot {
   return conf;
 }
   }
-
-  class PathInclude extends ArgumentMatcher<Path> {
-
-final String part;
-
-PathInclude(String part) {
-  this.part = part;
-}
-
-@Override
-public boolean matches(Object o) {
-  return ((Path) o).getName().indexOf(part) != -1;
-}
-  }
-  
-  class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
-final String user;
-final String subDirIncludes;
-final String[] baseDirIncludes;
-
-public FileDeletionInclude(String user, String subDirIncludes,
-String [] baseDirIncludes) {
-  this.user = user;
-  this.subDirIncludes = subDirIncludes;
-   

[28/50] [abbrv] hadoop git commit: YARN-6641. Non-public resource localization on a bad disk causes subsequent containers failure. Contributed by Kuhu Shukla

2017-05-31 Thread haibochen
YARN-6641. Non-public resource localization on a bad disk causes subsequent 
containers failure. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aea42930
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aea42930
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aea42930

Branch: refs/heads/YARN-1011
Commit: aea42930bbb9566ea6988f684dbd72a72a2bdadf
Parents: 47474ff
Author: Jason Lowe 
Authored: Fri May 26 09:37:56 2017 -0500
Committer: Jason Lowe 
Committed: Fri May 26 09:37:56 2017 -0500

--
 .../localizer/LocalResourcesTrackerImpl.java| 13 ++--
 .../localizer/ResourceLocalizationService.java  | 10 +--
 .../TestLocalResourcesTrackerImpl.java  |  8 +--
 .../TestResourceLocalizationService.java| 71 
 4 files changed, 86 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index 940c599..af34e92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -94,14 +94,6 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
 
   public LocalResourcesTrackerImpl(String user, ApplicationId appId,
   Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
-  Configuration conf, NMStateStoreService stateStore) {
-this(user, appId, dispatcher,
-new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(),
-useLocalCacheDirectoryManager, conf, stateStore, null);
-  }
-
-  public LocalResourcesTrackerImpl(String user, ApplicationId appId,
-  Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
   Configuration conf, NMStateStoreService stateStore,
   LocalDirsHandlerService dirHandler) {
 this(user, appId, dispatcher,
@@ -528,4 +520,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
 }
 return mgr;
   }
+
+  @VisibleForTesting
+  LocalDirsHandlerService getDirsHandler() {
+return dirsHandler;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 28fb53c..663bad7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -306,7 +306,7 @@ public class ResourceLocalizationService extends CompositeService
   trackerState = userResources.getPrivateTrackerState();
   if (!trackerState.isEmpty()) {
 LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-null, dispatcher, true, super.getConfig(), stateStore);
+null, dispatcher, true, super.getConfig(), stateStore, dirsHandler);
 LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
 tracker);
 if (oldTracker != null) {
@@ -322,7 +322,8 @@ public class ResourceLocalizationService extends CompositeService
   Application

[09/50] [abbrv] hadoop git commit: HADOOP-14166. Reset the DecayRpcScheduler AvgResponseTime metric to zero when queue is not used. Contributed by Surendra Singh Lilhore.

2017-05-31 Thread haibochen
HADOOP-14166. Reset the DecayRpcScheduler AvgResponseTime metric to zero when 
queue is not used. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcf4559e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcf4559e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcf4559e

Branch: refs/heads/YARN-1011
Commit: dcf4559ebdb5bb11d03b264a9875bff316b89eef
Parents: b7a0c0e
Author: Brahma Reddy Battula 
Authored: Thu May 25 01:27:13 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Thu May 25 01:27:13 2017 +0800

--
 .../src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcf4559e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index 3c09625..5ae4e8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -618,6 +618,8 @@ public class DecayRpcScheduler implements RpcScheduler,
 } else {
   responseTimeAvgInLastWindow.set(i, averageResponseTime);
 }
+  } else {
+responseTimeAvgInLastWindow.set(i, 0);
   }
   responseTimeCountInLastWindow.set(i, responseTimeCount);
   if (LOG.isDebugEnabled()) {
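The added else branch makes the quiet-queue case explicit: when a priority level handled zero calls in the last decay window, its published average resets to 0 instead of carrying the previous window's value forward. The rule in isolation (field names are illustrative):

    public class WindowedAverage {
        // Publish the window's average, or 0 when there were no calls,
        // so a stale average from an earlier window cannot linger.
        static double publish(long totalResponseTimeMs, long callCount) {
            return callCount > 0 ? (double) totalResponseTimeMs / callCount : 0;
        }

        public static void main(String[] args) {
            System.out.println(publish(900, 3)); // 300.0
            System.out.println(publish(0, 0));   // 0.0, not the stale 300.0
        }
    }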





[13/50] [abbrv] hadoop git commit: Revert "HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory."

2017-05-31 Thread haibochen
Revert "HDFS-10797. Disk usage summary of snapshots causes renamed blocks to 
get counted twice. Contributed by Sean Mackrory."

This reverts commit 6a38d118d86b7907009bcec34f1b788d076f1d1c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8b69d79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8b69d79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8b69d79

Branch: refs/heads/YARN-1011
Commit: b8b69d797aed8dfeb65ea462c2856f62e9aa1023
Parents: 2cba561
Author: Wei-Chiu Chuang 
Authored: Wed May 24 17:21:22 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed May 24 17:21:22 2017 -0700

--
 .../ContentSummaryComputationContext.java   |  94 +
 .../hadoop/hdfs/server/namenode/INode.java  |   1 -
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 -
 .../hdfs/server/namenode/INodeReference.java|   2 -
 .../hdfs/server/namenode/INodeSymlink.java  |   1 -
 .../snapshot/DirectorySnapshottableFeature.java |   9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   1 -
 .../snapshot/TestRenameWithSnapshots.java   | 199 ---
 10 files changed, 26 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b69d79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 53fa552..b35270d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,10 +21,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-
-import java.util.HashSet;
-import java.util.Set;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -39,8 +35,6 @@ public class ContentSummaryComputationContext {
   private long yieldCount = 0;
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
-  private Set<INode> includedNodes = new HashSet<>();
-  private Set<INode> deletedSnapshottedNodes = new HashSet<>();
 
   /**
* Constructor
@@ -57,8 +51,8 @@ public class ContentSummaryComputationContext {
 this.fsn = fsn;
 this.limitPerRun = limitPerRun;
 this.nextCountLimit = limitPerRun;
-setCounts(new ContentCounts.Builder().build());
-setSnapshotCounts(new ContentCounts.Builder().build());
+this.counts = new ContentCounts.Builder().build();
+this.snapshotCounts = new ContentCounts.Builder().build();
 this.sleepMilliSec = sleepMicroSec/1000;
 this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
@@ -88,7 +82,6 @@ public class ContentSummaryComputationContext {
 }
 
 // Have we reached the limit?
-ContentCounts counts = getCounts();
 long currentCount = counts.getFileCount() +
 counts.getSymlinkCount() +
 counts.getDirectoryCount() +
@@ -130,22 +123,14 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public synchronized ContentCounts getCounts() {
+  public ContentCounts getCounts() {
 return counts;
   }
 
-  private synchronized void setCounts(ContentCounts counts) {
-this.counts = counts;
-  }
-
   public ContentCounts getSnapshotCounts() {
 return snapshotCounts;
   }
 
-  private void setSnapshotCounts(ContentCounts snapshotCounts) {
-this.snapshotCounts = snapshotCounts;
-  }
-
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
 Preconditions.checkState((bsps != null || fsn != null),
 "BlockStoragePolicySuite must be either initialized or available via" +
@@ -153,77 +138,4 @@ public class ContentSummaryComputationContext {
 return (bsps != null) ? bsps:
 fsn.getBlockManager().getStoragePolicySuite();
   }
-
-  /**
-   * If the node is an INodeReference, resolves it to the actual inode.
-   * Snapshot diffs represent renamed / moved files as different
-   * INodeReferences, but the underlying INode it refers to is consistent.
-   *
-   * @param node
-   * @return The referred INode if there i
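The reverted code deduplicated renamed files by resolving each INodeReference to its underlying INode and remembering what had already been counted in a HashSet. The core idea, simplified (integers stand in for resolved inodes):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class DedupeCount {
        public static void main(String[] args) {
            // Two rename aliases resolve to the same underlying inode (7).
            Set<Integer> included = new HashSet<>();
            long counted = 0;
            for (int resolvedId : Arrays.asList(7, 7, 9)) {
                // Set.add returns false for an inode already counted, which
                // is how renamed blocks avoided being counted twice.
                if (included.add(resolvedId)) {
                    counted++;
                }
            }
            System.out.println(counted); // 2, not 3
        }
    }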

[06/50] [abbrv] hadoop git commit: HDFS-11793. Allow to enable user defined erasure coding policy. Contributed by Sammi Chen

2017-05-31 Thread haibochen
HDFS-11793. Allow to enable user defined erasure coding policy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a62be38a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a62be38a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a62be38a

Branch: refs/heads/YARN-1011
Commit: a62be38a5e5d3a61dfb59054b3f5fd5b1b7053b3
Parents: 52661e0
Author: Kai Zheng 
Authored: Wed May 24 18:45:52 2017 +0800
Committer: Kai Zheng 
Committed: Wed May 24 18:45:52 2017 +0800

--
 .../hadoop/io/erasurecode/CodecRegistry.java| 19 ++
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  4 ++
 .../io/erasurecode/TestCodecRegistry.java   |  7 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  9 ++-
 .../hdfs/protocol/AddECPolicyResponse.java  | 66 ++
 .../hdfs/protocol/AddingECPolicyResponse.java   | 66 --
 .../hadoop/hdfs/protocol/ClientProtocol.java|  7 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  9 ++-
 .../ClientNamenodeProtocolTranslatorPB.java | 10 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 +++---
 .../apache/hadoop/hdfs/util/ECPolicyLoader.java |  2 +-
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 .../hdfs/protocol/TestErasureCodingPolicy.java  | 10 +--
 .../src/main/conf/user_ec_policies.xml.template |  1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  8 +--
 .../namenode/ErasureCodingPolicyManager.java| 72 ++--
 .../server/namenode/FSDirErasureCodingOp.java   |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 35 +++---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  5 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  6 +-
 .../src/main/resources/hdfs-default.xml |  7 ++
 .../src/site/markdown/HDFSErasureCoding.md  |  3 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 39 ---
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 54 +++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 26 +++
 28 files changed, 289 insertions(+), 212 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a62be38a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index d53d598..fcf1349 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -127,28 +127,20 @@ public final class CodecRegistry {
   /**
* Get all coder names of the given codec.
* @param codecName the name of codec
-   * @return an array of all coder names
+   * @return an array of all coder names, null if not exist
*/
   public String[] getCoderNames(String codecName) {
 String[] coderNames = coderNameMap.get(codecName);
-if (coderNames == null) {
-  throw new IllegalArgumentException("No available raw coder factory for "
-  + codecName);
-}
 return coderNames;
   }
 
   /**
* Get all coder factories of the given codec.
* @param codecName the name of codec
-   * @return a list of all coder factories
+   * @return a list of all coder factories, null if not exist
*/
   public List<RawErasureCoderFactory> getCoders(String codecName) {
 List<RawErasureCoderFactory> coders = coderMap.get(codecName);
-if (coders == null) {
-  throw new IllegalArgumentException("No available raw coder factory for "
-  + codecName);
-}
 return coders;
   }
 
@@ -164,7 +156,7 @@ public final class CodecRegistry {
* Get a specific coder factory defined by codec name and coder name.
* @param codecName name of the codec
* @param coderName name of the coder
-   * @return the specific coder
+   * @return the specific coder, null if not exist
*/
   public RawErasureCoderFactory getCoderByName(
   String codecName, String coderName) {
@@ -176,10 +168,7 @@ public final class CodecRegistry {
 return coder;
   }
 }
-
-// if not found, throw exception
-throw new IllegalArgumentException("No implementation for coder "
-+ coderName + " of codec " + codecName);
+return null;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a62be38a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/er
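With the exceptions removed, a lookup miss now surfaces as null, so callers must check before use. A sketch of the caller side; getCoderByName matches the signature in the hunk above, while getInstance() and the exception choice are our assumptions:

    import org.apache.hadoop.io.erasurecode.CodecRegistry;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;

    public class CoderLookup {
        static RawErasureCoderFactory requireCoder(String codec, String coder) {
            RawErasureCoderFactory factory =
                CodecRegistry.getInstance().getCoderByName(codec, coder);
            if (factory == null) {
                // The registry no longer throws; absence is handled here.
                throw new IllegalArgumentException(
                    "No implementation for coder " + coder + " of codec " + codec);
            }
            return factory;
        }
    }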

[23/50] [abbrv] hadoop git commit: HDFS-11879. Fix JN sync interval in case of exception. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11879. Fix JN sync interval in case of exception. Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11615631
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11615631
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11615631

Branch: refs/heads/YARN-1011
Commit: 11615631360ba49c1e9d256ed4f65119d99fd67d
Parents: 29b7df9
Author: Arpit Agarwal 
Authored: Thu May 25 14:01:53 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu May 25 14:01:53 2017 -0700

--
 .../hdfs/qjournal/server/JournalNodeSyncer.java | 40 
 1 file changed, 25 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11615631/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 99bd499..479f6a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -172,7 +172,6 @@ public class JournalNodeSyncer {
   } else {
 syncJournals();
   }
-  Thread.sleep(journalSyncInterval);
 } catch (Throwable t) {
   if (!shouldSync) {
 if (t instanceof InterruptedException) {
@@ -194,6 +193,17 @@ public class JournalNodeSyncer {
   LOG.error(
   "JournalNodeSyncer daemon received Runtime exception. ", t);
 }
+try {
+  Thread.sleep(journalSyncInterval);
+} catch (InterruptedException e) {
+  if (!shouldSync) {
+LOG.info("Stopping JournalNode Sync.");
+  } else {
+LOG.warn("JournalNodeSyncer interrupted", e);
+  }
+  Thread.currentThread().interrupt();
+  return;
+}
   }
 });
 syncJournalDaemon.start();
@@ -320,30 +330,30 @@ public class JournalNodeSyncer {
 
 List<RemoteEditLog> missingEditLogs = Lists.newArrayList();
 
-int thisJnIndex = 0, otherJnIndex = 0;
-int thisJnNumLogs = thisJournalEditLogs.size();
-int otherJnNumLogs = otherJournalEditLogs.size();
+int localJnIndex = 0, remoteJnIndex = 0;
+int localJnNumLogs = thisJournalEditLogs.size();
+int remoteJnNumLogs = otherJournalEditLogs.size();
 
-while (thisJnIndex < thisJnNumLogs && otherJnIndex < otherJnNumLogs) {
-  long localJNstartTxId = thisJournalEditLogs.get(thisJnIndex)
+while (localJnIndex < localJnNumLogs && remoteJnIndex < remoteJnNumLogs) {
+  long localJNstartTxId = thisJournalEditLogs.get(localJnIndex)
   .getStartTxId();
-  long remoteJNstartTxId = otherJournalEditLogs.get(otherJnIndex)
+  long remoteJNstartTxId = otherJournalEditLogs.get(remoteJnIndex)
   .getStartTxId();
 
   if (localJNstartTxId == remoteJNstartTxId) {
-thisJnIndex++;
-otherJnIndex++;
+localJnIndex++;
+remoteJnIndex++;
   } else if (localJNstartTxId > remoteJNstartTxId) {
-missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
-otherJnIndex++;
+missingEditLogs.add(otherJournalEditLogs.get(remoteJnIndex));
+remoteJnIndex++;
   } else {
-thisJnIndex++;
+localJnIndex++;
   }
 }
 
-if (otherJnIndex < otherJnNumLogs) {
-  for (; otherJnIndex < otherJnNumLogs; otherJnIndex++) {
-missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
+if (remoteJnIndex < remoteJnNumLogs) {
+  for (; remoteJnIndex < remoteJnNumLogs; remoteJnIndex++) {
+missingEditLogs.add(otherJournalEditLogs.get(remoteJnIndex));
   }
 }
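The first hunk moves the sleep out of the try block: an exception no longer skips the wait, so every iteration of the sync loop is spaced by exactly one interval, success or failure. The loop's shape, reduced to a runnable sketch (names are illustrative):

    public class SyncLoop {
        public static void main(String[] args) {
            final long journalSyncInterval = 50; // ms, illustrative
            Thread daemon = new Thread(() -> {
                for (int i = 0; i < 3; i++) {
                    try {
                        if (i == 1) {
                            throw new RuntimeException("sync failed");
                        }
                        System.out.println("synced");
                    } catch (Throwable t) {
                        System.out.println("sync error: " + t.getMessage());
                    }
                    try {
                        // Sleep after success and after failure, so a run of
                        // exceptions cannot turn this into a hot loop.
                        Thread.sleep(journalSyncInterval);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            });
            daemon.start();
        }
    }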
 





[42/50] [abbrv] hadoop git commit: HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

2017-05-31 Thread haibochen
HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to 
no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d6fe15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d6fe15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d6fe15

Branch: refs/heads/YARN-1011
Commit: 91d6fe151f2e3de21b0a9423ade921e771957d90
Parents: 62857be
Author: Lei Xu 
Authored: Tue May 30 11:09:03 2017 -0700
Committer: Lei Xu 
Committed: Tue May 30 11:10:12 2017 -0700

--
 .../datanode/TestDataNodeHotSwapVolumes.java| 34 ++--
 1 file changed, 32 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d6fe15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5aec174..b308ca9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
   int storagePerDataNode) throws IOException {
 shutdown();
-Configuration conf = new Configuration();
+conf = new Configuration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
 /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
 }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
   throws InterruptedException, TimeoutException, ReconfigurationException,
   IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
   1, fsVolumeReferences.size());
 }
 
+// Add a new DataNode to help with the pipeline recover.
+cluster.startDataNodes(conf, 1, true, null, null, null);
+
 // Verify the file has sufficient replications.
 DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
 // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
 if (!exceptions.isEmpty()) {
   throw new IOException(exceptions.get(0).getCause());
 }
+
+// Write more files to make sure that the DataNode that has removed volume
+// is still alive to receive data.
+for (int i = 0; i < 10; i++) {
+  final Path file = new Path("/after-" + i);
+  try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+rb.nextBytes(writeBuf);
+fout.write(writeBuf);
+  }
+}
+
+try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+.getFsVolumeReferences()) {
+  assertEquals("Volume remove wasn't successful.",
+  1, fsVolumeReferences.size());
+  FsVolumeSpi volume = fsVolumeReferences.get(0);
+  String bpid = cluster.getNamesystem().getBlockPoolId();
+  FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+  int blockCount = 0;
+  while (!blkIter.atEnd()) {
+blkIter.nextBlock();
+blockCount++;
+  }
+  assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
+  dataNodeIdx), blockCount > 1);
+}
   }
 
   @Test(timeout=60000)





[21/50] [abbrv] hadoop git commit: HDFS-11878. Fix journal missing log httpServerUrl address in JournalNodeSyncer. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11878. Fix journal missing log httpServerUrl address in JournalNodeSyncer. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fb41b31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fb41b31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fb41b31

Branch: refs/heads/YARN-1011
Commit: 4fb41b31dbc109f11898ea6d8fc0bb3e6c20d89b
Parents: 2e41f88
Author: Arpit Agarwal 
Authored: Thu May 25 10:42:24 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu May 25 10:42:24 2017 -0700

--
 .../hadoop/hdfs/qjournal/server/JournalNodeSyncer.java| 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fb41b31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 73defc2..99bd499 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -285,14 +285,8 @@ public class JournalNodeSyncer {
 boolean success = false;
 try {
   if (remoteJNproxy.httpServerUrl == null) {
-if (response.hasFromURL()) {
-  URI uri = URI.create(response.getFromURL());
-  remoteJNproxy.httpServerUrl = getHttpServerURI(uri.getScheme(),
-  uri.getHost(), uri.getPort());
-} else {
-  remoteJNproxy.httpServerUrl = getHttpServerURI("http",
-  remoteJNproxy.jnAddr.getHostName(), response.getHttpPort());
-}
+remoteJNproxy.httpServerUrl = getHttpServerURI("http",
+remoteJNproxy.jnAddr.getHostName(), response.getHttpPort());
   }
 
   String urlPath = GetJournalEditServlet.buildPath(jid, missingLog





[24/50] [abbrv] hadoop git commit: YARN-6643. TestRMFailover fails rarely due to port conflict. Contributed by Robert Kanter

2017-05-31 Thread haibochen
YARN-6643. TestRMFailover fails rarely due to port conflict. Contributed by 
Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fd6a2da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fd6a2da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fd6a2da

Branch: refs/heads/YARN-1011
Commit: 3fd6a2da4e537423d1462238e10cc9e1f698d1c2
Parents: 1161563
Author: Jason Lowe 
Authored: Thu May 25 16:07:52 2017 -0500
Committer: Jason Lowe 
Committed: Thu May 25 16:07:52 2017 -0500

--
 .../hadoop/yarn/server/resourcemanager/HATestUtil.java  | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd6a2da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
index 710ce87..ac245c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
@@ -18,16 +18,19 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
+import java.io.IOException;
+
 public class HATestUtil {
 
   public static void setRpcAddressForRM(String rmId, int base,
-  Configuration conf) {
+  Configuration conf) throws IOException {
 for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
-  setConfForRM(rmId, confKey, "0.0.0.0:" + (base +
-  YarnConfiguration.getRMDefaultPortNumber(confKey, conf)), conf);
+  setConfForRM(rmId, confKey, "0.0.0.0:" + ServerSocketUtil.getPort(base +
+  YarnConfiguration.getRMDefaultPortNumber(confKey, conf), 10), conf);
 }
   }
 





[36/50] [abbrv] hadoop git commit: HADOOP-14464. hadoop-aws doc header warning #5 line wrapped. Contributed by John Zhuge.

2017-05-31 Thread haibochen
HADOOP-14464. hadoop-aws doc header warning #5 line wrapped. Contributed by 
John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c6a7a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c6a7a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c6a7a59

Branch: refs/heads/YARN-1011
Commit: 6c6a7a59622ba7c1e4faa5534f4479de0cd84993
Parents: 31058b2
Author: John Zhuge 
Authored: Sat May 27 23:56:49 2017 -0700
Committer: John Zhuge 
Committed: Sun May 28 22:25:00 2017 -0700

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md| 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c6a7a59/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index e5aa431..8c8df1b 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -109,8 +109,7 @@ Do not inadvertently share these credentials through means 
such as
 
 If you do any of these: change your credentials immediately!
 
-### Warning #5: The S3 client provided by Amazon EMR are not from the Apache
-Software foundation, and are only supported by Amazon.
+### Warning #5: The S3 client provided by Amazon EMR are not from the Apache 
Software foundation, and are only supported by Amazon.
 
 Specifically: on Amazon EMR, s3a is not supported, and amazon recommend
 a different filesystem implementation. If you are using Amazon EMR, follow





[48/50] [abbrv] hadoop git commit: HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.

2017-05-31 Thread haibochen
HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar 
B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1543d0f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1543d0f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1543d0f5

Branch: refs/heads/YARN-1011
Commit: 1543d0f5be6a02ad00e7a33e35d78af8516043e3
Parents: cbfed0e
Author: Kihwal Lee 
Authored: Wed May 31 10:55:03 2017 -0500
Committer: Kihwal Lee 
Committed: Wed May 31 10:55:03 2017 -0500

--
 .../main/java/org/apache/hadoop/io/IOUtils.java | 55 +++-
 .../hdfs/server/datanode/BlockReceiver.java |  9 +++-
 .../hdfs/server/datanode/FileIoProvider.java| 19 ++-
 .../hdfs/server/datanode/LocalReplica.java  | 13 +
 .../server/datanode/fsdataset/FsDatasetSpi.java |  4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 ---
 .../server/datanode/SimulatedFSDataset.java |  3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  6 ++-
 .../server/datanode/TestSimulatedFSDataset.java |  4 +-
 .../extdataset/ExternalDatasetImpl.java |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  2 +-
 11 files changed, 130 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 0d2e797..ee7264b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.DirectoryIteratorException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Shell;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -357,4 +358,56 @@ public class IOUtils {
 }
 return list;
   }
+
+  /**
+   * Ensure that any writes to the given file are written to the storage device
+   * that contains it. This method opens a channel on the given file and closes
+   * it once the sync is done.
+   * Borrowed from Uwe Schindler in LUCENE-5588.
+   * @param fileToSync the file to fsync
+   */
+  public static void fsync(File fileToSync) throws IOException {
+if (!fileToSync.exists()) {
+  throw new FileNotFoundException(
+  "File/Directory " + fileToSync.getAbsolutePath() + " does not 
exist");
+}
+boolean isDir = fileToSync.isDirectory();
+// If the file is a directory we have to open read-only, for regular files
+// we must open r/w for the fsync to have an effect. See
+// http://blog.httrack.com/blog/2013/11/15/
+// everything-you-always-wanted-to-know-about-fsync/
+try(FileChannel channel = FileChannel.open(fileToSync.toPath(),
+isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)){
+  fsync(channel, isDir);
+}
+  }
+
+  /**
+   * Ensure that any writes made through the given channel are written to the
+   * storage device that contains its file.
+   * Borrowed from Uwe Schindler in LUCENE-5588.
+   * @param channel the channel to sync
+   * @param isDir if true, the given file is a directory (the channel should be
+   *  opened for read, and IOExceptions are ignored, because not all file
+   *  systems and operating systems allow fsync on a directory)
+   * @throws IOException if the sync fails and the target is not a directory
+   */
+  public static void fsync(FileChannel channel, boolean isDir)
+  throws IOException {
+try {
+  channel.force(true);
+} catch (IOException ioe) {
+  if (isDir) {
+assert !(Shell.LINUX
+|| Shell.MAC) : "On Linux and MacOSX fsyncing a directory"
++ " should not throw IOException, we just don't want to rely"
++ " on that in production (undocumented)" + ". Got: " + ioe;
+// Ignore exception if it is a directory
+return;
+  }
+  // Throw original exception
+  throw ioe;
+}
+  }
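
For callers, the pair of helpers above is the standard crash-durability recipe: force the file's bytes to the device, then fsync the parent directory so the directory entry survives a power failure as well. A minimal usage sketch against the new IOUtils.fsync(File) (the path and class name are illustrative):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class DurableWriteSketch {
  public static void main(String[] args) throws IOException {
    File data = new File("/tmp/blk_1234");
    try (FileOutputStream out = new FileOutputStream(data)) {
      out.write(new byte[]{1, 2, 3});
    }
    IOUtils.fsync(data);                  // force file bytes to the device
    IOUtils.fsync(data.getParentFile());  // persist the directory entry;
                                          // directory-fsync IOExceptions are
                                          // swallowed inside the helper
  }
}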

[31/50] [abbrv] hadoop git commit: Update maven version for 3.0.0-alpha4 development

2017-05-31 Thread haibochen
Update maven version for 3.0.0-alpha4 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ad896d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ad896d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ad896d

Branch: refs/heads/YARN-1011
Commit: 16ad896d5cb8ab21e9cb2763df7c15cfcc0a6ede
Parents: 303c8dc
Author: Andrew Wang 
Authored: Fri May 26 14:09:44 2017 -0700
Committer: Andrew Wang 
Committed: Fri May 26 14:09:44 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml

[17/50] [abbrv] hadoop git commit: Addendum patch to fix Docker sanitization.

2017-05-31 Thread haibochen
Addendum patch to fix Docker sanitization.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a56a3db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a56a3db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a56a3db

Branch: refs/heads/YARN-1011
Commit: 1a56a3db599659091284e3016d0309052966d018
Parents: bc28da6
Author: Varun Vasudev 
Authored: Wed May 24 16:03:28 2017 +0530
Committer: Varun Vasudev 
Committed: Thu May 25 14:53:57 2017 +0530

--
 .../impl/container-executor.c   |  6 +-
 .../test/test-container-executor.c  | 20 +---
 2 files changed, 6 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a56a3db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 3a87646..5d138f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1292,13 +1292,9 @@ char* sanitize_docker_command(const char *line) {
   }
 
   if(optind < split_counter) {
-quote_and_append_arg(&output, &output_size, "", linesplit[optind++]);
-strcat(output, "'");
 while(optind < split_counter) {
-  strcat(output, linesplit[optind++]);
-  strcat(output, " ");
+  quote_and_append_arg(&output, &output_size, "", linesplit[optind++]);
 }
-strcat(output, "'");
   }
 
   return output;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a56a3db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index ff76d4a..83d11ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1087,11 +1087,6 @@ void test_trim_function() {
 
 void test_sanitize_docker_command() {
 
-/*
-  char *input[] = {
-"run "
-  };
-*/
   char *input[] = {
 "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 
--cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL 
--cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP 
--cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID 
--cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE 
--cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro 
-v /yarn/local/cdir:/yarn/local/cdir -v 
/yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash 
/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
 "run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 
--cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL 
--cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP 
--cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID 
--cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE 
--cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro 
-v /yarn/local/cdir:/yarn/local/cdir -v 
/yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash 
/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
@@ -1099,17 +1094,12 @@ void test_sanitize_docker_command() {
 "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 
--cgroup-parent=/sys/fs/cgroup/cpu/yarn/ci

[41/50] [abbrv] hadoop git commit: HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

2017-05-31 Thread haibochen
HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by 
ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62857be2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62857be2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62857be2

Branch: refs/heads/YARN-1011
Commit: 62857be2110aaded84a93fc9891742a1271b2b85
Parents: af03c33
Author: Brahma Reddy Battula 
Authored: Wed May 31 01:07:58 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed May 31 01:07:58 2017 +0800

--
 .../src/main/java/org/apache/hadoop/crypto/OpensslCipher.java  | 4 ++--
 .../main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 2 +-
 .../src/main/java/org/apache/hadoop/fs/Options.java| 2 +-
 .../org/apache/hadoop/fs/shell/CommandWithDestination.java | 2 +-
 .../main/java/org/apache/hadoop/ha/ActiveStandbyElector.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java  | 2 +-
 .../src/main/java/org/apache/hadoop/io/SequenceFile.java   | 2 +-
 .../hadoop/io/compress/zlib/BuiltInGzipDecompressor.java   | 2 +-
 .../org/apache/hadoop/io/compress/zlib/ZlibCompressor.java | 6 +++---
 .../org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java   | 2 +-
 .../main/java/org/apache/hadoop/io/file/tfile/Compression.java | 2 +-
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java  | 2 +-
 .../src/main/java/org/apache/hadoop/ipc/Server.java| 2 +-
 .../main/java/org/apache/hadoop/security/SaslRpcServer.java| 4 ++--
 .../java/org/apache/hadoop/security/UserGroupInformation.java  | 2 +-
 .../main/java/org/apache/hadoop/security/ssl/SSLFactory.java   | 2 +-
 .../token/delegation/web/DelegationTokenAuthenticator.java | 2 +-
 .../src/main/java/org/apache/hadoop/util/StringUtils.java  | 2 +-
 .../test/java/org/apache/hadoop/fs/FileContextTestHelper.java  | 2 +-
 .../test/java/org/apache/hadoop/fs/FileSystemTestHelper.java   | 2 +-
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 .../org/apache/hadoop/io/retry/UnreliableImplementation.java   | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java | 2 +-
 .../src/main/java/org/apache/hadoop/mount/MountInterface.java  | 2 +-
 .../src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java | 2 +-
 .../main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java | 2 +-
 .../src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java | 2 +-
 27 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 2eb16ee..6a03bb6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -47,7 +47,7 @@ public final class OpensslCipher {
   public static final int DECRYPT_MODE = 0;
   
   /** Currently only support AES/CTR/NoPadding. */
-  private static enum AlgMode {
+  private enum AlgMode {
 AES_CTR;
 
 static int get(String algorithm, String mode) 
@@ -61,7 +61,7 @@ public final class OpensslCipher {
 }
   }
   
-  private static enum Padding {
+  private enum Padding {
 NoPadding;
 
 static int get(String padding) throws NoSuchPaddingException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 74052eb..8411ffb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -198,7 +198,7 @@ public class ValueQueue  {
* "n" values and Queue is empty.
* This decides how many values to return when client calls "getAtMost"
*/
-  public static enum SyncGenerationPolicy {
+  public enum SyncGenerationPolicy {
 ATLEAST_ONE, // Return atleast 1 value
 LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
 ALL // Return n values

http://git-wip-us.apache.org

[45/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread haibochen
YARN-6366. Refactor the NodeManager DeletionService to support additional 
DeletionTask types. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/547f18cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/547f18cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/547f18cb

Branch: refs/heads/YARN-1011
Commit: 547f18cb96aeda55cc19b38be2be4d631b3a5f4f
Parents: 4b4a652
Author: Varun Vasudev 
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Varun Vasudev 
Committed: Wed May 31 16:15:35 2017 +0530

--
 .../server/nodemanager/DeletionService.java | 468 ---
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java  |  73 +++
 .../deletion/recovery/package-info.java |  25 +
 .../deletion/task/DeletionTask.java | 258 ++
 .../deletion/task/DeletionTaskType.java |  24 +
 .../deletion/task/FileDeletionTask.java | 202 
 .../deletion/task/package-info.java |  25 +
 .../localizer/LocalResourcesTrackerImpl.java|  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java|  60 ++-
 .../loghandler/NonAggregatingLogHandler.java|   7 +-
 .../yarn_server_nodemanager_recovery.proto  |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java  |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java   |  91 
 .../BaseContainerManagerTest.java   |   7 +-
 .../deletion/task/FileDeletionMatcher.java  |  84 
 .../deletion/task/TestFileDeletionTask.java |  85 
 .../TestLocalResourcesTrackerImpl.java  |   5 +-
 .../TestResourceLocalizationService.java|  33 +-
 .../TestAppLogAggregatorImpl.java   |  15 +-
 .../TestLogAggregationService.java  |  17 +-
 .../TestNonAggregatingLogHandler.java   |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 
-import com.google.common.annotations.VisibleForT

[03/50] [abbrv] hadoop git commit: HDFS-11419. Performance analysis of new DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.

2017-05-31 Thread haibochen
HDFS-11419. Performance analysis of new DFSNetworkTopology#chooseRandom. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0f346af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0f346af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0f346af

Branch: refs/heads/YARN-1011
Commit: d0f346af26293f0ac8d118f98628f5528c1d6811
Parents: ca6bcc3
Author: Arpit Agarwal 
Authored: Mon May 22 20:25:34 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon May 22 20:25:34 2017 -0700

--
 .../net/TestDFSNetworkTopologyPerformance.java  | 524 +++
 1 file changed, 524 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0f346af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
new file mode 100644
index 000..77a059a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.net;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.NumberFormat;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Performance test of the new DFSNetworkTopology chooseRandom.
+ *
+ * NOTE that the tests are not for correctness but for performance comparison,
+ * so the tests print and record values rather than doing assertion
+ * checks or timeout checks. Therefore, it is pointless to run these
+ * tests without something reading the values, so the tests are disabled for
+ * now; anyone interested in looking at the numbers can enable them.
+ */
+@Ignore
+public class TestDFSNetworkTopologyPerformance {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestDFSNetworkTopologyPerformance.class);
+  private static NetworkTopology cluster;
+  private static DFSNetworkTopology dfscluster;
+  private DatanodeDescriptor[] dataNodes;
+
+  private final static int NODE_NUM = 2000;
+  private final static int OP_NUM = 20000;
+
+  private final static int L1_NUM = 5;
+  private final static int L2_NUM = 10;
+  private final static int L3_NUM = 10;
+
+  private final static float NS_TO_MS = 1000000;
+
+  private final static Random RANDOM = new Random();
+
+  private Node node;
+  private long totalStart;
+  private long totalEnd;
+  private int totalTrials;
+  private float totalMs;
+  private Set<Node> excluded;
+  private static String[] racks;
+  private static String[] hosts;
+  private static StorageType[] types;
+
+  private static long[] records;
+  private long localStart;
+  private long localEnd;
+
+
+  @BeforeClass
+  public static void init() throws Exception {
+racks = new String[NODE_NUM];
+hosts = new String[NODE_NUM];
+types = new StorageType[NODE_NUM];
+records = new long[OP_NUM];
+for (int i = 0; i < NODE_NUM; i++) {
+  racks[i] = getRandLocation();
+  hosts[i] = "host" + i;
+}
+  }
+
+  @Before
+  public void setup() throws Exception {
+cluster = Networ

[05/50] [abbrv] hadoop git commit: HDFS-11864. Document Metrics to track usage of memory for writes. Contributed by Yiqun Lin.

2017-05-31 Thread haibochen
HDFS-11864. Document Metrics to track usage of memory for writes. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52661e09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52661e09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52661e09

Branch: refs/heads/YARN-1011
Commit: 52661e0912a79d1e851afc2b46c941ce952ca63f
Parents: 1b5451b
Author: Brahma Reddy Battula 
Authored: Tue May 23 23:52:42 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Tue May 23 23:52:42 2017 +0800

--
 .../hadoop-common/src/site/markdown/Metrics.md   | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52661e09/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index bd44f74..a14c86d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -283,6 +283,21 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `WritesFromLocalClient` | Total number of write operations from local client |
 | `WritesFromRemoteClient` | Total number of write operations from remote client |
 | `BlocksGetLocalPathInfo` | Total number of operations to get local path names of blocks |
+| `RamDiskBlocksWrite` | Total number of blocks written to memory |
+| `RamDiskBlocksWriteFallback` | Total number of blocks written to memory but not satisfied (failed-over to disk) |
+| `RamDiskBytesWrite` | Total number of bytes written to memory |
+| `RamDiskBlocksReadHits` | Total number of times a block in memory was read |
+| `RamDiskBlocksEvicted` | Total number of blocks evicted in memory |
+| `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory without ever being read from memory |
+| `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory |
+| `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory before being evicted in milliseconds |
+| `RamDiskBlocksEvictionWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by lazy writer |
+| `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted by application before being persisted to disk |
+| `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy writer |
+| `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk by lazy writer |
+| `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written to disk by lazy writer in milliseconds |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |
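
Note that the *PercentileLatency rows above stay empty until sampling windows are configured; only the NumOps/AvgTime counters are always on. A minimal sketch of enabling the percentiles programmatically (the 60- and 300-second windows are an arbitrary illustration, not from the patch):

import org.apache.hadoop.conf.Configuration;

public class PercentileIntervalsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Values are rolling-window lengths in seconds; with no intervals set,
    // percentile measurement stays off, exactly as the table notes.
    conf.set("dfs.metrics.percentiles.intervals", "60,300");
  }
}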





[07/50] [abbrv] hadoop git commit: YARN-6615. AmIpFilter drops query parameters on redirect. Contributed by Wilfred Spiegelenburg

2017-05-31 Thread haibochen
YARN-6615. AmIpFilter drops query parameters on redirect. Contributed by 
Wilfred Spiegelenburg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf1949c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf1949c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf1949c

Branch: refs/heads/YARN-1011
Commit: 8bf1949c0efed700781eb47cf18f9f88443ed506
Parents: a62be38
Author: Jason Lowe 
Authored: Wed May 24 11:22:42 2017 -0500
Committer: Jason Lowe 
Committed: Wed May 24 11:22:42 2017 -0500

--
 .../hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java | 6 ++
 .../hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java   | 8 
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf1949c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index fe6fc32..6579191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -150,6 +150,12 @@ public class AmIpFilter implements Filter {
 insertPoint += PROXY_PATH.length();
 redirect.insert(insertPoint, "/redirect");
   }
+  // add the query parameters on the redirect if there were any
+  String queryString = httpReq.getQueryString();
+  if (queryString != null && !queryString.isEmpty()) {
+redirect.append("?");
+redirect.append(queryString);
+  }
 
   ProxyUtils.sendRedirect(httpReq, httpResp, redirect.toString());
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf1949c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
index 9dc0ce0..b788f5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
@@ -176,6 +176,14 @@ public class TestAmFilter {
 redirect = response.getHeader(ProxyUtils.LOCATION);
 assertEquals("http://bogus/proxy/redirect/application_00_0";, redirect);
 
+// check for query parameters
+Mockito.when(request.getRequestURI()).thenReturn("/proxy/application_00_0");
+Mockito.when(request.getQueryString()).thenReturn("id=0");
+testFilter.doFilter(request, response, chain);
+assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, response.status);
+redirect = response.getHeader(ProxyUtils.LOCATION);
+assertEquals("http://bogus/proxy/redirect/application_00_0?id=0";, 
redirect);
+
 // "127.0.0.1" contains in host list. Without cookie
 Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
 testFilter.doFilter(request, response, chain);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2017-05-31 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/153498bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/153498bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/153498bc

Branch: refs/heads/YARN-1011
Commit: 153498bc3adb830f3ae37825fed856fae22eea16
Parents: 4369690
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Wed May 31 14:25:41 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  45 +++-
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   6 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 455 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/153498bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5e4c826..bb34626 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1575,7 +1575,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for applicaiton life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -1583,6 +1582,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/153498bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e687eef..c131eec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1563,6 +1563,27 @@
   </property>
 
   <property>
+    <description>The extent of over-allocation (container-allocation based on
+      current utilization instead of prior allocation) allowed on this node,
+      expressed as a float between 0 and 0.95. By default, over-allocation is
+      turned off (value = 0). When turned on, the node allows running
+      OPPORTUNISTIC containers when the aggregate utilization is under the
+      value specified here multiplied by the node's advertised capacity.
+    </description>
+    <name>yarn.nodemanager.overallocation.allocation-threshold</name>
+    <value>0f</value>
+  </property>
+
+  <property>
+    <description>When a node is over-allocated to improve utilization by
+      running OPPORTUNISTIC containers, this conf
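
Reading the two knobs together: the allocation threshold decides when OPPORTUNISTIC containers may be started on utilization headroom, and the preemption threshold decides when they get preempted back. A sketch of enabling both through the constants this patch adds (the 0.75/0.90 values are arbitrary illustrations; the hard cap on the allocation threshold is 0.95):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class OverAllocationSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Start OPPORTUNISTIC containers while utilization < 75% of capacity...
    conf.setFloat(
        YarnConfiguration.NM_OVERALLOCATION_ALLOCATION_THRESHOLD, 0.75f);
    // ...and begin preempting them once utilization crosses 90%.
    conf.setFloat(
        YarnConfiguration.NM_OVERALLOCATION_PREEMPTION_THRESHOLD, 0.90f);
  }
}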

[29/50] [abbrv] hadoop git commit: YARN-6646. Modifier 'static' is redundant for inner enums (Contributed by ZhangBing Lin via Daniel Templeton)

2017-05-31 Thread haibochen
YARN-6646. Modifier 'static' is redundant for inner enums
(Contributed by ZhangBing Lin via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81372df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81372df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81372df

Branch: refs/heads/YARN-1011
Commit: d81372dfad32488e7c46ffcfccdf0aa26bee04a5
Parents: aea4293
Author: Daniel Templeton 
Authored: Fri May 26 12:05:48 2017 -0700
Committer: Daniel Templeton 
Committed: Fri May 26 12:05:48 2017 -0700

--
 .../hadoop/yarn/api/records/timelineservice/TimelineMetric.java  | 2 +-
 .../yarn/applications/distributedshell/ApplicationMaster.java| 4 ++--
 .../hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java | 4 ++--
 .../ApplicationHistoryManagerOnTimelineStore.java| 2 +-
 .../hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java   | 2 +-
 .../yarn/server/nodemanager/CMgrCompletedContainersEvent.java| 2 +-
 .../org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java | 2 +-
 .../apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java | 2 +-
 .../resourcemanager/metrics/AbstractSystemMetricsPublisher.java  | 2 +-
 .../server/timelineservice/reader/filter/TimelineFilterList.java | 2 +-
 10 files changed, 12 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index 5c908d6..2fa6d30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -43,7 +43,7 @@ public class TimelineMetric {
   /**
* Type of metric.
*/
-  public static enum Type {
+  public enum Type {
 SINGLE_VALUE,
 TIME_SERIES
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 4daebb5..ab4607a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -179,13 +179,13 @@ public class ApplicationMaster {
 
   @VisibleForTesting
   @Private
-  public static enum DSEvent {
+  public enum DSEvent {
 DS_APP_ATTEMPT_START, DS_APP_ATTEMPT_END, DS_CONTAINER_START, 
DS_CONTAINER_END
   }
   
   @VisibleForTesting
   @Private
-  public static enum DSEntity {
+  public enum DSEntity {
 DS_APP_ATTEMPT, DS_CONTAINER
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
index 515a8e8..20be71e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
@@ -406,7 +406,7 @@ public class NMClientAsyncImpl 

[34/50] [abbrv] hadoop git commit: HADOOP-14442. Owner support for ranger-wasb integration. Contributed by Varada Hemeswari

2017-05-31 Thread haibochen
HADOOP-14442. Owner support for ranger-wasb integration. Contributed by Varada 
Hemeswari


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89bb8bfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89bb8bfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89bb8bfe

Branch: refs/heads/YARN-1011
Commit: 89bb8bfe582ba85566cede321b233bb642f1c675
Parents: bd6a217
Author: Mingliang Liu 
Authored: Fri May 26 17:52:56 2017 -0700
Committer: Mingliang Liu 
Committed: Fri May 26 17:54:00 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  48 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  12 +-
 .../fs/azure/WasbAuthorizerInterface.java   |   3 +-
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  52 ++--
 .../TestNativeAzureFileSystemAuthorization.java |  90 --
 ...veAzureFileSystemAuthorizationWithOwner.java | 122 +++
 6 files changed, 247 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0ba47ef..b61baab 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -200,7 +200,7 @@ public class NativeAzureFileSystem extends FileSystem {
   JsonNode oldFolderName = json.get("OldFolderName");
   JsonNode newFolderName = json.get("NewFolderName");
   if (oldFolderName == null || newFolderName == null) {
- this.committed = false;
+this.committed = false;
   } else {
 this.srcKey = oldFolderName.textValue();
 this.dstKey = newFolderName.textValue();
@@ -349,7 +349,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   return contents;
 }
-
+
 /**
  * This is an exact copy of org.codehaus.jettison.json.JSONObject.quote 
  * method.
@@ -639,7 +639,7 @@ public class NativeAzureFileSystem extends FileSystem {
 return "wasb";
   }
 
-  
+
   /**
* 
 * A {@link FileSystem} for reading and writing files stored on

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index ea08b2b..3c912d7 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -87,6 +87,12 @@ public class RemoteWasbAuthorizerImpl implements 
WasbAuthorizerInterface {
   private static final String DELEGATION_TOKEN_QUERY_PARAM_NAME =
   "delegation";
 
+  /**
+   *  Query parameter name for sending the owner of the specific resource {@value}.
+   */
+  private static final String WASB_RESOURCE_OWNER_QUERY_PARAM_NAME =
+  "wasb_resource_owner";
+
   private WasbRemoteCallHelper remoteCallHelper = null;
   private String delegationToken;
   private boolean isSecurityEnabled;
@@ -119,7 +125,7 @@ public class RemoteWasbAuthorizerImpl implements 
WasbAuthorizerInterface {
   }
 
   @Override
-  public boolean authorize(String wasbAbsolutePath, String accessType)
+  public boolean authorize(String wasbAbsolutePath, String accessType, String 
resourceOwner)
   throws WasbAuthorizationException, IOException {
 
   try {
@@ -140,6 +146,10 @@ public class RemoteWasbAuthorizerImpl implements 
WasbAuthorizerInterface {
   uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
   delegationToken);
 }
+if (resourceOwner != null && StringUtils.isNotEmpty(resourceOwner)) {
+  uriBuilder.addParameter(WASB_RESOURCE_OWNER_QUERY_PARAM_NAME,
+  resourceOwner);
+}
 
 String responseBody = null;
 UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java
--
diff --git 
a/hadoop-tools/ha
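
For callers of the extended interface, the new third argument carries the owner of the resource being accessed so that owner-based Ranger policies can match. A hedged sketch of a call site (the path, the "read" access-type string, and the wiring are illustrative; only the three-argument authorize signature comes from this patch):

import java.io.IOException;

import org.apache.hadoop.fs.azure.WasbAuthorizationException;
import org.apache.hadoop.fs.azure.WasbAuthorizerInterface;

public class WasbAuthorizeSketch {
  // `authorizer` is assumed to be a configured RemoteWasbAuthorizerImpl.
  static void checkRead(WasbAuthorizerInterface authorizer, String owner)
      throws WasbAuthorizationException, IOException {
    String path = "/data/part-00000";
    if (!authorizer.authorize(path, "read", owner)) {  // owner may be null
      throw new WasbAuthorizationException("read access denied on " + path);
    }
  }
}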

[14/50] [abbrv] hadoop git commit: HADOOP-14426. Upgrade Kerby version from 1.0.0-RC2 to 1.0.0. Contributed by Jiajia Li.

2017-05-31 Thread haibochen
HADOOP-14426. Upgrade Kerby version from 1.0.0-RC2 to 1.0.0. Contributed by 
Jiajia Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d049bd2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d049bd2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d049bd2a

Branch: refs/heads/YARN-1011
Commit: d049bd2a8654c9e46c9111f9d38794644908ac5a
Parents: b8b69d7
Author: Wei-Chiu Chuang 
Authored: Wed May 24 18:34:48 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed May 24 18:34:48 2017 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d049bd2a/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1d97cfc..5e1da34 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1259,7 +1259,7 @@
 
       <groupId>org.apache.kerby</groupId>
       <artifactId>kerb-simplekdc</artifactId>
-      <version>1.0.0-RC2</version>
+      <version>1.0.0</version>
     </dependency>
 
     <dependency>





[16/50] [abbrv] hadoop git commit: YARN-6141. ppc64le on Linux doesn't trigger __linux get_executable codepath. Contributed by Sonia Garudi and Ayappan.

2017-05-31 Thread haibochen
YARN-6141. ppc64le on Linux doesn't trigger __linux get_executable codepath. 
Contributed by Sonia Garudi and Ayappan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc28da65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc28da65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc28da65

Branch: refs/heads/YARN-1011
Commit: bc28da65fb1c67904aa3cefd7273cb7423521014
Parents: 6a52b5e
Author: Akira Ajisaka 
Authored: Thu May 25 17:06:26 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu May 25 17:06:26 2017 +0900

--
 .../src/main/native/container-executor/impl/get_executable.c   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc28da65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c
index 49ae093..ce46b77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c
@@ -142,7 +142,7 @@ char* get_executable(char *argv0) {
   return __get_exec_sysctl(mib);
 }
 
-#elif defined(__linux)
+#elif defined(__linux__)
 
 
 char* get_executable(char *argv0) {
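A small standalone probe (an illustrative sketch, not part of the patch) makes the difference visible: __linux__ is the conventional macro that Linux compilers define universally, while the bare __linux spelling is a legacy alias that some toolchains, reportedly including the ppc64le one that motivated this fix, do not provide.

/* probe.c - check which Linux macros this toolchain predefines.
 * Illustrative sketch; build with: gcc probe.c && ./a.out */
#include <stdio.h>

int main(void) {
#ifdef __linux__
  puts("__linux__ defined");
#else
  puts("__linux__ NOT defined");
#endif
#ifdef __linux
  puts("__linux defined");
#else
  puts("__linux NOT defined");
#endif
  return 0;
}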





[32/50] [abbrv] hadoop git commit: Update 3.0.0-alpha3 changes, release notes, jdiff.

2017-05-31 Thread haibochen
Update 3.0.0-alpha3 changes, release notes, jdiff.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cd612ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cd612ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cd612ba

Branch: refs/heads/YARN-1011
Commit: 2cd612ba8e3b84ddf41acf7b1beb0a4757a2465b
Parents: 16ad896
Author: Andrew Wang 
Authored: Fri May 26 14:14:38 2017 -0700
Committer: Andrew Wang 
Committed: Fri May 26 14:14:38 2017 -0700

--
 .../3.0.0-alpha3/CHANGES.3.0.0-alpha3.md|  71 
 .../3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md   |  22 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml   | 326 +++
 3 files changed, 419 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
new file mode 100644
index 000..61d63e0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
@@ -0,0 +1,71 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha3 - 2017-05-25
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-6336](https://issues.apache.org/jira/browse/YARN-6336) | Jenkins report YARN new UI build failure |  Blocker | . | Junping Du | Sunil G |
+| [YARN-6278](https://issues.apache.org/jira/browse/YARN-6278) | Enforce to use correct node and npm version in new YARN-UI build |  Critical | . | Sunil G | Sunil G |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
new file mode 100644
index 000..bda1807
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
@@ -0,0 +1,22 @@
+
+
+# "Apache Hadoop"  3.0.0-alpha3 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
new file mode 100644
index 000..cadf733
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
@@ -0,0 +1,326 @@
+[326 lines of generated jdiff XML for Apache_Hadoop_HDFS_3.0.0-alpha3; the markup was stripped by the archive and nothing recoverable remains]

[04/50] [abbrv] hadoop git commit: HDFS-11794. Add ec sub command -listCodec to show currently supported ec codecs. Contributed by SammiChen.

2017-05-31 Thread haibochen
HDFS-11794. Add ec sub command -listCodec to show currently supported ec 
codecs. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b5451bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b5451bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b5451bf

Branch: refs/heads/YARN-1011
Commit: 1b5451bf054c335188e4cd66f7b4a1d80013e86d
Parents: d0f346a
Author: Rakesh Radhakrishnan 
Authored: Tue May 23 17:03:28 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue May 23 17:03:28 2017 +0530

--
 .../hadoop/io/erasurecode/CodecRegistry.java| 17 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +++
 .../hadoop/hdfs/DistributedFileSystem.java  | 13 +
 .../hadoop/hdfs/protocol/ClientProtocol.java| 13 -
 .../ClientNamenodeProtocolTranslatorPB.java | 23 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  8 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  2 +
 .../src/main/proto/erasurecoding.proto  | 15 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 23 
 .../server/namenode/FSDirErasureCodingOp.java   | 14 +
 .../hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 59 +++-
 .../src/site/markdown/HDFSErasureCoding.md  |  7 ++-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 17 ++
 .../test/resources/testErasureCodingConf.xml| 29 ++
 16 files changed, 264 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5451bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index 4cb051d..d53d598 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -55,9 +55,14 @@ public final class CodecRegistry {
 
  private Map<String, String[]> coderNameMap;
 
+  // Protobuf 2.5.0 doesn't support the map type well, so use
+  // the compact value instead.
+  private HashMap<String, String> coderNameCompactMap;
+
   private CodecRegistry() {
 coderMap = new HashMap<>();
 coderNameMap = new HashMap<>();
+coderNameCompactMap = new HashMap<>();
 final ServiceLoader<RawErasureCoderFactory> coderFactories =
 ServiceLoader.load(RawErasureCoderFactory.class);
 updateCoders(coderFactories);
@@ -113,6 +118,9 @@ public final class CodecRegistry {
   coderNameMap.put(codecName, coders.stream().
   map(RawErasureCoderFactory::getCoderName).
   collect(Collectors.toList()).toArray(new String[0]));
+  coderNameCompactMap.put(codecName, coders.stream().
+  map(RawErasureCoderFactory::getCoderName)
+  .collect(Collectors.joining(", ")));
 }
   }
 
@@ -173,4 +181,13 @@ public final class CodecRegistry {
 throw new IllegalArgumentException("No implementation for coder "
 + coderName + " of codec " + codecName);
   }
+
+  /**
+   * Get all codec names and their corresponding coder list.
+   * @return a map of all codec names, and their corresponding coder list
+   * separated by ','.
+   */
+  public HashMap<String, String> getCodec2CoderCompactMap() {
+return coderNameCompactMap;
+  }
 }
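A minimal sketch of the compact representation the new map stores, mirroring the Collectors.joining call above without the registry wiring; the two coder names for the "rs" codec are hypothetical.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class CompactCodecMapDemo {
  public static void main(String[] args) {
    // Hypothetical coder implementations backing the "rs" codec.
    List<String> coders = Arrays.asList("rs_native", "rs_java");
    Map<String, String> compact = new HashMap<>();
    // One comma-separated value per codec, since protobuf 2.5.0 lacks
    // first-class map support for shipping this structure over the wire.
    compact.put("rs", coders.stream().collect(Collectors.joining(", ")));
    System.out.println(compact); // {rs=rs_native, rs_java}
  }
}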

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5451bf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7de8b71..2e7dc1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2763,6 +2763,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 }
   }
 
+  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+checkOpen();
+try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
+  return namenode.getErasureCodingCodecs();
+}
+  }
+
   public AddingECPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/h
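As a hedged usage sketch of the new client call above: assuming the DistributedFileSystem wrapper listed in the diffstat exposes the same map (the wrapper method name below is an assumption), listing the supported codecs could look like this.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ListCodecsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Assumed wrapper over DFSClient#getErasureCodingCodecs().
      Map<String, String> codecs = dfs.getAllErasureCodingCodecs();
      codecs.forEach((codec, coders) ->
          System.out.println(codec + " -> " + coders));
    }
  }
}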

[15/50] [abbrv] hadoop git commit: HADOOP-14180. FileSystem contract tests to replace JUnit 3 with 4. Contributed by Xiaobing Zhou and Mingliang Liu.

2017-05-31 Thread haibochen
HADOOP-14180. FileSystem contract tests to replace JUnit 3 with 4. Contributed 
by Xiaobing Zhou and Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a52b5e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a52b5e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a52b5e1

Branch: refs/heads/YARN-1011
Commit: 6a52b5e14495c5b2e0257aec65e61acd43aef309
Parents: d049bd2
Author: Akira Ajisaka 
Authored: Thu May 25 15:36:14 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu May 25 15:36:44 2017 +0900

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 142 ---
 .../hadoop/hdfs/TestHDFSFileSystemContract.java |  12 +-
 .../hdfs/web/TestWebHdfsFileSystemContract.java |  18 ++-
 .../oss/TestAliyunOSSFileSystemContract.java| 130 -
 .../fs/s3a/ITestS3AFileSystemContract.java  |  21 +--
 .../NativeS3FileSystemContractBaseTest.java |  26 +++-
 .../adl/live/TestAdlFileSystemContractLive.java |  25 ++--
 ...stNativeAzureFileSystemContractEmulator.java |  20 ++-
 .../TestNativeAzureFileSystemContractLive.java  |  32 +++--
 ...TestNativeAzureFileSystemContractMocked.java |  11 +-
 ...tiveAzureFileSystemContractPageBlobLive.java |  25 ++--
 .../fs/swift/TestSwiftFileSystemContract.java   |  16 ++-
 12 files changed, 280 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a52b5e1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 040e9c8..92e2135 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -22,8 +22,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 
-import junit.framework.TestCase;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -32,6 +30,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
+
+import org.junit.After;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.Timeout;
+
 /**
  * 
  * A collection of tests for the contract of the {@link FileSystem}.
@@ -41,11 +48,11 @@ import org.apache.hadoop.util.StringUtils;
  * 
  * 
  * To test a given {@link FileSystem} implementation create a subclass of this
- * test and override {@link #setUp()} to initialize the fs 
+ * test and add a @Before method to initialize the fs
  * {@link FileSystem} instance variable.
  * 
  */
-public abstract class FileSystemContractBaseTest extends TestCase {
+public abstract class FileSystemContractBaseTest {
   private static final Logger LOG =
   LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
@@ -53,8 +60,13 @@ public abstract class FileSystemContractBaseTest extends TestCase {
   protected FileSystem fs;
   protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
 
-  @Override
-  protected void tearDown() throws Exception {
+  @Rule
+  public Timeout globalTimeout = new Timeout(30 * 1000);
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @After
+  public void tearDown() throws Exception {
 if (fs != null) {
   // some cases use this absolute path
   if (rootDirTestEnabled()) {
@@ -63,7 +75,6 @@ public abstract class FileSystemContractBaseTest extends TestCase {
   // others use this relative path against test base directory
   cleanupDir(getTestBaseDir());
 }
-super.tearDown();
   }
 
   private void cleanupDir(Path p) {
@@ -131,6 +142,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 return true;
   }
 
+  @Test
   public void testFsStatus() throws Exception {
 FsStatus fsStatus = fs.getStatus();
 assertNotNull(fsStatus);
@@ -140,6 +152,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 assertTrue(fsStatus.getCapacity() >= 0);
   }
   
+  @Test
   public void testWorkingDirectory() throws Exception {
 
 Path workDir = path(getDefaultWorkingDirectory());
@@ -160,7 +173,8 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 assertEquals(absoluteDir, fs.getWorkingDirectory());
 
   }
-  
+
+  @Test
   public 
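A minimal sketch of the JUnit 3 to 4 migration pattern the patch applies across these contract tests; class and fixture names are illustrative, not the Hadoop ones.

import static org.junit.Assert.assertTrue;

import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class ExampleMigratedTest {            // no longer "extends TestCase"
  @Rule
  public Timeout globalTimeout = Timeout.millis(30000); // rule replaces inherited timeouts

  private StringBuilder fixture;

  @Before                                     // replaces JUnit 3's setUp()
  public void setUp() {
    fixture = new StringBuilder("ready");
  }

  @After                                      // replaces JUnit 3's tearDown()
  public void tearDown() {
    fixture = null;
  }

  @Test                                       // JUnit 3 relied on the "test" name prefix
  public void testFixtureIsReady() {
    assertTrue(fixture.toString().startsWith("ready"));
  }
}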

[37/50] [abbrv] hadoop git commit: HDFS-11832. Switch leftover logs to slf4j format in BlockManager.java. Contributed by Hui Xu and Chen Liang.

2017-05-31 Thread haibochen
HDFS-11832. Switch leftover logs to slf4j format in BlockManager.java. 
Contributed by Hui Xu and Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f085d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f085d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f085d6

Branch: refs/heads/YARN-1011
Commit: a7f085d6bf499edf23e650a4f7211c53a442da0e
Parents: 6c6a7a5
Author: Akira Ajisaka 
Authored: Mon May 29 17:30:23 2017 +0900
Committer: Akira Ajisaka 
Committed: Mon May 29 17:30:23 2017 +0900

--
 .../server/blockmanagement/BlockManager.java| 153 ---
 .../blockmanagement/InvalidateBlocks.java   |  17 +--
 2 files changed, 72 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f085d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f0c12cd..42ee850 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -498,14 +498,13 @@ public class BlockManager implements BlockStatsMXBean {
 
 bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf);
 
-LOG.info("defaultReplication = " + defaultReplication);
-LOG.info("maxReplication = " + maxReplication);
-LOG.info("minReplication = " + minReplication);
-LOG.info("maxReplicationStreams  = " + maxReplicationStreams);
-LOG.info("redundancyRecheckInterval  = " + redundancyRecheckIntervalMs +
-"ms");
-LOG.info("encryptDataTransfer= " + encryptDataTransfer);
-LOG.info("maxNumBlocksToLog  = " + maxNumBlocksToLog);
+LOG.info("defaultReplication = {}", defaultReplication);
+LOG.info("maxReplication = {}", maxReplication);
+LOG.info("minReplication = {}", minReplication);
+LOG.info("maxReplicationStreams  = {}", maxReplicationStreams);
+LOG.info("redundancyRecheckInterval  = {}ms", redundancyRecheckIntervalMs);
+LOG.info("encryptDataTransfer= {}", encryptDataTransfer);
+LOG.info("maxNumBlocksToLog  = {}", maxNumBlocksToLog);
   }
 
   private static BlockTokenSecretManager createBlockTokenSecretManager(
@@ -513,7 +512,8 @@ public class BlockManager implements BlockStatsMXBean {
 final boolean isEnabled = conf.getBoolean(
 DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
 DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
-LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + 
isEnabled);
+LOG.info("{} = {}", DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
+isEnabled);
 
 if (!isEnabled) {
   if (UserGroupInformation.isSecurityEnabled()) {
@@ -534,12 +534,10 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
 final String encryptionAlgorithm = conf.get(
 DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
-LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
-+ "=" + updateMin + " min(s), "
-+ DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
-+ "=" + lifetimeMin + " min(s), "
-+ DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
-+ "=" + encryptionAlgorithm);
+LOG.info("{}={} min(s), {}={} min(s), {}={}",
+DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, updateMin,
+DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, lifetimeMin,
+DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
 
 String nsId = DFSUtil.getNamenodeNameServiceId(conf);
 boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
@@ -692,8 +690,8 @@ public class BlockManager implements BlockStatsMXBean {
  Collection<DatanodeDescriptor> corruptNodes =
   corruptReplicas.getNodes(block);
   if (corruptNodes == null) {
-LOG.warn(block.getBlockId() +
-" is corrupt but has no associated node.");
+LOG.warn("{} is corrupt but has no associated node.",
+ block.getBlockId());
 continue;
   }
   int numNodesToFind = corruptNodes.size();
@@ -1156,9 +1154,9 @@ public class BlockManager implements BlockStatsMXBean {
 final int numCorruptNodes = numReplicas.corruptReplicas();
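The point of the conversion, shown as a small self-contained sketch: with string concatenation the message is always built, while slf4j's {} placeholders defer formatting until the logger knows the event will actually be emitted.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jStyleDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jStyleDemo.class);

  public static void main(String[] args) {
    int maxReplication = 512;
    LOG.info("maxReplication = " + maxReplication);  // eager: concatenates even if INFO is off
    LOG.info("maxReplication = {}", maxReplication); // lazy: formats only when enabled
  }
}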
  

[08/50] [abbrv] hadoop git commit: HADOOP-14449. The ASF Header in ComparableVersion.java and SSLHostnameVerifier.java is not correct. Contributed by ZhangBing Lin.

2017-05-31 Thread haibochen
HADOOP-14449. The ASF Header in ComparableVersion.java and 
SSLHostnameVerifier.java is not correct. Contributed by ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7a0c0e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7a0c0e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7a0c0e7

Branch: refs/heads/YARN-1011
Commit: b7a0c0e7019528fac492e1cb32d374ed014f8673
Parents: 8bf1949
Author: Brahma Reddy Battula 
Authored: Thu May 25 01:03:22 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Thu May 25 01:03:22 2017 +0800

--
 .../security/ssl/SSLHostnameVerifier.java   | 11 
 .../apache/hadoop/util/ComparableVersion.java   | 29 ++--
 2 files changed, 15 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a0c0e7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
index 27e4920..47546b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
@@ -1,9 +1,4 @@
 /*
- * $HeadURL$
- * $Revision$
- * $Date$
- *
- * 
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,12 +15,6 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- * 
- *
- * This software consists of voluntary contributions made by many
- * individuals on behalf of the Apache Software Foundation.  For more
- * information on the Apache Software Foundation, please see
- * .
  *
  */
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a0c0e7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
index 9d34518..1f34291 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
@@ -1,17 +1,3 @@
-// Code source of this file: 
-//   http://grepcode.com/file/repo1.maven.org/maven2/
-// org.apache.maven/maven-artifact/3.1.1/
-//   org/apache/maven/artifact/versioning/ComparableVersion.java/
-//
-// Modifications made on top of the source:
-//   1. Changed
-//package org.apache.maven.artifact.versioning;
-//  to
-//package org.apache.hadoop.util;
-//   2. Removed author tags to clear hadoop author tag warning
-//
-package org.apache.hadoop.util;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,6 +17,21 @@ package org.apache.hadoop.util;
  * under the License.
  */
 
+// Code source of this file:
+//   http://grepcode.com/file/repo1.maven.org/maven2/
+// org.apache.maven/maven-artifact/3.1.1/
+//   org/apache/maven/artifact/versioning/ComparableVersion.java/
+//
+// Modifications made on top of the source:
+//   1. Changed
+//package org.apache.maven.artifact.versioning;
+//  to
+//package org.apache.hadoop.util;
+//   2. Removed author tags to clear hadoop author tag warning
+//
+package org.apache.hadoop.util;
+
+
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;





[40/50] [abbrv] hadoop git commit: YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.

2017-05-31 Thread haibochen
YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af03c333
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af03c333
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af03c333

Branch: refs/heads/YARN-1011
Commit: af03c3334610bc4d8788e7c7b21d5aa6b946fe26
Parents: 07e60f8
Author: Sunil G 
Authored: Tue May 30 13:52:40 2017 +0530
Committer: Sunil G 
Committed: Tue May 30 13:52:40 2017 +0530

--
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../webapp/app/controllers/yarn-app-attempt.js  |   8 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  57 --
 .../src/main/webapp/app/controllers/yarn-app.js |  56 +++---
 .../webapp/app/controllers/yarn-app/attempts.js |  24 +++
 .../webapp/app/controllers/yarn-app/charts.js   |  28 +++
 .../webapp/app/controllers/yarn-app/info.js |  32 
 .../app/controllers/yarn-apps/services.js   |  31 
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/router.js   |   8 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  35 
 .../src/main/webapp/app/routes/yarn-app.js  |  35 +---
 .../main/webapp/app/routes/yarn-app/attempts.js |  37 
 .../main/webapp/app/routes/yarn-app/charts.js   |  53 ++
 .../src/main/webapp/app/routes/yarn-app/info.js |  37 
 .../webapp/app/routes/yarn-apps/services.js |  33 
 .../main/webapp/app/templates/application.hbs   |   2 +-
 .../app/templates/components/app-table.hbs  |   6 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  58 --
 .../src/main/webapp/app/templates/yarn-app.hbs  | 185 +--
 .../webapp/app/templates/yarn-app/attempts.hbs  |  29 +++
 .../webapp/app/templates/yarn-app/charts.hbs|  43 +
 .../main/webapp/app/templates/yarn-app/info.hbs | 167 +
 .../webapp/app/templates/yarn-app/loading.hbs   |  23 +++
 .../src/main/webapp/app/templates/yarn-apps.hbs |   5 +-
 .../webapp/app/templates/yarn-apps/services.hbs |  25 ---
 .../main/webapp/app/templates/yarn-services.hbs |   3 +-
 .../unit/controllers/yarn-app-attempts-test.js  |  30 ---
 .../unit/controllers/yarn-app/attempts-test.js  |  30 +++
 .../unit/controllers/yarn-app/charts-test.js|  30 +++
 .../unit/controllers/yarn-app/info-test.js  |  30 +++
 .../unit/controllers/yarn-apps/services-test.js |  30 ---
 .../tests/unit/routes/yarn-app-attempts-test.js |  29 ---
 .../tests/unit/routes/yarn-app/attempts-test.js |  29 +++
 .../tests/unit/routes/yarn-app/charts-test.js   |  29 +++
 .../tests/unit/routes/yarn-app/info-test.js |  29 +++
 36 files changed, 714 insertions(+), 578 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 704abfb..8a34f1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -38,7 +38,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.id,
-  href: `#/yarn-app/${row.id}`
+  href: `#/yarn-app/${row.id}/info`
 };
   }
   }, {
@@ -112,7 +112,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('appName'),
-  href: `#/yarn-app/${row.id}?service=${row.get('appName')}`
+  href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index fbe6fa9..1121a84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -34,10 +34,10 @@ export default Ember.Controller.extend({
 

[43/50] [abbrv] hadoop git commit: YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)

2017-05-31 Thread haibochen
YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b4a6524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b4a6524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b4a6524

Branch: refs/heads/YARN-1011
Commit: 4b4a6524f2df3a891e9d5486ec39f7987766d84f
Parents: 91d6fe1
Author: Haibo Chen 
Authored: Tue May 30 16:58:15 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 30 16:58:15 2017 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4a6524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
index 0858a0b..ce5a513 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -26,7 +26,6 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
  * event handlers based on event types.
  * 
  */
-@SuppressWarnings("rawtypes")
 @Public
 @Evolving
 public interface Dispatcher {
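For illustration, a tiny sketch of what such a suppression hides and why fully parameterized declarations make it unnecessary; the types below are illustrative, not the YARN Dispatcher signatures.

import java.util.ArrayList;
import java.util.List;

public class RawTypesDemo {
  @SuppressWarnings("rawtypes")
  static List rawList() {           // raw type: warns without the suppression
    return new ArrayList();
  }

  static List<String> typedList() { // parameterized: no warning, nothing to suppress
    return new ArrayList<>();
  }

  public static void main(String[] args) {
    System.out.println(rawList().size() + typedList().size());
  }
}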





[38/50] [abbrv] hadoop git commit: MAPREDUCE-6887. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

2017-05-31 Thread haibochen
MAPREDUCE-6887. Modifier 'static' is redundant for inner enums. Contributed by 
ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4015f86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4015f86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4015f86

Branch: refs/heads/YARN-1011
Commit: d4015f8628dd973c7433639451a9acc3e741d2a2
Parents: a7f085d
Author: Akira Ajisaka 
Authored: Tue May 30 14:48:58 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue May 30 14:48:58 2017 +0900

--
 .../hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java  | 2 +-
 .../main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java| 4 ++--
 .../src/main/java/org/apache/hadoop/mapred/FileInputFormat.java  | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java| 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobInProgress.java| 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobTracker.java   | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/Task.java | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/TaskLog.java  | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/TaskStatus.java   | 4 ++--
 .../main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/Cluster.java   | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/Job.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/mapreduce/JobStatus.java | 2 +-
 .../org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java   | 2 +-
 .../apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java| 2 +-
 .../org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java   | 2 +-
 .../org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java | 2 +-
 .../java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java| 2 +-
 .../java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java| 2 +-
 .../java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java| 2 +-
 .../org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java| 2 +-
 .../src/test/java/org/apache/hadoop/RandomTextWriterJob.java | 2 +-
 .../src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java| 2 +-
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java| 4 ++--
 .../test/java/org/apache/hadoop/fs/slive/OperationOutput.java| 2 +-
 .../src/test/java/org/apache/hadoop/fs/slive/PathFinder.java | 2 +-
 .../java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java| 2 +-
 .../src/test/java/org/apache/hadoop/mapred/MRBench.java  | 2 +-
 .../test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java | 2 +-
 .../java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java | 2 +-
 .../src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java   | 2 +-
 .../test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java  | 2 +-
 .../src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java  | 2 +-
 .../org/apache/hadoop/mapreduce/TimelineServicePerformance.java  | 2 +-
 .../main/java/org/apache/hadoop/examples/RandomTextWriter.java   | 2 +-
 .../src/main/java/org/apache/hadoop/examples/RandomWriter.java   | 2 +-
 .../main/java/org/apache/hadoop/examples/terasort/TeraGen.java   | 2 +-
 38 files changed, 42 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 58fd7b5..cb4f0c9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -102,7 +102,7 @@ public class ContainerLauncherImpl extends AbstractService implements
 }
   }
   
-  private static enum ContainerState {
+  private enum ContainerState {
 PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
   }
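A short sketch of why the modifier is redundant: a nested enum is implicitly static, so it is usable from a static context either way (illustrative class, mirroring the ContainerState change above).

public class EnumNestingDemo {
  private enum ContainerState {   // identical to "private static enum ContainerState"
    PREP, RUNNING, DONE
  }

  public static void main(String[] args) {
    // Accessible from a static context, proving the enum is static by default.
    System.out.println(ContainerState.PREP);
  }
}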
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/u

[22/50] [abbrv] hadoop git commit: HDFS-11856. Ability to re-add Upgrading Nodes to pipeline for future pipeline updates. Contributed by Vinayakumar B.

2017-05-31 Thread haibochen
HDFS-11856. Ability to re-add Upgrading Nodes to pipeline for future pipeline 
updates. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29b7df96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29b7df96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29b7df96

Branch: refs/heads/YARN-1011
Commit: 29b7df960fc3d0a7d1416225c3106c7d4222f0ca
Parents: 4fb41b3
Author: Kihwal Lee 
Authored: Thu May 25 13:04:09 2017 -0500
Committer: Kihwal Lee 
Committed: Thu May 25 13:05:23 2017 -0500

--
 .../hadoop/hdfs/DFSClientFaultInjector.java |  4 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 70 +++
 .../hdfs/server/datanode/BlockReceiver.java |  6 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  2 +-
 .../impl/FsDatasetAsyncDiskService.java | 14 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 85 --
 .../TestClientProtocolForPipelineRecovery.java  | 92 
 .../server/datanode/SimulatedFSDataset.java |  6 +-
 .../server/datanode/TestSimulatedFSDataset.java |  2 +-
 .../extdataset/ExternalDatasetImpl.java |  3 +-
 .../fsdataset/impl/TestWriteToReplica.java  | 20 +++--
 11 files changed, 241 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29b7df96/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 4eb4c52..748edcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -57,4 +57,8 @@ public class DFSClientFaultInjector {
   public void fetchFromDatanodeException() {}
 
   public void readFromDatanodeDelay() {}
+
+  public boolean skipRollingRestartWait() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29b7df96/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 49c17b9..f5ce0ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -327,6 +327,7 @@ class DataStreamer extends Daemon {
   static class ErrorState {
 ErrorType error = ErrorType.NONE;
 private int badNodeIndex = -1;
+private boolean waitForRestart = true;
 private int restartingNodeIndex = -1;
 private long restartingNodeDeadline = 0;
 private final long datanodeRestartTimeout;
@@ -342,6 +343,7 @@ class DataStreamer extends Daemon {
   badNodeIndex = -1;
   restartingNodeIndex = -1;
   restartingNodeDeadline = 0;
+  waitForRestart = true;
 }
 
 synchronized void reset() {
@@ -349,6 +351,7 @@ class DataStreamer extends Daemon {
   badNodeIndex = -1;
   restartingNodeIndex = -1;
   restartingNodeDeadline = 0;
+  waitForRestart = true;
 }
 
 synchronized boolean hasInternalError() {
@@ -389,14 +392,19 @@ class DataStreamer extends Daemon {
   return restartingNodeIndex;
 }
 
-synchronized void initRestartingNode(int i, String message) {
+synchronized void initRestartingNode(int i, String message,
+boolean shouldWait) {
   restartingNodeIndex = i;
-  restartingNodeDeadline =  Time.monotonicNow() + datanodeRestartTimeout;
-  // If the data streamer has already set the primary node
-  // bad, clear it. It is likely that the write failed due to
-  // the DN shutdown. Even if it was a real failure, the pipeline
-  // recovery will take care of it.
-  badNodeIndex = -1;
+  if (shouldWait) {
+restartingNodeDeadline = Time.monotonicNow() + datanodeRestartTimeout;
+// If the data streamer has already set the primary node
+// bad, clear it. It is likely that the write failed due to
+// the DN shutdown. Even if it was a real failure, the pipeline
+// recovery will take care of it.
+badNodeIndex = -1;
+  } else {
+this.waitForRestart = false;
+  }
   LOG.info(message);
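A rough sketch of the deadline pattern above, assuming Time.monotonicNow() can be stood in for with a monotonic millisecond clock; the field names mirror the diff, and the timeout value is illustrative.

public class RestartDeadlineDemo {
  private static final long DATANODE_RESTART_TIMEOUT_MS = 30_000; // illustrative
  private long restartingNodeDeadline;
  private boolean waitForRestart = true;

  private static long monotonicNowMs() {         // stand-in for Time.monotonicNow()
    return System.nanoTime() / 1_000_000;
  }

  synchronized void initRestartingNode(boolean shouldWait) {
    if (shouldWait) {
      restartingNodeDeadline = monotonicNowMs() + DATANODE_RESTART_TIMEOUT_MS;
    } else {
      waitForRestart = false;                    // re-addable later, but not waited for
    }
  }

  synchronized boolean deadlinePassed() {
    return waitForRestart && monotonicNowMs() > restartingNodeDeadline;
  }

  public static void main(String[] args) {
    RestartDeadlineDemo d = new RestartDeadlineDemo();
    d.initRestartingNode(true);
    System.out.println("deadline passed? " + d.deadlinePassed()); // false
  }
}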

[18/50] [abbrv] hadoop git commit: HADOOP-14399. Configuration does not correctly XInclude absolute file URIs. Contributed by Jonathan Eagles

2017-05-31 Thread haibochen
HADOOP-14399. Configuration does not correctly XInclude absolute file URIs.
Contributed by Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ba9704e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ba9704e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ba9704e

Branch: refs/heads/YARN-1011
Commit: 1ba9704eec22c75f8aec653ee15eb6767b5a7f4b
Parents: 1a56a3d
Author: Steve Loughran 
Authored: Thu May 25 14:59:33 2017 +0100
Committer: Steve Loughran 
Committed: Thu May 25 14:59:33 2017 +0100

--
 .../org/apache/hadoop/conf/Configuration.java   | 37 
 .../apache/hadoop/conf/TestConfiguration.java   | 23 +---
 2 files changed, 42 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba9704e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 2ac52cb..1a6679b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2714,6 +2714,7 @@ public class Configuration implements Iterable<Map.Entry<String, String>>,
   StringBuilder token = new StringBuilder();
   String confName = null;
   String confValue = null;
+  String confInclude = null;
   boolean confFinal = false;
   boolean fallbackAllowed = false;
   boolean fallbackEntered = false;
@@ -2757,7 +2758,7 @@ public class Configuration implements Iterable<Map.Entry<String, String>>,
 break;
   case "include":
 // Determine href for xi:include
-String confInclude = null;
+confInclude = null;
 attrCount = reader.getAttributeCount();
 for (int i = 0; i < attrCount; i++) {
   String attrName = reader.getAttributeLocalName(i);
@@ -2776,18 +2777,25 @@ public class Configuration implements Iterable<Map.Entry<String, String>>,
   Resource classpathResource = new Resource(include, name);
   loadResource(properties, classpathResource, quiet);
 } else {
-  File href = new File(confInclude);
-  if (!href.isAbsolute()) {
-// Included resources are relative to the current resource
-File baseFile = new File(name).getParentFile();
-href = new File(baseFile, href.getPath());
+  URL url;
+  try {
+url = new URL(confInclude);
+url.openConnection().connect();
+  } catch (IOException ioe) {
+File href = new File(confInclude);
+if (!href.isAbsolute()) {
+  // Included resources are relative to the current resource
+  File baseFile = new File(name).getParentFile();
+  href = new File(baseFile, href.getPath());
+}
+if (!href.exists()) {
+  // Resource errors are non-fatal iff there is 1 xi:fallback
+  fallbackAllowed = true;
+  break;
+}
+url = href.toURI().toURL();
   }
-  if (!href.exists()) {
-// Resource errors are non-fatal iff there is 1 xi:fallback
-fallbackAllowed = true;
-break;
-  }
-  Resource uriResource = new Resource(href.toURI().toURL(), name);
+  Resource uriResource = new Resource(url, name);
   loadResource(properties, uriResource, quiet);
 }
 break;
@@ -2828,8 +2836,9 @@ public class Configuration implements Iterable<Map.Entry<String, String>>,
 break;
   case "include":
 if (fallbackAllowed && !fallbackEntered) {
-  throw new IOException("Fetch fail on include with no "
-  + "fallback while loading '" + name + "'");
+  throw new IOException("Fetch fail on include for '"
+  + confInclude + "' with no fallback while loading '"
+  + name + "'");
 }
 fallbackAllowed = false;
 fallbackEntered = false;
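The resolution order the patch introduces, as a self-contained sketch: treat the href as a URL first, and only on failure fall back to file-path resolution relative to the including resource. Method and file names are illustrative.

import java.io.File;
import java.io.IOException;
import java.net.URL;

public class IncludeResolveDemo {
  static URL resolveInclude(String confInclude, String includingFile)
      throws IOException {
    try {
      URL url = new URL(confInclude);   // handles absolute URIs like file:///etc/x.xml
      url.openConnection().connect();   // verify the target is actually reachable
      return url;
    } catch (IOException ioe) {         // malformed or unreachable: treat as a file path
      File href = new File(confInclude);
      if (!href.isAbsolute()) {         // relative to the including resource's directory
        href = new File(new File(includingFile).getParentFile(), href.getPath());
      }
      return href.toURI().toURL();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(
        resolveInclude("extra-site.xml", "/etc/hadoop/core-site.xml"));
  }
}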

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba9704e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-pro

[01/50] [abbrv] hadoop git commit: YARN-6493. Print requested node partition in assignContainer logs. (Jonathan Hung via wangda) [Forced Update!]

2017-05-31 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 6b65947e4 -> 153498bc3 (forced update)


YARN-6493. Print requested node partition in assignContainer logs. (Jonathan 
Hung via wangda)

Change-Id: Ib20326daac10acad363befb0ae0998441fa5b32d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e0f83e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e0f83e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e0f83e4

Branch: refs/heads/YARN-1011
Commit: 8e0f83e49a8987cf45a72c8a9bb8587b86e4c0ed
Parents: c583ab0
Author: Wangda Tan 
Authored: Mon May 22 14:28:55 2017 -0700
Committer: Wangda Tan 
Committed: Mon May 22 14:28:55 2017 -0700

--
 .../capacity/allocator/AbstractContainerAllocator.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e0f83e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
index a411f17..5809d86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
@@ -126,7 +126,9 @@ public abstract class AbstractContainerAllocator {
 LOG.info("assignedContainer" + " application attempt=" + application
 .getApplicationAttemptId() + " container=" + updatedContainer
 .getContainerId() + " queue=" + this + " clusterResource="
-+ clusterResource + " type=" + assignment.getType());
++ clusterResource + " type=" + assignment.getType()
++ " requestedPartition="
++ updatedContainer.getNodeLabelExpression());
 
 assignment.getAssignmentInformation().addAllocationDetails(
 updatedContainer, application.getCSLeafQueue().getQueuePath());
@@ -183,4 +185,4 @@ public abstract class AbstractContainerAllocator {
   public abstract CSAssignment assignContainers(Resource clusterResource,
   PlacementSet ps, SchedulingMode schedulingMode,
   ResourceLimits resourceLimits, RMContainer reservedContainer);
-}
\ No newline at end of file
+}





[33/50] [abbrv] hadoop git commit: HDFS-11891. DU#refresh should print the path of the directory when an exception is caught. Contributed by Chen Liang.

2017-05-31 Thread haibochen
HDFS-11891. DU#refresh should print the path of the directory when an exception 
is caught. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd6a2172
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd6a2172
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd6a2172

Branch: refs/heads/YARN-1011
Commit: bd6a2172e0442e5f02bad9bc5f0568045f57bd32
Parents: 2cd612b
Author: Arpit Agarwal 
Authored: Fri May 26 16:02:40 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri May 26 16:02:40 2017 -0700

--
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd6a2172/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index b64a19d..6e374c9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -52,7 +52,8 @@ public class DU extends CachingGetSpaceUsed {
 try {
   duShell.startRefresh();
 } catch (IOException ioe) {
-  LOG.warn("Could not get disk usage information", ioe);
+  LOG.warn("Could not get disk usage information for path {}",
+  getDirPath(), ioe);
 }
   }
 





[20/50] [abbrv] hadoop git commit: HDFS-11445. FSCK shows overall health status as corrupt even if one replica is corrupt. Contributed by Brahma Reddy Battula.

2017-05-31 Thread haibochen
HDFS-11445. FSCK shows overall health status as corrupt even if one replica is 
corrupt. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e41f880
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e41f880
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e41f880

Branch: refs/heads/YARN-1011
Commit: 2e41f8803dd46d1bab16c1b206c71be72ea260a1
Parents: 8bf0e2d
Author: Brahma Reddy Battula 
Authored: Thu May 25 22:35:10 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Thu May 25 22:35:10 2017 +0800

--
 .../hdfs/server/blockmanagement/BlockInfo.java  | 18 +--
 .../server/blockmanagement/BlockManager.java| 26 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 32 
 4 files changed, 63 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e41f880/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index df9cdc3..e9d235c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
@@ -286,28 +285,25 @@ public abstract class BlockInfo extends Block
* Process the recorded replicas. When about to commit or finish the
* pipeline recovery sort out bad replicas.
* @param genStamp  The final generation stamp for the block.
   * @return the list of stale replicas.
*/
-  public void setGenerationStampAndVerifyReplicas(long genStamp) {
+  public List<ReplicaUnderConstruction> setGenerationStampAndVerifyReplicas(
+  long genStamp) {
 Preconditions.checkState(uc != null && !isComplete());
 // Set the generation stamp for the block.
 setGenerationStamp(genStamp);
 
-// Remove the replicas with wrong gen stamp
-List<ReplicaUnderConstruction> staleReplicas = uc.getStaleReplicas(genStamp);
-for (ReplicaUnderConstruction r : staleReplicas) {
-  r.getExpectedStorageLocation().removeBlock(this);
-  NameNode.blockStateChangeLog.debug("BLOCK* Removing stale replica {}"
-  + " of {}", r, Block.toString(r));
-}
+return uc.getStaleReplicas(genStamp);
   }
 
   /**
* Commit block's length and generation stamp as reported by the client.
* Set block state to {@link BlockUCState#COMMITTED}.
* @param block - contains client reported block length and generation
   * @return the list of stale replicas.
* @throws IOException if block ids are inconsistent.
*/
-  void commitBlock(Block block) throws IOException {
+  List<ReplicaUnderConstruction> commitBlock(Block block) throws IOException {
 if (getBlockId() != block.getBlockId()) {
   throw new IOException("Trying to commit inconsistent block: id = "
   + block.getBlockId() + ", expected id = " + getBlockId());
@@ -316,6 +312,6 @@ public abstract class BlockInfo extends Block
 uc.commit();
 this.setNumBytes(block.getNumBytes());
 // Sort out invalid replicas.
-setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
+return setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e41f880/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a9592bf..f0c12cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -848,7 +848,7 @@ public class BlockManager implements BlockStatsMXBean {
  

hadoop git commit: HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl

2017-05-31 Thread virajith
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-9806 1fd812443 -> fc467d6bc


HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc467d6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc467d6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc467d6b

Branch: refs/heads/HDFS-9806
Commit: fc467d6bc1c0a8088b4c46114bd5201388c416dd
Parents: 1fd8124
Author: Virajith Jalaparti 
Authored: Wed May 31 15:17:12 2017 -0700
Committer: Virajith Jalaparti 
Committed: Wed May 31 15:17:12 2017 -0700

--
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  6 +-
 .../fsdataset/impl/TestProvidedImpl.java| 94 ++--
 2 files changed, 92 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc467d6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index a48e117..421b9cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -191,7 +191,11 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
 
   @Override
   long getBlockPoolUsed(String bpid) throws IOException {
-return df.getBlockPoolUsed(bpid);
+if (bpSlices.containsKey(bpid)) {
+  return df.getBlockPoolUsed(bpid);
+} else {
+  throw new IOException("block pool " + bpid + " is not found");
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc467d6b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index 2c119fe..4753235 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -83,6 +83,7 @@ public class TestProvidedImpl {
   private static final String BASE_DIR =
   new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_LOCAL_INIT_VOLUMES = 1;
+  //only support one provided volume for now.
   private static final int NUM_PROVIDED_INIT_VOLUMES = 1;
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
   private static final int NUM_PROVIDED_BLKS = 10;
@@ -208,6 +209,39 @@ public class TestProvidedImpl {
 }
   }
 
+  public static class TestProvidedVolumeDF
+  implements ProvidedVolumeDF, Configurable {
+
+@Override
+public void setConf(Configuration conf) {
+}
+
+@Override
+public Configuration getConf() {
+  return null;
+}
+
+@Override
+public long getCapacity() {
+  return Long.MAX_VALUE;
+}
+
+@Override
+public long getSpaceUsed() {
+  return -1;
+}
+
+@Override
+public long getBlockPoolUsed(String bpid) {
+  return -1;
+}
+
+@Override
+public long getAvailable() {
+  return Long.MAX_VALUE;
+}
+  }
+
   private static Storage.StorageDirectory createLocalStorageDirectory(
   File root, Configuration conf)
   throws SecurityException, IOException {
@@ -299,8 +333,8 @@ public class TestProvidedImpl {
   public void setUp() throws IOException {
 datanode = mock(DataNode.class);
 storage = mock(DataStorage.class);
-this.conf = new Configuration();
-this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
+conf = new Configuration();
+conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
 
 when(datanode.getConf()).thenReturn(conf);
 final DNConf dnConf = new DNConf(datanode);
@@ -312,8 +346,10 @@ public class TestProvidedImpl {
 new ShortCircuitRegistry(conf);
 when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
 
-this.conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
   
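TestProvidedVolumeDF implements Configurable so the volume code can instantiate it reflectively from configuration and hand it the conf. The usual Hadoop idiom for that wiring, sketched with a hypothetical config key since the real key is cut off by the truncation above (imports of Configuration and ReflectionUtils elided):

    // Sketch: how a Configurable ProvidedVolumeDF is typically created.
    // "hypothetical.provided.df.class" stands in for the real config key.
    Class<? extends ProvidedVolumeDF> dfClass = conf.getClass(
        "hypothetical.provided.df.class",
        TestProvidedImpl.TestProvidedVolumeDF.class, ProvidedVolumeDF.class);
    // ReflectionUtils.newInstance() calls setConf() on Configurable classes.
    ProvidedVolumeDF df = ReflectionUtils.newInstance(dfClass, conf);
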

hadoop git commit: HDFS-11903. Ozone: Cleaning up local storage when closing MiniOzoneCluster. Contributed by Mingliang Liu

2017-05-31 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 9cb63fdba -> 9e2873db2


HDFS-11903. Ozone: Cleaning up local storage when closing MiniOzoneCluster. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e2873db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e2873db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e2873db

Branch: refs/heads/HDFS-7240
Commit: 9e2873db29f763b46f06a05692679c81c81797d1
Parents: 9cb63fd
Author: Mingliang Liu 
Authored: Wed May 31 15:00:09 2017 -0700
Committer: Mingliang Liu 
Committed: Wed May 31 15:00:09 2017 -0700

--
 .../java/org/apache/hadoop/ozone/MiniOzoneCluster.java| 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2873db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 5cfcaff..bc1ade9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone;
 
+import java.io.File;
 import java.util.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
@@ -126,6 +127,15 @@ public final class MiniOzoneCluster extends MiniDFSCluster
   String errorMessage = "Cleaning up metadata directories failed." + e;
   assertFalse(errorMessage, true);
 }
+
+try {
+  final String localStorage =
+  conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+  OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+  FileUtils.deleteDirectory(new File(localStorage));
+} catch (IOException e) {
+  LOG.error("Cleaning up local storage failed", e);
+}
   }
 
   @Override

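With this change, closing the cluster removes the local storage root along with the metadata directories, so repeated test runs start clean. A hedged usage sketch of the pattern that relies on it (the Builder-style construction is an assumption about the surrounding MiniOzoneCluster API; run inside a test method declared "throws Exception"):

    // Hypothetical test pattern depending on the cleanup added above.
    MiniOzoneCluster cluster = null;
    try {
      cluster = new MiniOzoneCluster.Builder(conf).build(); // assumed builder
      // ... exercise Ozone handlers against the cluster ...
    } finally {
      if (cluster != null) {
        cluster.close(); // now also deletes OZONE_LOCALSTORAGE_ROOT
      }
    }
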




hadoop git commit: HDFS-11774. Ozone: KSM: add deleteVolume. Contributed by Mukul Kumar Singh.

2017-05-31 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 3983e94f0 -> 9cb63fdba


HDFS-11774. Ozone: KSM: add deleteVolume. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cb63fdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cb63fdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cb63fdb

Branch: refs/heads/HDFS-7240
Commit: 9cb63fdba0f10b3f42afb4a139f14e37664ee618
Parents: 3983e94
Author: Xiaoyu Yao 
Authored: Wed May 31 14:21:17 2017 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 31 14:21:17 2017 -0700

--
 ...ceManagerProtocolClientSideTranslatorPB.java | 17 +-
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java | 20 +++
 .../hadoop/ozone/ksm/KeySpaceManager.java   |  8 ++-
 .../hadoop/ozone/ksm/MetadataManager.java   |  6 ++
 .../hadoop/ozone/ksm/MetadataManagerImpl.java   | 19 ++
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |  8 +++
 .../hadoop/ozone/ksm/VolumeManagerImpl.java | 51 ++--
 .../ozone/ksm/exceptions/KSMException.java  |  1 +
 ...ceManagerProtocolServerSideTranslatorPB.java | 11 +++-
 .../web/storage/DistributedStorageHandler.java  |  2 +-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   | 62 
 11 files changed, 195 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb63fdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
index 15b633c..edc9101 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
@@ -59,6 +59,10 @@ import org.apache.hadoop.ozone.protocol.proto
 import org.apache.hadoop.ozone.protocol.proto
 .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
 import org.apache.hadoop.ozone.protocol.proto
+.KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+.KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
 .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
 import org.apache.hadoop.ozone.protocol.proto
 .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
@@ -233,7 +237,18 @@ public final class KeySpaceManagerProtocolClientSideTranslatorPB
*/
   @Override
   public void deleteVolume(String volume) throws IOException {
-
+DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
+req.setVolumeName(volume);
+final DeleteVolumeResponse resp;
+try {
+  resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
+} catch (ServiceException e) {
+  throw ProtobufHelper.getRemoteException(e);
+}
+if (resp.getStatus() != Status.OK) {
+  throw new
+  IOException("Delete Volume failed, error:" + resp.getStatus());
+}
   }
 
   /**

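From the caller's side the translator hides protobuf entirely: a non-OK status surfaces as a plain IOException. A hedged sketch of the resulting client contract ("ksmClient" stands for an already-constructed translator instance; construction is elided):

    // Sketch: caller's view of the new deleteVolume() RPC.
    try {
      ksmClient.deleteVolume("volume-one");
    } catch (IOException e) {
      // Thrown when resp.getStatus() != Status.OK, e.g. the volume is
      // missing or not empty; the status name is embedded in the message.
      System.err.println("delete failed: " + e.getMessage());
    }
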
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb63fdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
index 31e4756..88ffb1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
@@ -32,6 +32,7 @@ public class KSMMetrics {
   private @Metric MutableCounterLong numVolumeModifies;
   private @Metric MutableCounterLong numVolumeInfos;
   private @Metric MutableCounterLong numBucketCreates;
+  private @Metric MutableCounterLong numVolumeDeletes;
   private @Metric MutableCounterLong numBucketInfos;
   private @Metric MutableCounterLong numBucketModifies;
   private @Metric MutableCounterLong numKeyAllocate;
@@ -41,6 +42,7 @@ public class KSMMetrics {
   private @Metric MutableCounterLong numVolumeCreateFails;
   private @Metric MutableCounterLong numVolumeModifyFails;
   private @Metric MutableCounterLong numVolumeInfoFails;
+  private @Metric MutableCounterLong numVolumeDeleteFails;
   pri

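The counters come in attempt/failure pairs, matching the existing volume metrics. A hedged sketch of how KeySpaceManager presumably drives the new pair (the incrementer names are assumptions extrapolated from the naming convention visible above):

    // Hypothetical sketch of the metrics convention: count every attempt,
    // and count failures separately, so successes = deletes - deleteFails.
    try {
      metrics.incNumVolumeDeletes();      // assumed incrementer name
      volumeManager.deleteVolume(volume);
    } catch (IOException ex) {
      metrics.incNumVolumeDeleteFails();  // assumed incrementer name
      throw ex;
    }
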
hadoop git commit: YARN-3666. Federation Intercepting and propagating AM- home RM communications. (Botong Huang via Subru).

2017-05-31 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 ef5ffa9e7 -> a573beca3


YARN-3666. Federation Intercepting and propagating AM- home RM communications. 
(Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a573beca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a573beca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a573beca

Branch: refs/heads/YARN-2915
Commit: a573beca3d579ad7b53a0fabd90e116d5a506ffe
Parents: ef5ffa9
Author: Subru Krishnan 
Authored: Wed May 31 13:21:09 2017 -0700
Committer: Subru Krishnan 
Committed: Wed May 31 13:21:09 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   7 +
 .../amrmproxy/FederationInterceptor.java| 510 +++
 .../amrmproxy/BaseAMRMProxyTest.java|  24 +-
 .../amrmproxy/TestFederationInterceptor.java| 165 ++
 .../TestableFederationInterceptor.java  | 133 +
 5 files changed, 829 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a573beca/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index ee51094..034f03c 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -594,4 +594,11 @@
 
   
 
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a573beca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
new file mode 100644
index 000..5f82d69
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -0,0 +1,510 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
+import org.apache.hadoop.yarn.except

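FederationInterceptor plugs into the NodeManager's AMRMProxy chain, so enabling it is a configuration change rather than an AM code change. A hedged wiring sketch (the pipeline property name follows the AMRMProxy convention and should be verified against YarnConfiguration on this branch):

    // Hypothetical wiring: put FederationInterceptor into the NodeManager's
    // AMRMProxy interceptor pipeline (comma-separated, outermost first).
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.nodemanager.amrmproxy.interceptor-class.pipeline",
        "org.apache.hadoop.yarn.server.nodemanager.amrmproxy."
            + "FederationInterceptor");
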
[1/2] hadoop git commit: HDFS-11648. Lazy construct the IIP pathname. Contributed by Daryn Sharp.

2017-05-31 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 fd834915f -> 1903665b2


HDFS-11648. Lazy construct the IIP pathname. Contributed by Daryn Sharp.

(cherry picked from commit 8ed230c805625549b1cecc830e909a7027bb4961)
(cherry picked from commit 9dfe0b35158cc0046a82cbfea2218a4ad61f329e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1903665b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1903665b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1903665b

Branch: refs/heads/branch-2.7
Commit: 1903665b232fce562fffd63c2da80f1d7f9c4ff2
Parents: 81712e8
Author: Kihwal Lee 
Authored: Wed Apr 12 13:30:41 2017 -0500
Committer: Zhe Zhang 
Committed: Wed May 31 08:50:26 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/INodesInPath.java   | 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1903665b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c5940cb..e5567e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -99,6 +99,9 @@ Release 2.7.4 - UNRELEASED
 
HDFS-10619. Cache path in InodesInPath. (daryn via kihwal, backported by zhz)
 
+HDFS-11648. Lazy construct the IIP pathname.
+(daryn via kihwal, backported by zhz)
+
   OPTIMIZATIONS
 
HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1903665b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 0badf6d..90b6dbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -270,7 +270,7 @@ public class INodesInPath {
   }
 
   private final byte[][] path;
-  private final String pathname;
+  private volatile String pathname;
 
   /**
* Array with the specified number of INodes resolved for a given path.
@@ -293,7 +293,6 @@ public class INodesInPath {
 Preconditions.checkArgument(inodes != null && path != null);
 this.inodes = inodes;
 this.path = path;
-this.pathname = DFSUtil.byteArray2PathString(path);
 this.isSnapshot = isSnapshot;
 this.snapshotId = snapshotId;
   }
@@ -349,6 +348,9 @@ public class INodesInPath {
 
   /** @return the full path in string form */
   public String getPath() {
+if (pathname == null) {
+  pathname = DFSUtil.byteArray2PathString(path);
+}
 return pathname;
   }
 

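The fix is the classic benign-race lazy initialization: pathname becomes volatile and is computed on the first getPath() call, and because byteArray2PathString() is deterministic, racing threads can only ever publish the same value. The idiom in isolation, as a hedged standalone sketch (only the volatile field and the getPath() shape come from the diff; the rest is generic):

    // Generic form of the benign-race lazy init used above. Safe only
    // because the computation is deterministic and side-effect free.
    class LazyPath {
      private final byte[][] path;
      private volatile String pathname;   // no longer assigned eagerly

      LazyPath(byte[][] path) {
        this.path = path;
      }

      String getPath() {
        if (pathname == null) {
          pathname = join(path);          // stands in for byteArray2PathString
        }
        return pathname;
      }

      private static String join(byte[][] components) {
        StringBuilder sb = new StringBuilder();
        for (byte[] c : components) {
          sb.append('/').append(new String(c, java.nio.charset.StandardCharsets.UTF_8));
        }
        return sb.toString();
      }
    }
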




[2/2] hadoop git commit: HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.

2017-05-31 Thread zhz
HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.

(cherry picked from commit 90020624b05230ad4a7fbd666d0177ecb107a4d6)
(cherry picked from commit 3a89a8814f8e443a5f7ceddcecd6d953cfc2c6f3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81712e84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81712e84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81712e84

Branch: refs/heads/branch-2.7
Commit: 81712e84fba77e361b110f14f5388163a59cda25
Parents: fd83491
Author: Kihwal Lee 
Authored: Mon Oct 3 09:26:41 2016 -0500
Committer: Zhe Zhang 
Committed: Wed May 31 08:50:26 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../org/apache/hadoop/hdfs/server/namenode/INodesInPath.java| 5 -
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81712e84/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 09ee6cd..c5940cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -97,6 +97,8 @@ Release 2.7.4 - UNRELEASED
 HDFS-9726. Refactor IBR code to a new class. (Tsz-Wo Nicholas Sze
 Backport HDFS-11839 by Vinitha Reddy Gankidi)
 
+HDFS-10619. Cache path in InodesInPath. (daryn via kihwal, backported by zhz)
+
   OPTIMIZATIONS
 
HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81712e84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 3de8200..0badf6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -270,6 +270,8 @@ public class INodesInPath {
   }
 
   private final byte[][] path;
+  private final String pathname;
+
   /**
* Array with the specified number of INodes resolved for a given path.
*/
@@ -291,6 +293,7 @@ public class INodesInPath {
 Preconditions.checkArgument(inodes != null && path != null);
 this.inodes = inodes;
 this.path = path;
+this.pathname = DFSUtil.byteArray2PathString(path);
 this.isSnapshot = isSnapshot;
 this.snapshotId = snapshotId;
   }
@@ -346,7 +349,7 @@ public class INodesInPath {
 
   /** @return the full path in string form */
   public String getPath() {
-return DFSUtil.byteArray2PathString(path);
+return pathname;
   }
 
   public String getParentPath() {





hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails runtime exception. Contributed by Jon Eagles.

2017-05-31 Thread nroberts
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.1 1d597defb -> 63244


YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object 
decoding ever fails runtime exception. Contributed by Jon Eagles.

(cherry picked from commit 4369690ce63566131aee28696bf2683a3cb20205)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6324
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6324
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6324

Branch: refs/heads/branch-2.8.1
Commit: 63244226b790952dee7991f77efdd0d052d4
Parents: 1d597de
Author: Nathan Roberts 
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts 
Committed: Wed May 31 12:55:34 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 29 
 1 file changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6324/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 }
   } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
 if (otherInfo) {
-  entity.addOtherInfo(
-  parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-  fstConf.asObject(iterator.peekNext().getValue()));
+  Object o = null;
+  String keyStr = parseRemainingKey(key,
+  prefixlen + OTHER_INFO_COLUMN.length);
+  try {
+o = fstConf.asObject(iterator.peekNext().getValue());
+entity.addOtherInfo(keyStr, o);
+  } catch (Exception e) {
+LOG.warn("Error while decoding "
++ entityId + ":otherInfo:" + keyStr, e);
+  }
 }
   } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
 if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   TimelineEvent event = new TimelineEvent();
   event.setTimestamp(ts);
   event.setEventType(tstype);
-  Object o = fstConf.asObject(value);
+  Object o = null;
+  try {
+o = fstConf.asObject(value);
+  } catch (Exception e) {
+LOG.warn("Error while decoding " + tstype, e);
+  }
   if (o == null) {
 event.setEventInfo(null);
   } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 KeyParser kp = new KeyParser(key, offset);
 String name = kp.getNextString();
 byte[] bytes = kp.getRemainingBytes();
-Object value = fstConf.asObject(bytes);
-entity.addPrimaryFilter(name, value);
+Object value = null;
+try {
+  value = fstConf.asObject(bytes);
+  entity.addPrimaryFilter(name, value);
+} catch (Exception e) {
+  LOG.warn("Error while decoding " + name, e);
+}
   }
 
   /**

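All three hunks apply the same fix: wrap fstConf.asObject() so one corrupt LevelDB value is logged and skipped instead of throwing a RuntimeException that aborts the whole read path. The pattern, distilled once as a hedged helper (the label and return-null contract are generic stand-ins for the three call sites):

    // Generic shape of the fix: decode defensively, log, and return null
    // on corruption so callers can skip just the bad record.
    private Object decodeOrNull(byte[] bytes, String label) {
      try {
        return fstConf.asObject(bytes);   // FST deserialization, may throw
      } catch (Exception e) {
        LOG.warn("Error while decoding " + label, e);
        return null;
      }
    }
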




hadoop git commit: HDFS-11856. Ability to re-add upgrading nodes to pipeline for future pipeline updates. Contributed by Vinayakumar B.

2017-05-31 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1f077f45f -> fe6de5e59


HDFS-11856. Ability to re-add upgrading nodes to pipeline for future pipeline 
updates. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe6de5e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe6de5e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe6de5e5

Branch: refs/heads/branch-2.8
Commit: fe6de5e5948c18a15592a6fbd4732aa4ef0e7d94
Parents: 1f077f4
Author: Kihwal Lee 
Authored: Wed May 31 13:04:32 2017 -0500
Committer: Kihwal Lee 
Committed: Wed May 31 13:08:51 2017 -0500

--
 .../hadoop/hdfs/DFSClientFaultInjector.java |  4 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 69 +++
 .../hdfs/server/datanode/BlockReceiver.java |  4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  5 +-
 .../impl/FsDatasetAsyncDiskService.java | 14 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 93 +---
 .../TestClientProtocolForPipelineRecovery.java  | 92 +++
 .../server/datanode/SimulatedFSDataset.java |  6 +-
 .../server/datanode/TestSimulatedFSDataset.java |  2 +-
 .../extdataset/ExternalDatasetImpl.java |  4 +-
 .../fsdataset/impl/TestWriteToReplica.java  | 17 ++--
 11 files changed, 244 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6de5e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 4eb4c52..748edcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -57,4 +57,8 @@ public class DFSClientFaultInjector {
   public void fetchFromDatanodeException() {}
 
   public void readFromDatanodeDelay() {}
+
+  public boolean skipRollingRestartWait() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6de5e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 47728d3..92f13d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -328,6 +328,7 @@ class DataStreamer extends Daemon {
   static class ErrorState {
 private boolean error = false;
 private int badNodeIndex = -1;
+private boolean waitForRestart = true;
 private int restartingNodeIndex = -1;
 private long restartingNodeDeadline = 0;
 private final long datanodeRestartTimeout;
@@ -341,6 +342,7 @@ class DataStreamer extends Daemon {
   badNodeIndex = -1;
   restartingNodeIndex = -1;
   restartingNodeDeadline = 0;
+  waitForRestart = true;
 }
 
 synchronized boolean hasError() {
@@ -367,14 +369,19 @@ class DataStreamer extends Daemon {
   return restartingNodeIndex;
 }
 
-synchronized void initRestartingNode(int i, String message) {
+synchronized void initRestartingNode(int i, String message,
+boolean shouldWait) {
   restartingNodeIndex = i;
-  restartingNodeDeadline =  Time.monotonicNow() + datanodeRestartTimeout;
-  // If the data streamer has already set the primary node
-  // bad, clear it. It is likely that the write failed due to
-  // the DN shutdown. Even if it was a real failure, the pipeline
-  // recovery will take care of it.
-  badNodeIndex = -1;
+  if (shouldWait) {
+restartingNodeDeadline = Time.monotonicNow() + datanodeRestartTimeout;
+// If the data streamer has already set the primary node
+// bad, clear it. It is likely that the write failed due to
+// the DN shutdown. Even if it was a real failure, the pipeline
+// recovery will take care of it.
+badNodeIndex = -1;
+  } else {
+this.waitForRestart = false;
+  }
   LOG.info(message);
 }
 
@@ -383,7 +390,7 @@ class DataStreamer extends Daemon {
 }
 
 synchronized boolean isNodeMarked() {
-  return badNodeIndex >= 0 |

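The hook added to DFSClientFaultInjector is what makes the no-wait path testable: a test swaps in an injector whose skipRollingRestartWait() returns true, steering initRestartingNode() into the branch that clears waitForRestart. A hedged sketch of such an override (how the instance is installed varies by branch; a public static field is assumed here):

    // Hypothetical test-side override forcing the "do not wait for the
    // restarting datanode" branch added above; field name is an assumption.
    DFSClientFaultInjector.instance = new DFSClientFaultInjector() {
      @Override
      public boolean skipRollingRestartWait() {
        return true;
      }
    };
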
hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails runtime exception. Contributed by Jon Eagles.

2017-05-31 Thread nroberts
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f8a3de64f -> 1f077f45f


YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object 
decoding ever fails runtime exception. Contributed by Jon Eagles.

(cherry picked from commit 4369690ce63566131aee28696bf2683a3cb20205)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f077f45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f077f45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f077f45

Branch: refs/heads/branch-2.8
Commit: 1f077f45f9a7cd595078ed80c1350827efe8675a
Parents: f8a3de6
Author: Nathan Roberts 
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts 
Committed: Wed May 31 12:47:38 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 29 
 1 file changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f077f45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 }
   } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
 if (otherInfo) {
-  entity.addOtherInfo(
-  parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-  fstConf.asObject(iterator.peekNext().getValue()));
+  Object o = null;
+  String keyStr = parseRemainingKey(key,
+  prefixlen + OTHER_INFO_COLUMN.length);
+  try {
+o = fstConf.asObject(iterator.peekNext().getValue());
+entity.addOtherInfo(keyStr, o);
+  } catch (Exception e) {
+LOG.warn("Error while decoding "
++ entityId + ":otherInfo:" + keyStr, e);
+  }
 }
   } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
 if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   TimelineEvent event = new TimelineEvent();
   event.setTimestamp(ts);
   event.setEventType(tstype);
-  Object o = fstConf.asObject(value);
+  Object o = null;
+  try {
+o = fstConf.asObject(value);
+  } catch (Exception e) {
+LOG.warn("Error while decoding " + tstype, e);
+  }
   if (o == null) {
 event.setEventInfo(null);
   } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 KeyParser kp = new KeyParser(key, offset);
 String name = kp.getNextString();
 byte[] bytes = kp.getRemainingBytes();
-Object value = fstConf.asObject(bytes);
-entity.addPrimaryFilter(name, value);
+Object value = null;
+try {
+  value = fstConf.asObject(bytes);
+  entity.addPrimaryFilter(name, value);
+} catch (Exception e) {
+  LOG.warn("Error while decoding " + name, e);
+}
   }
 
   /**





hadoop git commit: HDFS-11856. Ability to re-add Upgrading nodes to pipeline for future pipeline updates. Contributed by Vinayakumar B.

2017-05-31 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c112bf683 -> 212a56608


HDFS-11856. Ability to re-add Upgrading nodes to pipeline for future pipeline 
updates. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/212a5660
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/212a5660
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/212a5660

Branch: refs/heads/branch-2
Commit: 212a566088bab20c47234c435ae784c4ee84469b
Parents: c112bf6
Author: Kihwal Lee 
Authored: Wed May 31 12:42:37 2017 -0500
Committer: Kihwal Lee 
Committed: Wed May 31 12:45:33 2017 -0500

--
 .../hadoop/hdfs/DFSClientFaultInjector.java |  4 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 69 +++
 .../hdfs/server/datanode/BlockReceiver.java |  4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  5 +-
 .../impl/FsDatasetAsyncDiskService.java | 14 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 93 +---
 .../TestClientProtocolForPipelineRecovery.java  | 92 +++
 .../server/datanode/SimulatedFSDataset.java |  6 +-
 .../server/datanode/TestSimulatedFSDataset.java |  2 +-
 .../extdataset/ExternalDatasetImpl.java |  4 +-
 .../fsdataset/impl/TestWriteToReplica.java  | 17 ++--
 11 files changed, 244 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/212a5660/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 4eb4c52..748edcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -57,4 +57,8 @@ public class DFSClientFaultInjector {
   public void fetchFromDatanodeException() {}
 
   public void readFromDatanodeDelay() {}
+
+  public boolean skipRollingRestartWait() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/212a5660/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index bcf740f..3279590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -328,6 +328,7 @@ class DataStreamer extends Daemon {
   static class ErrorState {
 private boolean error = false;
 private int badNodeIndex = -1;
+private boolean waitForRestart = true;
 private int restartingNodeIndex = -1;
 private long restartingNodeDeadline = 0;
 private final long datanodeRestartTimeout;
@@ -341,6 +342,7 @@ class DataStreamer extends Daemon {
   badNodeIndex = -1;
   restartingNodeIndex = -1;
   restartingNodeDeadline = 0;
+  waitForRestart = true;
 }
 
 synchronized boolean hasError() {
@@ -367,14 +369,19 @@ class DataStreamer extends Daemon {
   return restartingNodeIndex;
 }
 
-synchronized void initRestartingNode(int i, String message) {
+synchronized void initRestartingNode(int i, String message,
+boolean shouldWait) {
   restartingNodeIndex = i;
-  restartingNodeDeadline =  Time.monotonicNow() + datanodeRestartTimeout;
-  // If the data streamer has already set the primary node
-  // bad, clear it. It is likely that the write failed due to
-  // the DN shutdown. Even if it was a real failure, the pipeline
-  // recovery will take care of it.
-  badNodeIndex = -1;
+  if (shouldWait) {
+restartingNodeDeadline = Time.monotonicNow() + datanodeRestartTimeout;
+// If the data streamer has already set the primary node
+// bad, clear it. It is likely that the write failed due to
+// the DN shutdown. Even if it was a real failure, the pipeline
+// recovery will take care of it.
+badNodeIndex = -1;
+  } else {
+this.waitForRestart = false;
+  }
   LOG.info(message);
 }
 
@@ -383,7 +390,7 @@ class DataStreamer extends Daemon {
 }
 
 synchronized boolean isNodeMarked() {
-  return badNodeIndex >= 0 || is

hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails runtime exception. Contributed by Jon Eagles.

2017-05-31 Thread nroberts
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5b0baeab5 -> c112bf683


YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object 
decoding ever fails runtime exception. Contributed by Jon Eagles.

(cherry picked from commit 4369690ce63566131aee28696bf2683a3cb20205)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c112bf68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c112bf68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c112bf68

Branch: refs/heads/branch-2
Commit: c112bf68382edd1a2d423852ca1a99af1666116d
Parents: 5b0baea
Author: Nathan Roberts 
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts 
Committed: Wed May 31 12:35:50 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 29 
 1 file changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c112bf68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 }
   } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
 if (otherInfo) {
-  entity.addOtherInfo(
-  parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-  fstConf.asObject(iterator.peekNext().getValue()));
+  Object o = null;
+  String keyStr = parseRemainingKey(key,
+  prefixlen + OTHER_INFO_COLUMN.length);
+  try {
+o = fstConf.asObject(iterator.peekNext().getValue());
+entity.addOtherInfo(keyStr, o);
+  } catch (Exception e) {
+LOG.warn("Error while decoding "
++ entityId + ":otherInfo:" + keyStr, e);
+  }
 }
   } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
 if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   TimelineEvent event = new TimelineEvent();
   event.setTimestamp(ts);
   event.setEventType(tstype);
-  Object o = fstConf.asObject(value);
+  Object o = null;
+  try {
+o = fstConf.asObject(value);
+  } catch (Exception e) {
+LOG.warn("Error while decoding " + tstype, e);
+  }
   if (o == null) {
 event.setEventInfo(null);
   } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 KeyParser kp = new KeyParser(key, offset);
 String name = kp.getNextString();
 byte[] bytes = kp.getRemainingBytes();
-Object value = fstConf.asObject(bytes);
-entity.addPrimaryFilter(name, value);
+Object value = null;
+try {
+  value = fstConf.asObject(bytes);
+  entity.addPrimaryFilter(name, value);
+} catch (Exception e) {
+  LOG.warn("Error while decoding " + name, e);
+}
   }
 
   /**





hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails runtime exception. Contributed by Jon Eagles.

2017-05-31 Thread nroberts
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1543d0f5b -> 4369690ce


YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object 
decoding ever fails runtime exception. Contributed by Jon Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4369690c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4369690c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4369690c

Branch: refs/heads/trunk
Commit: 4369690ce63566131aee28696bf2683a3cb20205
Parents: 1543d0f
Author: Nathan Roberts 
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts 
Committed: Wed May 31 11:32:32 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 29 
 1 file changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4369690c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 }
   } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
 if (otherInfo) {
-  entity.addOtherInfo(
-  parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-  fstConf.asObject(iterator.peekNext().getValue()));
+  Object o = null;
+  String keyStr = parseRemainingKey(key,
+  prefixlen + OTHER_INFO_COLUMN.length);
+  try {
+o = fstConf.asObject(iterator.peekNext().getValue());
+entity.addOtherInfo(keyStr, o);
+  } catch (Exception e) {
+LOG.warn("Error while decoding "
++ entityId + ":otherInfo:" + keyStr, e);
+  }
 }
   } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
 if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   TimelineEvent event = new TimelineEvent();
   event.setTimestamp(ts);
   event.setEventType(tstype);
-  Object o = fstConf.asObject(value);
+  Object o = null;
+  try {
+o = fstConf.asObject(value);
+  } catch (Exception e) {
+LOG.warn("Error while decoding " + tstype, e);
+  }
   if (o == null) {
 event.setEventInfo(null);
   } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 KeyParser kp = new KeyParser(key, offset);
 String name = kp.getNextString();
 byte[] bytes = kp.getRemainingBytes();
-Object value = fstConf.asObject(bytes);
-entity.addPrimaryFilter(name, value);
+Object value = null;
+try {
+  value = fstConf.asObject(bytes);
+  entity.addPrimaryFilter(name, value);
+} catch (Exception e) {
+  LOG.warn("Error while decoding " + name, e);
+}
   }
 
   /**





hadoop git commit: HDFS-11791. [READ] Test for increasing replication of provided files.

2017-05-31 Thread virajith
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-9806 5d021f38e -> 1fd812443


HDFS-11791. [READ] Test for increasing replication of provided files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fd81244
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fd81244
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fd81244

Branch: refs/heads/HDFS-9806
Commit: 1fd812443b8ae2e8b8dbeb5c10b8e81d03e2c71c
Parents: 5d021f3
Author: Virajith Jalaparti 
Authored: Wed May 31 10:29:53 2017 -0700
Committer: Virajith Jalaparti 
Committed: Wed May 31 10:29:53 2017 -0700

--
 .../TestNameNodeProvidedImplementation.java | 55 
 1 file changed, 55 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd81244/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 5062439..e171557 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
@@ -34,10 +35,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockProvider;
 import org.apache.hadoop.hdfs.server.common.BlockFormat;
@@ -378,4 +384,53 @@ public class TestNameNodeProvidedImplementation {
 assertEquals(1, locations.length);
 assertEquals(2, locations[0].getHosts().length);
   }
+
+  private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
+  String filename, int expectedLocations) throws IOException {
+LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+filename, 0, baseFileLen);
+//given the start and length in the above call,
+//only one LocatedBlock in LocatedBlocks
+assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+LocatedBlock locatedBlock = locatedBlocks.getLocatedBlocks().get(0);
+assertEquals(expectedLocations, locatedBlock.getLocations().length);
+return locatedBlock.getLocations();
+  }
+
+  /**
+   * Tests setting replication of provided files.
+   * @throws Exception
+   */
+  @Test
+  public void testSetReplicationForProvidedFiles() throws Exception {
+createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+FixedBlockResolver.class);
+startCluster(NNDIRPATH, 2, null,
+new StorageType[][] {
+{StorageType.PROVIDED},
+{StorageType.DISK}},
+false);
+
+String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
+Path file = new Path(filename);
+FileSystem fs = cluster.getFileSystem();
+
+//set the replication to 2, and test that the file has
+//the required replication.
+fs.setReplication(file, (short) 2);
+DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+file, (short) 2, 1);
+DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+cluster.getNameNodePort()), cluster.getConfiguration(0));
+getAndCheckBlockLocations(client, filename, 2);
+
+//set the replication back to 1
+fs.setReplication(file, (short) 1);
+DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+file, (short) 1, 1);
+//the only replica left should be the PROVIDED datanode
+DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
+assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
+infos[0].getDat

hadoop git commit: HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.

2017-05-31 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f5f12b576 -> 5b0baeab5


HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar 
B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b0baeab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b0baeab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b0baeab

Branch: refs/heads/branch-2
Commit: 5b0baeab5e6c20f161c2b25ba52bb5f6f5ff93c3
Parents: f5f12b5
Author: Kihwal Lee 
Authored: Wed May 31 11:02:34 2017 -0500
Committer: Kihwal Lee 
Committed: Wed May 31 11:02:34 2017 -0500

--
 .../main/java/org/apache/hadoop/io/IOUtils.java | 55 +++-
 .../hdfs/server/datanode/BlockReceiver.java |  9 +++-
 .../hdfs/server/datanode/FileIoProvider.java| 19 ++-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 38 +++---
 .../server/datanode/SimulatedFSDataset.java |  3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  6 ++-
 .../server/datanode/TestSimulatedFSDataset.java |  4 +-
 .../extdataset/ExternalDatasetImpl.java |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  2 +-
 10 files changed, 123 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b0baeab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 61827d1..a56ccfe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.DirectoryIteratorException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Shell;
 
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -355,4 +356,56 @@ public class IOUtils {
 }
 return list;
   }
+
+  /**
+   * Ensure that any writes to the given file is written to the storage device
+   * that contains it. This method opens channel on given File and closes it
+   * once the sync is done.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param fileToSync the file to fsync
+   */
+  public static void fsync(File fileToSync) throws IOException {
+if (!fileToSync.exists()) {
+  throw new FileNotFoundException(
+  "File/Directory " + fileToSync.getAbsolutePath() + " does not exist");
+}
+boolean isDir = fileToSync.isDirectory();
+// If the file is a directory we have to open read-only, for regular files
+// we must open r/w for the fsync to have an effect. See
+// http://blog.httrack.com/blog/2013/11/15/
+// everything-you-always-wanted-to-know-about-fsync/
+try(FileChannel channel = FileChannel.open(fileToSync.toPath(),
+isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)){
+  fsync(channel, isDir);
+}
+  }
+
+  /**
+   * Ensure that any writes to the given file is written to the storage device
+   * that contains it. This method opens channel on given File and closes it
+   * once the sync is done.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param channel Channel to sync
+   * @param isDir if true, the given file is a directory (Channel should be
+   *  opened for read and ignore IOExceptions, because not all file
+   *  systems and operating systems allow to fsync on a directory)
+   * @throws IOException
+   */
+  public static void fsync(FileChannel channel, boolean isDir)
+  throws IOException {
+try {
+  channel.force(true);
+} catch (IOException ioe) {
+  if (isDir) {
+assert !(Shell.LINUX
+|| Shell.MAC) : "On Linux and MacOSX fsyncing a directory"
++ " should not throw IOException, we just don't want to rely"
++ " on that in production (undocumented)" + ". Got: " + ioe;
+// Ignore exception if it is a directory
+return;
+  }
+  // Throw ori

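The helpers give DataNode code a one-call durability barrier for both file contents and directory entries, which is exactly what a completed block needs to survive a power failure. A hedged caller sketch (paths and payload are illustrative only; java.io imports and method scaffolding elided):

    // Hypothetical caller: persist the data, then the directory entry,
    // so the file's existence is durable across power loss.
    byte[] payload = {1, 2, 3};
    File blockFile = new File("/data/dn/current/blk_1001"); // illustrative
    try (FileOutputStream out = new FileOutputStream(blockFile)) {
      out.write(payload);
      out.getChannel().force(true);           // fsync the file itself
    }
    IOUtils.fsync(blockFile.getParentFile()); // then fsync its directory
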
hadoop git commit: HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.

2017-05-31 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk cbfed0e82 -> 1543d0f5b


HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar 
B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1543d0f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1543d0f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1543d0f5

Branch: refs/heads/trunk
Commit: 1543d0f5be6a02ad00e7a33e35d78af8516043e3
Parents: cbfed0e
Author: Kihwal Lee 
Authored: Wed May 31 10:55:03 2017 -0500
Committer: Kihwal Lee 
Committed: Wed May 31 10:55:03 2017 -0500

--
 .../main/java/org/apache/hadoop/io/IOUtils.java | 55 +++-
 .../hdfs/server/datanode/BlockReceiver.java |  9 +++-
 .../hdfs/server/datanode/FileIoProvider.java| 19 ++-
 .../hdfs/server/datanode/LocalReplica.java  | 13 +
 .../server/datanode/fsdataset/FsDatasetSpi.java |  4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 ---
 .../server/datanode/SimulatedFSDataset.java |  3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  6 ++-
 .../server/datanode/TestSimulatedFSDataset.java |  4 +-
 .../extdataset/ExternalDatasetImpl.java |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  2 +-
 11 files changed, 130 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 0d2e797..ee7264b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.DirectoryIteratorException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Shell;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -357,4 +358,56 @@ public class IOUtils {
 }
 return list;
   }
+
+  /**
+   * Ensure that any writes to the given file are written to the storage
+   * device that contains it. This method opens a FileChannel on the given
+   * File and closes it once the sync is done.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param fileToSync the file to fsync
+   */
+  public static void fsync(File fileToSync) throws IOException {
+if (!fileToSync.exists()) {
+  throw new FileNotFoundException(
+  "File/Directory " + fileToSync.getAbsolutePath() + " does not 
exist");
+}
+boolean isDir = fileToSync.isDirectory();
+// If the file is a directory we have to open it read-only; for regular
+// files we must open it read/write for the fsync to have an effect. See
+// http://blog.httrack.com/blog/2013/11/15/
+// everything-you-always-wanted-to-know-about-fsync/
+try(FileChannel channel = FileChannel.open(fileToSync.toPath(),
+isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)){
+  fsync(channel, isDir);
+}
+  }
+
+  /**
+   * Ensure that any writes to the given channel are written to the storage
+   * device that contains it. Unlike {@link #fsync(File)}, this method does
+   * not open or close the channel; the caller manages its lifecycle.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param channel Channel to sync
+   * @param isDir if true, the given file is a directory (the channel should
+   *  be opened for read and IOExceptions ignored, because not all file
+   *  systems and operating systems allow fsyncing a directory)
+   * @throws IOException if the sync fails and the target is not a directory
+   */
+  public static void fsync(FileChannel channel, boolean isDir)
+  throws IOException {
+try {
+  channel.force(true);
+} catch (IOException ioe) {
+  if (isDir) {
+assert !(Shell.LINUX
+|| Shell.MAC) : "On Linux and MacOSX fsyncing a directory"
++ " should not throw IOException, we just don't want to rely"
++ " on that in production (undocumented)" + ". Got: " + ioe;
+// Ignore exception if it is a directory
+return;
+  }
+  // Throw original exception
+  throw ioe;
+}
+  }
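
For context, the reason both fsync variants exist: durably creating a file takes two syncs, one on the file itself and one on its parent directory, so that the new directory entry also survives a power failure (the scenario this patch fixes). A minimal caller sketch, illustrative only and not part of the patch (DurableWriteSketch is a hypothetical name, and the file is assumed to have a parent directory):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.io.IOUtils;

    public class DurableWriteSketch {
      // Write data, then sync the file and its parent directory so that both
      // the contents and the directory entry survive a power failure.
      public static void writeDurably(File file, byte[] data) throws IOException {
        try (FileOutputStream out = new FileOutputStream(file)) {
          out.write(data);
        }
        IOUtils.fsync(file);                 // flush file data to the device
        IOUtils.fsync(file.getParentFile()); // flush the directory entry too
      }
    }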

hadoop git commit: HDFS-11776. Ozone: KSM: add SetBucketProperty. Contributed by Nandakumar Vadivelu.

2017-05-31 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 122d660f7 -> 3983e94f0


HDFS-11776. Ozone: KSM: add SetBucketProperty. Contributed by Nandakumar Vadivelu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3983e94f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3983e94f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3983e94f

Branch: refs/heads/HDFS-7240
Commit: 3983e94f0d160cd7ee812dbc6e069777edeb1cc1
Parents: 122d660
Author: Xiaoyu Yao 
Authored: Wed May 31 08:37:09 2017 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 31 08:37:09 2017 -0700

--
 .../hadoop/ksm/helpers/KsmBucketArgs.java   |   9 +-
 .../ksm/protocol/KeySpaceManagerProtocol.java   |   8 ++
 ...ceManagerProtocolClientSideTranslatorPB.java |  31 +
 .../main/proto/KeySpaceManagerProtocol.proto|  14 ++
 .../apache/hadoop/ozone/ksm/BucketManager.java  |   8 ++
 .../hadoop/ozone/ksm/BucketManagerImpl.java | 113 
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java |  22 ++-
 .../hadoop/ozone/ksm/KeySpaceManager.java   |  20 +++
 ...ceManagerProtocolServerSideTranslatorPB.java |  21 +++
 .../web/storage/DistributedStorageHandler.java  |  35 -
 .../hadoop/ozone/ksm/TestBucketManagerImpl.java | 134 ++-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |   2 +
 12 files changed, 406 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3983e94f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmBucketArgs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmBucketArgs.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmBucketArgs.java
index e649eb7..f867628 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmBucketArgs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmBucketArgs.java
@@ -68,7 +68,7 @@ public final class KsmBucketArgs {
*/
   private KsmBucketArgs(String volumeName, String bucketName,
   List addAcls, List removeAcls,
-  boolean isVersionEnabled, StorageTypeProto storageType) {
+  Boolean isVersionEnabled, StorageTypeProto storageType) {
 this.volumeName = volumeName;
 this.bucketName = bucketName;
 this.addAcls = addAcls;
@@ -113,7 +113,7 @@ public final class KsmBucketArgs {
* Returns true if bucket version is enabled, else false.
* @return isVersionEnabled
*/
-  public boolean getIsVersionEnabled() {
+  public Boolean getIsVersionEnabled() {
 return isVersionEnabled;
   }
 
@@ -219,7 +219,8 @@ public final class KsmBucketArgs {
 bucketArgs.getBucketName(),
 bucketArgs.getAddAclsList(),
 bucketArgs.getRemoveAclsList(),
-bucketArgs.getIsVersionEnabled(),
-bucketArgs.getStorageType());
+bucketArgs.hasIsVersionEnabled() ? bucketArgs.getIsVersionEnabled() :
+null,
+bucketArgs.hasStorageType() ? bucketArgs.getStorageType() : null);
   }
 }
\ No newline at end of file
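
The boolean-to-Boolean widening above is what makes the hasX() mapping at the end of getFromProtobuf meaningful: proto2 accessors distinguish an absent optional field from one explicitly set to false, and absent is mapped to null so the server only applies properties the client actually supplied. A standalone sketch of the mapping (hypothetical names, not KSM code):

    public class OptionalFieldSketch {
      // Stand-in for the generated hasX()/getX() accessor pair on a proto2
      // message: absent maps to null, present maps to the boxed value.
      static Boolean fromProto(boolean hasField, boolean value) {
        return hasField ? value : null;
      }

      public static void main(String[] args) {
        Boolean notSet   = fromProto(false, false); // client never set versioning
        Boolean disabled = fromProto(true, false);  // client explicitly disabled it
        System.out.println(notSet == null);                 // true -> skip update
        System.out.println(Boolean.FALSE.equals(disabled)); // true -> apply update
      }
    }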

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3983e94f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocol/KeySpaceManagerProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocol/KeySpaceManagerProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocol/KeySpaceManagerProtocol.java
index 924994d..6efcb9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocol/KeySpaceManagerProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ksm.protocol;
 
+import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
 import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
 import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
@@ -116,6 +117,13 @@ public interface KeySpaceManagerProtocol {
   throws IOException;
 
   /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  void setBucketProperty(KsmBucketArgs args) throws IOException;
+
+  /**
* Allocate a block to a container, the block is returned to the client.
*
* @param args the args of the key.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3983e94f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/protocolPB/KeySpa

hadoop git commit: YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)

2017-05-31 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1eecde335 -> f5f12b576


YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)

(cherry picked from commit cbfed0e82f57e96b8d5309e0613057963840554f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5f12b57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5f12b57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5f12b57

Branch: refs/heads/branch-2
Commit: f5f12b576efc7ab289fa88cca6e5760b5a7a12a9
Parents: 1eecde3
Author: Jonathan Eagles 
Authored: Wed May 31 10:18:09 2017 -0500
Committer: Jonathan Eagles 
Committed: Wed May 31 10:19:51 2017 -0500

--
 .../server/resourcemanager/ResourceManager.java | 27 ++--
 1 file changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5f12b57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 3a93c6e..0573690 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
@@ -220,13 +221,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 rmContext.setConfigurationProvider(configurationProvider);
 
 // load core-site.xml
-InputStream coreSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-if (coreSiteXMLInputStream != null) {
-  this.conf.addResource(coreSiteXMLInputStream,
-  YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
 // Do refreshUserToGroupsMappings with loaded core-site.xml
 Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
@@ -239,13 +234,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);
 
 // load yarn-site.xml
-InputStream yarnSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-if (yarnSiteXMLInputStream != null) {
-  this.conf.addResource(yarnSiteXMLInputStream,
-  YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
 
 validateConfigs(this.conf);
 
@@ -311,6 +300,16 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 super.serviceInit(this.conf);
   }
 
+  private void loadConfigurationXml(String configurationFile)
+  throws YarnException, IOException {
+InputStream configurationInputStream =
+this.configurationProvider.getConfigurationInputStream(this.conf,
+configurationFile);
+if (configurationInputStream != null) {
+  this.conf.addResource(configurationInputStream, configurationFile);
+}
+  }
+
   protected EmbeddedElector createEmbeddedElector() throws IOException {
 EmbeddedElector elector;
 curatorEnabled =





hadoop git commit: YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)

2017-05-31 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13de636b4 -> cbfed0e82


YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbfed0e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbfed0e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbfed0e8

Branch: refs/heads/trunk
Commit: cbfed0e82f57e96b8d5309e0613057963840554f
Parents: 13de636
Author: Jonathan Eagles 
Authored: Wed May 31 10:18:09 2017 -0500
Committer: Jonathan Eagles 
Committed: Wed May 31 10:18:42 2017 -0500

--
 .../server/resourcemanager/ResourceManager.java | 27 ++--
 1 file changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbfed0e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8f2c121..f727f55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
@@ -238,13 +239,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 rmContext.setConfigurationProvider(configurationProvider);
 
 // load core-site.xml
-InputStream coreSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-if (coreSiteXMLInputStream != null) {
-  this.conf.addResource(coreSiteXMLInputStream,
-  YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
 // Do refreshUserToGroupsMappings with loaded core-site.xml
 Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
@@ -257,13 +252,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);
 
 // load yarn-site.xml
-InputStream yarnSiteXMLInputStream =
-this.configurationProvider.getConfigurationInputStream(this.conf,
-YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-if (yarnSiteXMLInputStream != null) {
-  this.conf.addResource(yarnSiteXMLInputStream,
-  YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-}
+loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
 
 validateConfigs(this.conf);
 
@@ -339,6 +328,16 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 super.serviceInit(this.conf);
   }
 
+  private void loadConfigurationXml(String configurationFile)
+  throws YarnException, IOException {
+InputStream configurationInputStream =
+this.configurationProvider.getConfigurationInputStream(this.conf,
+configurationFile);
+if (configurationInputStream != null) {
+  this.conf.addResource(configurationInputStream, configurationFile);
+}
+  }
+
   protected EmbeddedElector createEmbeddedElector() throws IOException {
 EmbeddedElector elector;
 curatorEnabled =





hadoop git commit: HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

2017-05-31 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 547f18cb9 -> 13de636b4


HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13de636b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13de636b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13de636b

Branch: refs/heads/trunk
Commit: 13de636b4079b077890ad10389ff350dcf8086a2
Parents: 547f18c
Author: Brahma Reddy Battula 
Authored: Wed May 31 23:09:08 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed May 31 23:09:08 2017 +0800

--
 .../java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java  | 4 ++--
 .../src/main/java/org/apache/hadoop/lib/server/Server.java   | 2 +-
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../src/test/java/org/apache/hadoop/lib/lang/TestXException.java | 2 +-
 .../src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java   | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java| 2 +-
 .../hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/Content.java | 2 +-
 .../hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 2 +-
 .../hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java   | 2 +-
 .../hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java| 2 +-
 .../hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/Diff.java  | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java   | 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java   | 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 055a57e..5922958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -139,7 +139,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
-  public static enum FILE_TYPE {
+  public enum FILE_TYPE {
 FILE, DIRECTORY, SYMLINK;
 
 public static FILE_TYPE getType(FileStatus fileStatus) {
@@ -210,7 +210,7 @@ public class HttpFSFileSystem extends FileSystem
   private static final String HTTP_DELETE = "DELETE";
 
   @InterfaceAudience.Private
-  public static enum Operation {
+  public enum Operation {
 OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
 GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
 GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
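
For reference, a nested enum is implicitly static (JLS 8.9), so the removed modifier never changed semantics; both declarations below compile to exactly the same thing (Outer is a hypothetical class, not from the patch):

    public class Outer {
      public static enum WithModifier { A, B } // the form this patch removes
      public enum WithoutModifier { A, B }     // equivalent, preferred form
    }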

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
index 82be027..57f651a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
@@ -101,7 +101,7 @@ public class Server {
* Enumeration that defines the server status.
*/
   @InterfaceAudience.Private
-  public static enum Status {
+  public enum Status {
 UNDEF(false, false),
 BOOTING(false, true),
 HALTED(true, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoo

[1/4] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a3ad1a39c -> 1eecde335
  refs/heads/trunk 4b4a6524f -> 547f18cb9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 5f9b883..c1df562 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -28,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,19 +58,17 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
-import 
org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentMatcher;
 
 public class TestNodeManagerReboot {
 
@@ -195,19 +193,18 @@ public class TestNodeManagerReboot {
 // restart the NodeManager
 restartNM(MAX_TRIES);
 checkNumOfLocalDirs();
-
-verify(delService, times(1)).delete(
-  (String) isNull(),
-  argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
-  + "_DEL_")));
-verify(delService, times(1)).delete((String) isNull(),
-  argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(user, null,
-new String[] { destinationFile })));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
-  + "_DEL_", new String[] {})));
+
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null,
+new Path(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"), 
null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.FILECACHE + "_DEL_"),
+null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, user, null, Arrays.asList(new Path(destinationFile)))));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.USERCACHE + "_DEL_"),
+new ArrayList<Path>())));
 
 // restart the NodeManager again
 // this time usercache directory should be empty
@@ -329,72 +326,4 @@ public class TestNodeManagerReboot {
   return conf;
 }
   }
-
-  class PathInclude extends ArgumentMatcher<Path> {
-
-final String part;
-
-PathInclude(String part) {
-  this.part = part;
-}
-
-@Override
-public boolean matches(Object o) {
-  return ((Path) o).getName().indexOf(part) != -1;
-}
-  }
-  
-  class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
-final String user;
-final String subDirIncludes;
-final String[] baseDirIncludes;
-
-public FileDeletionInclude(String user, String subDirIncludes,
-String [] baseDirIncludes) {
-  this.user = user;
-  this.subDirIncludes = subDirIncludes;
-  this.baseDirIncludes = baseDirIncludes;
-}
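
The FileDeletionMatcher that replaces these classes follows the same custom-ArgumentMatcher pattern. A self-contained sketch of that pattern with Mockito 1.x (Task, TaskRunner and NameContains are hypothetical, not NodeManager code):

    import static org.mockito.Matchers.argThat;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    import org.mockito.ArgumentMatcher;

    public class MatcherSketch {
      interface Task { String name(); }
      interface TaskRunner { void run(Task t); }

      // A custom matcher lets verify() assert on a property of the argument
      // (here, a substring of its name) instead of exact equality.
      static class NameContains extends ArgumentMatcher<Task> {
        private final String part;
        NameContains(String part) { this.part = part; }
        @Override
        public boolean matches(Object o) {
          return ((Task) o).name().contains(part);
        }
      }

      public static void main(String[] args) {
        TaskRunner runner = mock(TaskRunner.class);
        Task task = mock(Task.class);
        when(task.name()).thenReturn("usercache_DEL_123");
        runner.run(task);
        verify(runner).run(argThat(new NameContains("_DEL_")));
      }
    }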

[4/4] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread vvasudev
YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

(cherry picked from commit 547f18cb96aeda55cc19b38be2be4d631b3a5f4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1eecde33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1eecde33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1eecde33

Branch: refs/heads/branch-2
Commit: 1eecde3355134f35ad08d6406198da09cbde8612
Parents: a3ad1a3
Author: Varun Vasudev 
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Varun Vasudev 
Committed: Wed May 31 16:16:33 2017 +0530

--
 .../server/nodemanager/DeletionService.java | 468 ---
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java  |  73 +++
 .../deletion/recovery/package-info.java |  25 +
 .../deletion/task/DeletionTask.java | 258 ++
 .../deletion/task/DeletionTaskType.java |  24 +
 .../deletion/task/FileDeletionTask.java | 202 
 .../deletion/task/package-info.java |  25 +
 .../localizer/LocalResourcesTrackerImpl.java|  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java|  60 ++-
 .../loghandler/NonAggregatingLogHandler.java|   7 +-
 .../yarn_server_nodemanager_recovery.proto  |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java  |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java   |  91 
 .../BaseContainerManagerTest.java   |   7 +-
 .../deletion/task/FileDeletionMatcher.java  |  84 
 .../deletion/task/TestFileDeletionTask.java |  85 
 .../TestLocalResourcesTrackerImpl.java  |   5 +-
 .../TestResourceLocalizationService.java|  33 +-
 .../TestAppLogAggregatorImpl.java   |  15 +-
 .../TestLogAggregationService.java  |  17 +-
 .../TestNonAggregatingLogHandler.java   |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eecde33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;

[3/4] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread vvasudev
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eecde33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 5f9b883..c1df562 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -28,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,19 +58,17 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
-import 
org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentMatcher;
 
 public class TestNodeManagerReboot {
 
@@ -195,19 +193,18 @@ public class TestNodeManagerReboot {
 // restart the NodeManager
 restartNM(MAX_TRIES);
 checkNumOfLocalDirs();
-
-verify(delService, times(1)).delete(
-  (String) isNull(),
-  argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
-  + "_DEL_")));
-verify(delService, times(1)).delete((String) isNull(),
-  argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(user, null,
-new String[] { destinationFile })));
-verify(delService, times(1)).scheduleFileDeletionTask(
-  argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
-  + "_DEL_", new String[] {})));
+
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null,
+new Path(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"), 
null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.FILECACHE + "_DEL_"),
+null)));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, user, null, Arrays.asList(new Path(destinationFile)))));
+verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+delService, null, new Path(ContainerLocalizer.USERCACHE + "_DEL_"),
+new ArrayList<Path>())));
 
 // restart the NodeManager again
 // this time usercache directory should be empty
@@ -329,72 +326,4 @@ public class TestNodeManagerReboot {
   return conf;
 }
   }
-
-  class PathInclude extends ArgumentMatcher<Path> {
-
-final String part;
-
-PathInclude(String part) {
-  this.part = part;
-}
-
-@Override
-public boolean matches(Object o) {
-  return ((Path) o).getName().indexOf(part) != -1;
-}
-  }
-  
-  class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
-final String user;
-final String subDirIncludes;
-final String[] baseDirIncludes;
-
-public FileDeletionInclude(String user, String subDirIncludes,
-String [] baseDirIncludes) {
-  this.user = user;
-  this.subDirIncludes = subDirIncludes;
-  this.baseDirIncludes = baseDirIncludes;
-}

[2/4] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread vvasudev
YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/547f18cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/547f18cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/547f18cb

Branch: refs/heads/trunk
Commit: 547f18cb96aeda55cc19b38be2be4d631b3a5f4f
Parents: 4b4a652
Author: Varun Vasudev 
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Varun Vasudev 
Committed: Wed May 31 16:15:35 2017 +0530

--
 .../server/nodemanager/DeletionService.java | 468 ---
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java  |  73 +++
 .../deletion/recovery/package-info.java |  25 +
 .../deletion/task/DeletionTask.java | 258 ++
 .../deletion/task/DeletionTaskType.java |  24 +
 .../deletion/task/FileDeletionTask.java | 202 
 .../deletion/task/package-info.java |  25 +
 .../localizer/LocalResourcesTrackerImpl.java|  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java|  60 ++-
 .../loghandler/NonAggregatingLogHandler.java|   7 +-
 .../yarn_server_nodemanager_recovery.proto  |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java  |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java   |  91 
 .../BaseContainerManagerTest.java   |   7 +-
 .../deletion/task/FileDeletionMatcher.java  |  84 
 .../deletion/task/TestFileDeletionTask.java |  85 
 .../TestLocalResourcesTrackerImpl.java  |   5 +-
 .../TestResourceLocalizationService.java|  33 +-
 .../TestAppLogAggregatorImpl.java   |  15 +-
 .../TestLogAggregationService.java  |  17 +-
 .../TestNonAggregatingLogHandler.java   |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 
-import com.google.common.annotations.VisibleForTesting;